import heapq

class Solution:
    def getOrder(self, tasks):
        # Attach original indices to tasks and sort by enqueue time
        indexed_tasks = sorted([(enq, proc, idx) for idx, (enq, proc) in enumerate(tasks)])
        n = len(tasks)
        res = []
        heap = []
        time = 0
        i = 0
        while i < n or heap:
            # Add all tasks that have arrived by the current time
            while i < n and indexed_tasks[i][0] <= time:
                enq, proc, idx = indexed_tasks[i]
                heapq.heappush(heap, (proc, idx))
                i += 1
            if heap:
                # Run the shortest available task (ties broken by smaller index)
                proc, idx = heapq.heappop(heap)
                time += proc
                res.append(idx)
            else:
                # No available tasks, jump time to the next task's enqueue time
                if i < n:
                    time = max(time, indexed_tasks[i][0])
        return res
#include <vector>
#include <queue>
#include <tuple>
#include <algorithm>
using namespace std;

class Solution {
public:
    vector<int> getOrder(vector<vector<int>>& tasks) {
        int n = tasks.size();
        // Attach original indices and sort by enqueue time
        vector<tuple<int, int, int>> indexed;
        for (int i = 0; i < n; ++i) {
            indexed.emplace_back(tasks[i][0], tasks[i][1], i);
        }
        sort(indexed.begin(), indexed.end());
        // Min-heap ordered by (processingTime, originalIndex)
        priority_queue<pair<int, int>, vector<pair<int, int>>, greater<>> pq;
        vector<int> res;
        long long time = 0;  // accumulated time can exceed the int range
        int i = 0;
        while (i < n || !pq.empty()) {
            // Push every task that has arrived by the current time
            while (i < n && get<0>(indexed[i]) <= time) {
                pq.emplace(get<1>(indexed[i]), get<2>(indexed[i]));
                ++i;
            }
            if (!pq.empty()) {
                // Run the shortest available task (ties broken by index)
                auto [proc, idx] = pq.top(); pq.pop();
                time += proc;
                res.push_back(idx);
            } else if (i < n) {
                // CPU idle: jump to the next task's enqueue time
                time = max(time, (long long)get<0>(indexed[i]));
            }
        }
        return res;
    }
};
import java.util.*;

class Solution {
    public int[] getOrder(int[][] tasks) {
        int n = tasks.length;
        // Attach original indices and sort by enqueue time
        int[][] indexed = new int[n][3];
        for (int i = 0; i < n; ++i) {
            indexed[i][0] = tasks[i][0];
            indexed[i][1] = tasks[i][1];
            indexed[i][2] = i;
        }
        Arrays.sort(indexed, (a, b) -> Integer.compare(a[0], b[0]));
        // Min-heap ordered by (processingTime, originalIndex)
        PriorityQueue<int[]> pq = new PriorityQueue<>(
            (a, b) -> a[0] == b[0] ? Integer.compare(a[1], b[1]) : Integer.compare(a[0], b[0]));
        int[] res = new int[n];
        long time = 0;  // use long: the accumulated time can overflow int
        int i = 0, resIdx = 0;
        while (i < n || !pq.isEmpty()) {
            // Push every task that has arrived by the current time
            while (i < n && indexed[i][0] <= time) {
                pq.offer(new int[]{indexed[i][1], indexed[i][2]});
                i++;
            }
            if (!pq.isEmpty()) {
                // Run the shortest available task (ties broken by index)
                int[] curr = pq.poll();
                time += curr[0];
                res[resIdx++] = curr[1];
            } else if (i < n) {
                // CPU idle: jump to the next task's enqueue time
                time = Math.max(time, indexed[i][0]);
            }
        }
        return res;
    }
}
// Minimal binary min-heap ordered by (processingTime, originalIndex)
class MinHeap {
    constructor() {
        this.heap = [];
    }
    push(val) {
        this.heap.push(val);
        this._bubbleUp();
    }
    pop() {
        if (this.heap.length === 1) return this.heap.pop();
        const top = this.heap[0];
        this.heap[0] = this.heap.pop();
        this._bubbleDown();
        return top;
    }
    // Restore the heap property after pushing a new element at the end
    _bubbleUp() {
        let idx = this.heap.length - 1;
        while (idx > 0) {
            let parent = Math.floor((idx - 1) / 2);
            if (this._compare(this.heap[idx], this.heap[parent]) < 0) {
                [this.heap[idx], this.heap[parent]] = [this.heap[parent], this.heap[idx]];
                idx = parent;
            } else break;
        }
    }
    // Restore the heap property after replacing the root
    _bubbleDown() {
        let idx = 0, len = this.heap.length;
        while (true) {
            let left = 2 * idx + 1, right = 2 * idx + 2, smallest = idx;
            if (left < len && this._compare(this.heap[left], this.heap[smallest]) < 0) smallest = left;
            if (right < len && this._compare(this.heap[right], this.heap[smallest]) < 0) smallest = right;
            if (smallest !== idx) {
                [this.heap[idx], this.heap[smallest]] = [this.heap[smallest], this.heap[idx]];
                idx = smallest;
            } else break;
        }
    }
    // Order by processing time, break ties by original index
    _compare(a, b) {
        if (a[0] !== b[0]) return a[0] - b[0];
        return a[1] - b[1];
    }
    size() {
        return this.heap.length;
    }
}

var getOrder = function(tasks) {
    let n = tasks.length;
    // Attach original indices and sort by enqueue time
    let indexed = tasks.map((t, i) => [t[0], t[1], i]);
    indexed.sort((a, b) => a[0] - b[0]);
    let res = [];
    let heap = new MinHeap();
    let time = 0, i = 0;
    while (i < n || heap.size() > 0) {
        // Push every task that has arrived by the current time
        while (i < n && indexed[i][0] <= time) {
            heap.push([indexed[i][1], indexed[i][2]]);
            i++;
        }
        if (heap.size() > 0) {
            // Run the shortest available task (ties broken by index)
            let [proc, idx] = heap.pop();
            time += proc;
            res.push(idx);
        } else if (i < n) {
            // CPU idle: jump to the next task's enqueue time
            time = Math.max(time, indexed[i][0]);
        }
    }
    return res;
};
You are given a list of tasks, where each task is described by two integers: enqueueTime (when the task becomes available) and processingTime (how long it takes to process). Only one task can be processed at a time (single-threaded CPU), and the CPU can only process tasks that have arrived, i.e., tasks whose enqueueTime is less than or equal to the current time.

The CPU should always select the available task with the shortest processingTime to run next. If there are multiple tasks with the same processingTime, choose the one with the smallest original index.

Your goal is to return the order in which the CPU will process the tasks, as an array of their original indices.
At first glance, it might seem natural to simulate the CPU step by step, checking all available tasks at each time unit and picking the one with the smallest processingTime. However, this brute-force approach can be very slow: advancing one time unit at a time makes the running time proportional to the final timestamp, which can be far larger than the number of tasks.
To optimize, we need a way to efficiently keep track of which tasks are available at the current time and quickly select the one with the shortest processing time (and smallest index for ties). This suggests using a priority queue (min-heap), which allows us to always grab the "best" task in O(log n) time.
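To see why a tuple-keyed min-heap handles both criteria at once, here is a small illustrative snippet; the (processingTime, originalIndex) pairs below are made-up values, not taken from the problem. Python's heapq compares tuples element by element, so the shortest processing time wins and ties fall back to the smaller index automatically.

import heapq

# Hypothetical (processingTime, originalIndex) pairs for tasks that are
# all available at the current time.
available = [(4, 1), (2, 2), (2, 0)]

heap = []
for item in available:
    heapq.heappush(heap, item)

# Tuples compare element by element: shortest processing time first,
# then the smaller original index on ties.
print(heapq.heappop(heap))  # (2, 0)
print(heapq.heappop(heap))  # (2, 2)
print(heapq.heappop(heap))  # (4, 1)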
Additionally, since tasks can arrive at different times, we should process them in order of their enqueueTime and "push" them into our heap as soon as they become available.
The algorithm works as follows:

1. Sort the tasks by enqueueTime, keeping each task's original index. This lets us process them in the order they become available.
2. Use a min-heap that stores available tasks as (processingTime, originalIndex). This way, the heap always gives us the next task to process.
3. Maintain a time variable representing the current time.
4. At each step, push into the heap every task whose enqueueTime ≤ time.
5. Pop the top of the heap, advance the clock (add its processingTime to time), and record its index.
6. If no task is available, jump time forward to the next task's enqueueTime.

This approach ensures we always process the available task with the shortest processing time, efficiently handling time jumps and tie-breaking by index. A condensed sketch of this loop, keyed to the step numbers, follows.
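The sketch below mirrors the full Python solution shown at the top of this article; scheduling_order is just an illustrative standalone name used here so the steps can be annotated inline, not a separate API.

import heapq

def scheduling_order(tasks):
    # Step 1: sort by enqueue time, keeping each task's original index
    order = sorted((enq, proc, i) for i, (enq, proc) in enumerate(tasks))
    heap, res = [], []            # Step 2: min-heap of (processingTime, index)
    time, i = 0, 0                # Step 3: current time and scan pointer
    while i < len(order) or heap:
        while i < len(order) and order[i][0] <= time:
            heapq.heappush(heap, (order[i][1], order[i][2]))   # Step 4
            i += 1
        if heap:
            proc, idx = heapq.heappop(heap)                    # Step 5
            time += proc
            res.append(idx)
        elif i < len(order):
            time = order[i][0]                                 # Step 6: skip the idle gap
    return res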
Let's walk through an example with tasks = [[1,2],[2,4],[3,2],[4,1]].

Sorted with original indices attached: [(1,2,0), (2,4,1), (3,2,2), (4,1,3)].

Start with time = 0, heap = [], i = 0. No task has arrived yet, so the CPU idles and time jumps to 1.
time = 1: Task 0 arrives, push (2,0) to heap. Pop (2,0), time = 3, result = [0].
time = 3: Task 1 and Task 2 have arrived. Push (4,1) and (2,2). Pop (2,2), time = 5, result = [0,2].
time = 5: Task 3 has arrived. Push (1,3). Pop (1,3), time = 6, result = [0,2,3].
time = 6: No new tasks. Pop (4,1), time = 10, result = [0,2,3,1].

Final order: [0, 2, 3, 1]
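To confirm the walkthrough, the example can be run directly against the Python solution; the small driver below assumes the Solution class from the top of this article is defined in the same file (or imported).

if __name__ == "__main__":
    tasks = [[1, 2], [2, 4], [3, 2], [4, 1]]
    print(Solution().getOrder(tasks))  # expected: [0, 2, 3, 1]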
The optimized solution runs in O(n log n) time and O(n) extra space, so it scales well for large inputs.
The key to efficiently solving the Single-Threaded CPU problem is to simulate the CPU's operation using a min-heap to always select the next best available task. By sorting tasks by enqueue time and using a heap for quick selection, we ensure the CPU always processes the optimal task at each step. This strategy is both elegant and efficient, reducing a potentially quadratic problem to O(n log n) time.