# Python solution: three hash maps; OrderedDict preserves recency within each frequency.
from collections import defaultdict, OrderedDict

class LFUCache:
    def __init__(self, capacity: int):
        self.cap = capacity
        self.key_to_val = {}                            # key -> value
        self.key_to_freq = {}                           # key -> access count
        self.freq_to_keys = defaultdict(OrderedDict)    # freq -> keys in LRU-to-MRU order
        self.min_freq = 0                               # smallest frequency present

    def get(self, key: int) -> int:
        if key not in self.key_to_val:
            return -1
        self._increase_freq(key)
        return self.key_to_val[key]

    def put(self, key: int, value: int) -> None:
        if self.cap == 0:
            return
        if key in self.key_to_val:
            self.key_to_val[key] = value
            self._increase_freq(key)
            return
        if len(self.key_to_val) >= self.cap:
            # Evict the least recently used key among those with the minimum frequency.
            evict_key, _ = self.freq_to_keys[self.min_freq].popitem(last=False)
            del self.key_to_val[evict_key]
            del self.key_to_freq[evict_key]
        self.key_to_val[key] = value
        self.key_to_freq[key] = 1
        self.freq_to_keys[1][key] = None
        self.min_freq = 1    # a newly inserted key always has frequency 1

    def _increase_freq(self, key):
        # Move the key from its current frequency bucket to the next one.
        freq = self.key_to_freq[key]
        self.freq_to_keys[freq].pop(key)
        if not self.freq_to_keys[freq]:
            del self.freq_to_keys[freq]
            if self.min_freq == freq:
                self.min_freq += 1
        self.key_to_freq[key] = freq + 1
        self.freq_to_keys[freq + 1][key] = None    # appended last = most recently used
// C++ solution: each frequency owns a doubly linked list of nodes (MRU at the front).
#include <unordered_map>
#include <list>
using namespace std;

class LFUCache {
    int cap, minFreq;
    struct Node {
        int key, val, freq;
        Node(int k, int v, int f) : key(k), val(v), freq(f) {}
    };
    unordered_map<int, list<Node>::iterator> keyToNode;  // key -> position in its list
    unordered_map<int, list<Node>> freqToNodes;          // freq -> nodes with that freq

public:
    LFUCache(int capacity) {
        cap = capacity;
        minFreq = 0;
    }

    int get(int key) {
        if (keyToNode.find(key) == keyToNode.end()) return -1;
        auto node = keyToNode[key];
        int val = node->val, freq = node->freq;
        // Move the node from its current frequency list into the next one.
        freqToNodes[freq].erase(node);
        if (freqToNodes[freq].empty()) {
            freqToNodes.erase(freq);
            if (minFreq == freq) minFreq++;
        }
        freqToNodes[freq + 1].push_front(Node(key, val, freq + 1));
        keyToNode[key] = freqToNodes[freq + 1].begin();
        return val;
    }

    void put(int key, int value) {
        if (cap == 0) return;
        if (get(key) != -1) {            // get() also bumps the key's frequency
            keyToNode[key]->val = value;
            return;
        }
        if ((int)keyToNode.size() >= cap) {
            // The LRU node within the minimum-frequency list is at the back.
            auto node = freqToNodes[minFreq].back();
            keyToNode.erase(node.key);
            freqToNodes[minFreq].pop_back();
            if (freqToNodes[minFreq].empty()) freqToNodes.erase(minFreq);
        }
        freqToNodes[1].push_front(Node(key, value, 1));
        keyToNode[key] = freqToNodes[1].begin();
        minFreq = 1;
    }
};
// Java solution: LinkedHashSet keeps insertion order, giving LRU order within a frequency.
import java.util.*;

class LFUCache {
    private int cap, minFreq;
    private Map<Integer, Integer> keyToVal;                    // key -> value
    private Map<Integer, Integer> keyToFreq;                   // key -> access count
    private Map<Integer, LinkedHashSet<Integer>> freqToKeys;   // freq -> keys, LRU first

    public LFUCache(int capacity) {
        cap = capacity;
        minFreq = 0;
        keyToVal = new HashMap<>();
        keyToFreq = new HashMap<>();
        freqToKeys = new HashMap<>();
    }

    public int get(int key) {
        if (!keyToVal.containsKey(key)) return -1;
        increaseFreq(key);
        return keyToVal.get(key);
    }

    public void put(int key, int value) {
        if (cap == 0) return;
        if (keyToVal.containsKey(key)) {
            keyToVal.put(key, value);
            increaseFreq(key);
            return;
        }
        if (keyToVal.size() >= cap) {
            // The iterator's first element is the LRU key at the minimum frequency.
            int evict = freqToKeys.get(minFreq).iterator().next();
            freqToKeys.get(minFreq).remove(evict);
            keyToVal.remove(evict);
            keyToFreq.remove(evict);
        }
        keyToVal.put(key, value);
        keyToFreq.put(key, 1);
        freqToKeys.computeIfAbsent(1, k -> new LinkedHashSet<>()).add(key);
        minFreq = 1;
    }

    private void increaseFreq(int key) {
        // Move the key from its current frequency bucket to the next one.
        int freq = keyToFreq.get(key);
        freqToKeys.get(freq).remove(key);
        if (freqToKeys.get(freq).isEmpty()) {
            freqToKeys.remove(freq);
            if (minFreq == freq) minFreq++;
        }
        keyToFreq.put(key, freq + 1);
        freqToKeys.computeIfAbsent(freq + 1, k -> new LinkedHashSet<>()).add(key);
    }
}
// JavaScript solution: Set iteration order is insertion order, so the oldest key comes first.
class LFUCache {
    constructor(capacity) {
        this.cap = capacity;
        this.minFreq = 0;
        this.keyToVal = new Map();    // key -> value
        this.keyToFreq = new Map();   // key -> access count
        this.freqToKeys = new Map();  // freq -> Set of keys, LRU first
    }

    get(key) {
        if (!this.keyToVal.has(key)) return -1;
        this._increaseFreq(key);
        return this.keyToVal.get(key);
    }

    put(key, value) {
        if (this.cap === 0) return;
        if (this.keyToVal.has(key)) {
            this.keyToVal.set(key, value);
            this._increaseFreq(key);
            return;
        }
        if (this.keyToVal.size >= this.cap) {
            // The first key in the minimum-frequency set is the least recently used.
            let keys = this.freqToKeys.get(this.minFreq);
            let evictKey = keys.keys().next().value;
            keys.delete(evictKey);
            this.keyToVal.delete(evictKey);
            this.keyToFreq.delete(evictKey);
        }
        this.keyToVal.set(key, value);
        this.keyToFreq.set(key, 1);
        if (!this.freqToKeys.has(1)) this.freqToKeys.set(1, new Set());
        this.freqToKeys.get(1).add(key);
        this.minFreq = 1;
    }

    _increaseFreq(key) {
        // Move the key from its current frequency bucket to the next one.
        let freq = this.keyToFreq.get(key);
        this.freqToKeys.get(freq).delete(key);
        if (this.freqToKeys.get(freq).size === 0) {
            this.freqToKeys.delete(freq);
            if (this.minFreq === freq) this.minFreq++;
        }
        this.keyToFreq.set(key, freq + 1);
        if (!this.freqToKeys.has(freq + 1)) this.freqToKeys.set(freq + 1, new Set());
        this.freqToKeys.get(freq + 1).add(key);
    }
}
The LFU (Least Frequently Used) Cache problem asks you to design and implement a data structure that supports the following operations:
- get(key): Retrieve the value of the key if it exists in the cache; otherwise, return -1.
- put(key, value): Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate and remove the least frequently used item before inserting a new item. If there are multiple keys with the same minimum frequency, remove the least recently used one among them.
The cache is initialized with a positive integer capacity. You must ensure that both get and put operations run in O(1) average time.
Key constraints:
- The capacity is a positive integer fixed at construction time.
- Both get and put must run in average O(1) time.
- Eviction removes the least frequently used key; ties between equally frequent keys are broken by least recent use.
At first glance, the LFU Cache problem might seem similar to the LRU (Least Recently Used) Cache problem, but with an added twist: instead of evicting based on recency alone, we must also track how frequently each key is accessed.
A brute-force approach would be to store all keys and counts in a list or array, and every time we need to evict, scan through all keys to find the one with the lowest frequency. However, this is inefficient and fails to meet the constant time requirement.
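To make that cost concrete, here is a minimal sketch of the brute-force idea (the class name and entry layout are hypothetical, for illustration only): every eviction scans all entries for the lowest frequency, breaking ties by oldest access, which is O(n) per eviction.

import itertools

class BruteForceLFU:
    """Illustrative O(n)-eviction cache; it fails the O(1) requirement."""
    def __init__(self, capacity):
        self.cap = capacity
        self.data = {}                 # key -> [value, freq, last_used_tick]
        self.tick = itertools.count()  # monotonically increasing access clock

    def get(self, key):
        if key not in self.data:
            return -1
        entry = self.data[key]
        entry[1] += 1
        entry[2] = next(self.tick)
        return entry[0]

    def put(self, key, value):
        if self.cap == 0:
            return
        if key in self.data:
            entry = self.data[key]
            entry[0] = value
            entry[1] += 1
            entry[2] = next(self.tick)
            return
        if len(self.data) >= self.cap:
            # O(n) scan: lowest frequency first, oldest access breaks ties.
            evict = min(self.data, key=lambda k: (self.data[k][1], self.data[k][2]))
            del self.data[evict]
        self.data[key] = [value, 1, next(self.tick)]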
To optimize, we need:
- Lookup of a key's value in constant time (O(1)).
- Update of a key's frequency in constant time (O(1)).
- Eviction of the least frequently (and, on ties, least recently) used key in constant time (O(1)).

This leads us to combine multiple data structures: hash maps for fast lookup, and ordered structures (like linked lists or ordered dictionaries) for tracking recency within each frequency.
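As a small illustration of why an order-preserving map fits this role, Python's OrderedDict (already used in the solution above) can append at one end and pop the oldest entry at the other in O(1), which is exactly the recency tie-breaking a frequency bucket needs:

from collections import OrderedDict

bucket = OrderedDict()    # keys sharing one frequency, oldest first
bucket[10] = None         # key 10 touched first
bucket[20] = None
bucket[30] = None

oldest, _ = bucket.popitem(last=False)   # O(1) removal of the least recently used
print(oldest)                            # -> 10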
The LFU Cache can be implemented efficiently by combining several hash maps and ordered data structures:
- A hash map from each key to its value, giving O(1) access for get and put operations.
- A hash map from each key to its frequency, i.e., how many times the key has been accessed.
- A hash map from each frequency to an order-preserving collection of keys (an OrderedDict, LinkedHashSet, or linked list), so the least recently used key within a frequency is found in O(1).
- A minFreq counter recording the smallest frequency currently in the cache.

Step-by-step Algorithm:
1. On get(key), return -1 if the key is absent; otherwise move the key from its current frequency bucket to the next one (the same frequency bump also runs on a put that updates an existing key, via get).
2. If that move empties the bucket at the minimum frequency, increment minFreq.
3. On put of a new key at capacity, evict the least recently used key from the bucket at minFreq.
4. Insert the new key with frequency 1 and reset minFreq to 1.

By using hash maps and ordered sets/lists, all operations can be performed in average constant time.
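As a quick sketch of those steps in action, the snippet below (relying on the Python LFUCache defined at the top of this article and peeking at its internals) shows how a get moves a key between frequency buckets:

cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
cache.get(1)    # bumps key 1 from the frequency-1 bucket to the frequency-2 bucket

# Bucket layout is now: frequency 1 -> [2], frequency 2 -> [1]
for freq, keys in sorted(cache.freq_to_keys.items()):
    print(freq, list(keys))
print("min_freq:", cache.min_freq)    # 1, because key 2 still has frequency 1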
Let's walk through an example with capacity = 2 and the following sequence:
1. put(1, 1)
2. put(2, 2)
3. get(1) → returns 1
4. put(3, 3)
5. get(2) → returns -1
6. get(3) → returns 3
7. get(1) → returns 1
8. put(4, 4)
9. get(3) → returns -1
10. get(4) → returns 4
11. get(1) → returns 1

Step-by-step:
- put(1, 1) and put(2, 2) fill the cache; both keys have frequency 1.
- get(1) returns 1 and raises key 1's frequency to 2.
- put(3, 3) must evict: key 2 alone has the minimum frequency (1), so it is removed and key 3 enters with frequency 1.
- get(2) returns -1 because key 2 was evicted; get(3) returns 3 (raising key 3 to frequency 2); get(1) returns 1 (raising key 1 to frequency 3).
- put(4, 4) must evict again: the minimum frequency is now 2, held only by key 3, so key 3 is removed and key 4 enters with frequency 1.
- get(3) returns -1, get(4) returns 4, and get(1) returns 1.
This example demonstrates how the cache tracks both frequency and recency to decide which element to evict.
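The walkthrough can also be checked mechanically; this short driver (using the Python LFUCache from the top of the article) asserts every return value in the sequence:

cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1     # key 1's frequency rises to 2
cache.put(3, 3)              # evicts key 2, the lone minimum-frequency key
assert cache.get(2) == -1
assert cache.get(3) == 3
assert cache.get(1) == 1
cache.put(4, 4)              # minimum frequency is now 2, so key 3 is evicted
assert cache.get(3) == -1
assert cache.get(4) == 4
assert cache.get(1) == 1
print("all assertions passed")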
Brute-force Approach: every get or put operation could require scanning all keys to find the LFU/LRU key, leading to O(n) time per operation (where n is the number of keys in the cache).

Optimized Approach: all operations (get, put, frequency update, and eviction) are O(1) on average.

Space Complexity: O(capacity), as we only store up to capacity items and associated metadata.

The efficiency comes from the combination of data structures that enable fast lookup, update, and eviction.
The LFU Cache problem is a classic example of combining multiple data structures to meet strict performance requirements. By mapping keys to values and frequencies, and grouping keys by frequency with order-preserving structures, we can efficiently track both frequency and recency. The key insight is to always evict the least frequently used key, and among ties, the least recently used one. This approach ensures all operations run in constant time, making the solution both elegant and practical for real-world caching scenarios.