Want Help Cracking FAANG?

(Then click this)

×
Back to Question Bank

460. LFU Cache - Leetcode Solution

Code Implementation

from collections import defaultdict, OrderedDict

class LFUCache:
    """LFU cache with LRU tie-breaking; average O(1) ``get`` and ``put``.

    Data structures:
      * key_to_val    -- key -> stored value
      * key_to_freq   -- key -> access frequency
      * freq_to_keys  -- frequency -> OrderedDict of keys at that frequency,
                         oldest (least recently used) first
      * min_freq      -- smallest frequency currently present in the cache
    """

    def __init__(self, capacity: int):
        self.cap = capacity
        self.key_to_val = {}
        self.key_to_freq = {}
        self.freq_to_keys = defaultdict(OrderedDict)
        self.min_freq = 0

    def get(self, key: int) -> int:
        """Return the value for *key* (bumping its frequency), or -1 if absent."""
        if key not in self.key_to_val:
            return -1
        self._increase_freq(key)
        return self.key_to_val[key]

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the LFU key (LRU on ties) when full."""
        if self.cap == 0:
            return

        if key in self.key_to_val:
            self.key_to_val[key] = value
            self._increase_freq(key)
            return

        if len(self.key_to_val) >= self.cap:
            bucket = self.freq_to_keys[self.min_freq]
            # Oldest entry in the min-frequency bucket = LRU among LFU keys.
            evict_key, _ = bucket.popitem(last=False)
            if not bucket:
                # Drop the empty bucket so the defaultdict does not
                # accumulate empty OrderedDicts over the cache's lifetime.
                del self.freq_to_keys[self.min_freq]
            del self.key_to_val[evict_key]
            del self.key_to_freq[evict_key]

        self.key_to_val[key] = value
        self.key_to_freq[key] = 1
        self.freq_to_keys[1][key] = None
        # A brand-new key always has the lowest possible frequency.
        self.min_freq = 1

    def _increase_freq(self, key: int) -> None:
        """Move *key* from its current frequency bucket to the next one."""
        freq = self.key_to_freq[key]
        self.freq_to_keys[freq].pop(key)
        if not self.freq_to_keys[freq]:
            del self.freq_to_keys[freq]
            if self.min_freq == freq:
                self.min_freq += 1
        self.key_to_freq[key] = freq + 1
        # Insertion appends, making the key most-recent within its new bucket.
        self.freq_to_keys[freq + 1][key] = None
      
#include <unordered_map>
#include <list>

// LFU cache with LRU tie-breaking; average O(1) get/put.
// Each frequency owns a std::list of nodes, most recently used at the front,
// so the back of the min-frequency list is the eviction victim.
class LFUCache {
    int cap, minFreq;
    struct Node {
        int key, val, freq;
        Node(int k, int v, int f) : key(k), val(v), freq(f) {}
    };
    // key -> iterator into the node's current frequency list.
    // std::list iterators stay valid across map rehashing and splicing.
    std::unordered_map<int, std::list<Node>::iterator> keyToNode;
    // frequency -> nodes at that frequency, MRU at the front.
    std::unordered_map<int, std::list<Node>> freqToNodes;
public:
    LFUCache(int capacity) {
        cap = capacity;
        minFreq = 0;
    }

    // Returns the value for key and bumps its frequency, or -1 if absent.
    int get(int key) {
        if (keyToNode.find(key) == keyToNode.end()) return -1;
        auto node = keyToNode[key];
        int val = node->val, freq = node->freq;
        freqToNodes[freq].erase(node);
        if (freqToNodes[freq].empty()) {
            freqToNodes.erase(freq);
            if (minFreq == freq) minFreq++;
        }
        freqToNodes[freq + 1].push_front(Node(key, val, freq + 1));
        keyToNode[key] = freqToNodes[freq + 1].begin();
        return val;
    }

    // Inserts or updates key; evicts the LFU (LRU on ties) key when full.
    void put(int key, int value) {
        if (cap == 0) return;
        // get() both tests membership and performs the frequency bump.
        if (get(key) != -1) {
            keyToNode[key]->val = value;
            return;
        }
        if ((int)keyToNode.size() >= cap) {  // cast avoids signed/unsigned compare
            auto node = freqToNodes[minFreq].back();  // back = LRU among LFU
            keyToNode.erase(node.key);
            freqToNodes[minFreq].pop_back();
            if (freqToNodes[minFreq].empty()) freqToNodes.erase(minFreq);
        }
        freqToNodes[1].push_front(Node(key, value, 1));
        keyToNode[key] = freqToNodes[1].begin();
        minFreq = 1;  // a fresh key always has the lowest frequency
    }
};
      
import java.util.*;

/**
 * LFU cache with LRU tie-breaking; average O(1) get/put.
 *
 * Keys are grouped into per-frequency buckets (LinkedHashSet preserves
 * insertion order, so the first key in a bucket is the least recently used).
 * lowestCount tracks the minimum frequency present for O(1) eviction.
 */
class LFUCache {
    private final int capacity;
    private int lowestCount;
    private final Map<Integer, Integer> values = new HashMap<>();
    private final Map<Integer, Integer> counts = new HashMap<>();
    private final Map<Integer, LinkedHashSet<Integer>> buckets = new HashMap<>();

    public LFUCache(int capacity) {
        this.capacity = capacity;
        this.lowestCount = 0;
    }

    /** Returns the value for key (bumping its use count), or -1 if absent. */
    public int get(int key) {
        Integer value = values.get(key);
        if (value == null) return -1;
        bump(key);
        return value;
    }

    /** Inserts or updates key; evicts the LFU (LRU on ties) key when full. */
    public void put(int key, int value) {
        if (capacity == 0) return;
        if (values.containsKey(key)) {
            values.put(key, value);
            bump(key);
            return;
        }
        if (values.size() >= capacity) {
            // First element of the lowest-count bucket = LRU among LFU keys.
            LinkedHashSet<Integer> lruGroup = buckets.get(lowestCount);
            int victim = lruGroup.iterator().next();
            lruGroup.remove(victim);
            values.remove(victim);
            counts.remove(victim);
        }
        values.put(key, value);
        counts.put(key, 1);
        buckets.computeIfAbsent(1, c -> new LinkedHashSet<>()).add(key);
        lowestCount = 1;  // a fresh key always has the lowest count
    }

    /** Moves key from its current count bucket to the next one up. */
    private void bump(int key) {
        int count = counts.get(key);
        LinkedHashSet<Integer> group = buckets.get(count);
        group.remove(key);
        if (group.isEmpty()) {
            buckets.remove(count);
            if (lowestCount == count) lowestCount++;
        }
        counts.put(key, count + 1);
        buckets.computeIfAbsent(count + 1, c -> new LinkedHashSet<>()).add(key);
    }
}
      
// LFU cache with LRU tie-breaking; average O(1) get/put.
// Keys are grouped into per-frequency Sets (JS Sets iterate in insertion
// order, so the first key in a bucket is the least recently used).
class LFUCache {
    constructor(capacity) {
        this.cap = capacity;
        this.minFreq = 0;
        this.values = new Map();   // key -> value
        this.counts = new Map();   // key -> use count
        this.buckets = new Map();  // count -> Set of keys, LRU first
    }

    // Returns the value for key (bumping its count), or -1 if absent.
    get(key) {
        if (!this.values.has(key)) return -1;
        this._bump(key);
        return this.values.get(key);
    }

    // Inserts or updates key; evicts the LFU (LRU on ties) key when full.
    put(key, value) {
        if (this.cap === 0) return;
        if (this.values.has(key)) {
            this.values.set(key, value);
            this._bump(key);
            return;
        }
        if (this.values.size >= this.cap) {
            const group = this.buckets.get(this.minFreq);
            const [victim] = group;  // first inserted = LRU among LFU keys
            group.delete(victim);
            this.values.delete(victim);
            this.counts.delete(victim);
        }
        this.values.set(key, value);
        this.counts.set(key, 1);
        if (!this.buckets.has(1)) this.buckets.set(1, new Set());
        this.buckets.get(1).add(key);
        this.minFreq = 1;  // a fresh key always has the lowest count
    }

    // Moves key from its current count bucket into the next one up.
    _bump(key) {
        const count = this.counts.get(key);
        const group = this.buckets.get(count);
        group.delete(key);
        if (group.size === 0) {
            this.buckets.delete(count);
            if (this.minFreq === count) this.minFreq += 1;
        }
        this.counts.set(key, count + 1);
        let next = this.buckets.get(count + 1);
        if (!next) {
            next = new Set();
            this.buckets.set(count + 1, next);
        }
        next.add(key);
    }
}
      

Problem Description

The LFU (Least Frequently Used) Cache problem asks you to design and implement a data structure that supports the following operations:

  • get(key): Retrieve the value of the key if it exists in the cache. Otherwise, return -1.
  • put(key, value): Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate and remove the least frequently used item before inserting a new item. If there are multiple keys with the same minimum frequency, remove the least recently used one among them.

The cache should be initialized with a positive integer capacity. You must ensure that both get and put operations run in constant time, on average (O(1)).

Key constraints:

  • There is only one valid way to evict keys: always evict the least frequently used key, and if there is a tie, evict the least recently used among them.
  • Do not reuse elements after they have been evicted.
  • All keys and values are integers.

Thought Process

At first glance, the LFU Cache problem might seem similar to the LRU (Least Recently Used) Cache problem, but with an added twist: instead of evicting based on recency alone, we must also track how frequently each key is accessed.

A brute-force approach would be to store all keys and counts in a list or array, and every time we need to evict, scan through all keys to find the one with the lowest frequency. However, this is inefficient and fails to meet the constant time requirement.

To optimize, we need:

  • Quick lookup of a key's value (O(1)).
  • Quick update of a key's frequency and recency (O(1)).
  • Quick access to the least frequently used (and least recently used among ties) key (O(1)).

This leads us to combine multiple data structures: hash maps for fast lookup, and ordered structures (like linked lists or ordered dictionaries) for tracking recency within each frequency.

Solution Approach

The LFU Cache can be implemented efficiently by combining several hash maps and ordered data structures:

  1. Key to Value Map:
    • Stores the actual value for each key.
    • Allows O(1) access for get and put operations.
  2. Key to Frequency Map:
    • Tracks how many times each key has been accessed.
    • Allows quick frequency update when a key is accessed or updated.
  3. Frequency to Keys Map:
    • For each frequency, keeps an ordered set or list of keys with that frequency.
    • Within each frequency, maintains the order of insertion or access to track recency.
    • Allows us to find and evict the least recently used key among those with the lowest frequency.
  4. Minimum Frequency Tracker:
    • Keeps track of the minimum frequency present in the cache at any time.
    • When evicting, always look at this frequency group.

Step-by-step Algorithm:

  1. Get Operation:
    • If the key is not present, return -1.
    • If present, increase its frequency by 1.
    • Move the key to the next frequency group, updating its recency.
    • If the previous frequency group is empty and was the minimum, increment minFreq.
  2. Put Operation:
    • If the cache has zero capacity, do nothing.
    • If the key is already present, update its value and increase its frequency (as in get).
    • If the cache is at capacity, evict the least recently used key among those with the lowest frequency (minFreq).
    • Insert the new key with frequency 1, and set minFreq to 1.

By using hash maps and ordered sets/lists, all operations can be performed in average constant time.

Example Walkthrough

Let's walk through an example with capacity = 2 and the following sequence:

  • put(1, 1)
  • put(2, 2)
  • get(1) → returns 1
  • put(3, 3)
  • get(2) → returns -1
  • get(3) → returns 3
  • get(1) → returns 1
  • put(4, 4)
  • get(3) → returns -1
  • get(4) → returns 4
  • get(1) → returns 1

Step-by-step:

  1. Insert 1: cache = {1=1} (freq 1)
  2. Insert 2: cache = {1=1, 2=2} (freq 1 for both)
  3. Get 1: returns 1, now 1 has freq 2, 2 has freq 1
  4. Insert 3: Cache is full. Key 1 has freq 2 and key 2 has freq 1, so key 2 is the least frequently used — evict 2. Insert 3 with freq 1. Cache = {1=1 (freq 2), 3=3 (freq 1)}
  5. Get 2: not found, returns -1
  6. Get 3: returns 3, now 3 has freq 2, cache = {1=1 (freq 2), 3=3 (freq 2)}
  7. Get 1: returns 1, now 1 has freq 3
  8. Insert 4: Cache is full. Key 3 (freq 2) has the lowest frequency (key 1 has freq 3), so evict 3. Insert 4 with freq 1. Cache = {1=1 (freq 3), 4=4 (freq 1)}
  9. Get 3: not found, returns -1
  10. Get 4: returns 4, now 4 has freq 2
  11. Get 1: returns 1, now 1 has freq 4

This example demonstrates how the cache tracks both frequency and recency to decide which element to evict.

Time and Space Complexity

Brute-force Approach:

  • Each get or put operation could require scanning all keys to find the LFU/LRU key, leading to O(n) time per operation (where n is the number of keys in the cache).
Optimized Approach:
  • By using hash maps and ordered sets/lists, all main operations (get, put, frequency update, and eviction) are O(1) on average.
  • Space complexity is O(capacity), as we only store up to capacity items and associated metadata.

The efficiency comes from the combination of data structures that enable fast lookup, update, and eviction.

Summary

The LFU Cache problem is a classic example of combining multiple data structures to meet strict performance requirements. By mapping keys to values and frequencies, and grouping keys by frequency with order-preserving structures, we can efficiently track both frequency and recency. The key insight is to always evict the least frequently used key, and among ties, the least recently used one. This approach ensures all operations run in constant time, making the solution both elegant and practical for real-world caching scenarios.