LRU cache design

借酒劲吻你 2020-11-27 09:26

A Least Recently Used (LRU) cache discards the least recently used items first. How do you design and implement such a cache class?

11 Answers
  • 2020-11-27 09:54

    LRU Page Replacement Technique:

    When a page is referenced, the required page may already be in the cache.

    If it is in the cache: we move it to the front of the cache queue.

    If it is NOT in the cache: we bring it into the cache. In simple words, we add the new page to the front of the cache queue. If the cache is full, i.e. all the frames are occupied, we remove a page from the rear of the cache queue and add the new page to the front.

    # Cache size
    csize = int(input())
    
    # Sequence of page references
    pages = list(map(int, input().split()))
    
    # The cache list: least recently used at index 0, most recent at the end
    cache = []
    
    # Number of elements currently in the cache
    n = 0
    
    # Page-fault counter
    fault = 0
    
    for page in pages:
        # If the page is already in the cache
        if page in cache:
            # Move the page to the end, as it is now the most recent
            cache.remove(page)
            cache.append(page)
        else:
            # Cache is full
            if n == csize:
                # Evict the least recently used page
                cache.pop(0)
            else:
                # Increment the element count in the cache
                n = n + 1
    
            # Page not in the cache => page fault
            fault += 1
            cache.append(page)
    
    print("Page Fault:", fault)
    

    Input/Output

    Input:
    3
    1 2 3 4 1 2 5 1 2 3 4 5
    
    Output:
    Page Fault: 10
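
    Note that cache.remove(page) makes every hit cost O(n) in the cache size. As a rough sketch (not part of the original answer), the same page-fault count can be computed with O(1) updates per access using collections.OrderedDict; the function name is illustrative:

    from collections import OrderedDict
    
    def count_page_faults(csize, pages):
        cache = OrderedDict()  # keys ordered from least to most recently used
        fault = 0
        for page in pages:
            if page in cache:
                # Page hit: mark as most recently used
                cache.move_to_end(page)
            else:
                fault += 1
                if len(cache) == csize:
                    # Evict the least recently used page
                    cache.popitem(last=False)
                cache[page] = True
        return fault
    
    print(count_page_faults(3, [1, 2, 3, 4, 1, 2, 5, 1, 2, 3, 4, 5]))  # Page Fault: 10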
    
  • 2020-11-27 09:56

    I see several unnecessarily complicated implementations here, so I decided to provide my implementation as well. The cache has only two methods, get and set. Hopefully it is more readable and understandable:

    #include <unordered_map>
    #include <list>
    
    using namespace std;
    
    template<typename K, typename V = K>
    class LRUCache
    {
    
    private:
        list<K> items;
        unordered_map<K, pair<V, typename list<K>::iterator>> keyValuesMap;
        int csize;
    
    public:
        LRUCache(int s) :csize(s) {
            if (csize < 1)
                csize = 10;
        }
    
        void set(const K key, const V value) {
            auto pos = keyValuesMap.find(key);
            if (pos == keyValuesMap.end()) {
                items.push_front(key);
                keyValuesMap[key] = { value, items.begin() };
                if (keyValuesMap.size() > static_cast<size_t>(csize)) {
                    keyValuesMap.erase(items.back());
                    items.pop_back();
                }
            }
            else {
                items.erase(pos->second.second);
                items.push_front(key);
                keyValuesMap[key] = { value, items.begin() };
            }
        }
    
        bool get(const K key, V &value) {
            auto pos = keyValuesMap.find(key);
            if (pos == keyValuesMap.end())
                return false;
            items.erase(pos->second.second);
            items.push_front(key);
            keyValuesMap[key] = { pos->second.first, items.begin() };
            value = pos->second.first;
            return true;
        }
    };
    
  • 2020-11-27 10:00

    A linked list plus a hashtable of pointers to the linked-list nodes is the usual way to implement LRU caches. This gives O(1) operations (assuming a decent hash). An advantage of everything being O(1) is that you can make a multithreaded version by simply locking the whole structure; you don't have to worry about granular locking, etc.

    Briefly, the way it works:

    On an access of a value, you move the corresponding node in the linked list to the head.

    When you need to remove a value from the cache, you remove from the tail end.

    When you add a value to cache, you just place it at the head of the linked list.

    Thanks to doublep, here is a site with a C++ implementation: Miscellaneous Container Templates.
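
    As a concrete illustration, here is a minimal Python sketch of this scheme (a dict for O(1) key lookup plus a doubly linked list with sentinel nodes for O(1) reordering); the class and method names are illustrative, not from the answer above:

    class Node:
        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.prev = None
            self.next = None
    
    class LRUCache:
        def __init__(self, capacity):
            self.capacity = capacity
            self.map = {}                 # key -> Node
            # Sentinels avoid edge cases for an empty list
            self.head = Node(None, None)  # head.next is the most recent node
            self.tail = Node(None, None)  # tail.prev is the least recent node
            self.head.next = self.tail
            self.tail.prev = self.head
    
        def _remove(self, node):
            node.prev.next = node.next
            node.next.prev = node.prev
    
        def _add_front(self, node):
            node.next = self.head.next
            node.prev = self.head
            self.head.next.prev = node
            self.head.next = node
    
        def get(self, key):
            if key not in self.map:
                return None
            node = self.map[key]
            # On access, move the node to the head (most recent position)
            self._remove(node)
            self._add_front(node)
            return node.value
    
        def put(self, key, value):
            if key in self.map:
                self._remove(self.map[key])
            elif len(self.map) == self.capacity:
                # Evict from the tail end: the least recently used node
                lru = self.tail.prev
                self._remove(lru)
                del self.map[lru.key]
            node = Node(key, value)
            self.map[key] = node
            self._add_front(node)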

  • 2020-11-27 10:05

    Here is my implementation of a basic, simple LRU cache.

    //LRU Cache
    #include <cassert>
    #include <cstddef>
    #include <list>
    #include <unordered_map>
    
    template <typename K,
              typename V
              >
    class LRUCache
        {
        // Key access history, most recent at back
        typedef std::list<K> List;
    
        // Key to value and key history iterator
    typedef std::unordered_map< K,
                                std::pair<
                                          V,
                                          typename std::list<K>::iterator
                                         >
                              > Cache;
    
        typedef V (*Fn)(const K&);
    
    public:
        LRUCache( size_t aCapacity, Fn aFn ) 
            : mFn( aFn )
            , mCapacity( aCapacity )
            {}
    
        //get value for key aKey
        V operator()( const K& aKey )
            {
            typename Cache::iterator it = mCache.find( aKey );
            if( it == mCache.end() ) //cache-miss: did not find the key
                {
                V v = mFn( aKey );
                insert( aKey, v );
                return v;
                }
    
            // cache-hit
            // Update access record by moving accessed key to back of the list
            mList.splice( mList.end(), mList, (it)->second.second );
    
            // return the retrieved value
            return (it)->second.first;
            }
    
    private:
            // insert a new key-value pair in the cache
        void insert( const K& aKey, V aValue )
            {
            //method should be called only when cache-miss happens
            assert( mCache.find( aKey ) == mCache.end() );
    
            // make space if necessary
            if( mList.size() == mCapacity )
                {
                evict();
                }
    
            // record k as most-recently-used key
            typename std::list<K>::iterator it = mList.insert( mList.end(), aKey );
    
            // create key-value entry, linked to the usage record
            mCache.insert( std::make_pair( aKey, std::make_pair( aValue, it ) ) );
            }
    
            //Purge the least-recently used element in the cache
        void evict()
            {
            assert( !mList.empty() );
    
            // identify least-recently-used key
            const typename Cache::iterator it = mCache.find( mList.front() );
    
            //erase both elements to completely purge record
            mCache.erase( it );
            mList.pop_front();
            }
    
    private:
        List mList;
        Cache mCache;
        Fn mFn;
        size_t mCapacity;
        };
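
    This read-through pattern (compute the value via the supplied function on a miss, then cache it) is essentially what Python's standard functools.lru_cache decorator provides; a toy comparison, with an illustrative function name:

    from functools import lru_cache
    
    @lru_cache(maxsize=128)
    def expensive(key):
        # Stand-in for an expensive computation or lookup
        return key * key
    
    print(expensive(3))  # computed, then cached
    print(expensive(3))  # served from the cache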
    
  • 2020-11-27 10:06

    Is a cache a data structure that supports retrieving a value by key, like a hash table? LRU means the cache has a certain size limit, so we need to drop the least recently used entries when it is exceeded.

    If you implement it with a linked list plus a hashtable of pointers, how can you do O(1) retrieval of a value by key?

    I would implement the LRU cache with a hash table where the value of each entry is the cached value plus pointers to the prev/next entries.

    Regarding multi-threaded access, I would prefer a reader-writer lock (ideally implemented with spin locks, since contention is usually brief) over a monitor.
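
    A sketch of that idea in Python (assuming a plain dict whose entries carry their own prev/next links; all names are illustrative):

    class IntrusiveLRU:
        def __init__(self, capacity):
            self.capacity = capacity
            self.entries = {}    # key -> [value, prev_key, next_key]
            self.head = None     # least recently used key
            self.tail = None     # most recently used key
    
        def _unlink(self, key):
            _, prev_key, next_key = self.entries[key]
            if prev_key is not None:
                self.entries[prev_key][2] = next_key
            else:
                self.head = next_key
            if next_key is not None:
                self.entries[next_key][1] = prev_key
            else:
                self.tail = prev_key
    
        def _push_back(self, key):
            # Append the key at the tail: the most recently used position
            entry = self.entries[key]
            entry[1], entry[2] = self.tail, None
            if self.tail is not None:
                self.entries[self.tail][2] = key
            else:
                self.head = key
            self.tail = key
    
        def get(self, key):
            if key not in self.entries:
                return None
            self._unlink(key)
            self._push_back(key)
            return self.entries[key][0]
    
        def put(self, key, value):
            if key in self.entries:
                self.entries[key][0] = value
                self._unlink(key)
                self._push_back(key)
                return
            if len(self.entries) == self.capacity:
                # Evict the least recently used entry at the head
                lru = self.head
                self._unlink(lru)
                del self.entries[lru]
            self.entries[key] = [value, None, None]
            self._push_back(key)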

  • 2020-11-27 10:10

    A detailed explanation is in my blog post.

    class LRUCache {
      constructor(capacity) {
        this.head = null;
        this.tail = null;
        this.capacity = capacity;
        this.count = 0;
        this.hashMap = new Map();
      }
     
      get(key) {
        var node = this.hashMap.get(key);
        if(node) {
          if(node == this.head) {
            // node is already at the head, just return the value
            return node.val;
          }      
          if(this.tail == node && this.tail.prev) {
            // if the node is at the tail,
            // set tail to the previous node if it exists.
            this.tail = this.tail.prev;
            this.tail.next = null;
          }
          // link neighbouring nodes together
          if(node.prev)
            node.prev.next = node.next;
          if(node.next)
            node.next.prev = node.prev;      
          // add the new head node
          node.prev = null;
          node.next = this.head;
          this.head.prev = node;
          this.head = node;
          return node.val;
        }
        return -1;
      }
      put(key, val) {
        var oldNode = this.hashMap.get(key);
        if(oldNode) {
          // a node with this key already exists:
          // unlink it completely before inserting the replacement at the head
          if(oldNode.prev)
            oldNode.prev.next = oldNode.next;
          else
            this.head = oldNode.next;
          if(oldNode.next)
            oldNode.next.prev = oldNode.prev;
          else
            this.tail = oldNode.prev;
          this.hashMap.delete(key);
          this.count --;
        }
        var newNode = { key, val, prev: null, next: null };
        if(this.head == null) {
          // the list is empty, so the new node is both head and tail
          this.head = newNode;
          this.tail = newNode;
        }
        else {
          // link the new node in front of the current head
          newNode.next = this.head;
          this.head.prev = newNode;
          this.head = newNode;
        }
        this.hashMap.set(key, newNode);
        this.count ++;
        if(this.count == this.capacity + 1) {
          // over capacity: remove the last node, the least recently used one
          var lastNode = this.tail;
          this.tail = lastNode.prev;
          this.tail.next = null;
          this.hashMap.delete(lastNode.key);
          this.count --;
        }
        return null;
      }
    }
    
    var cache = new LRUCache(3);
    cache.put(1,1); // 1
    cache.put(2,2); // 2,1
    cache.put(3,3); // 3,2,1
    
    console.log( cache.get(2) ); // 2,3,1
    console.log( cache.get(1) ); // 1,2,3
    cache.put(4,4);              // 4,1,2 evicts 3
    console.log( cache.get(3) ); // 3 is no longer in cache
