Skip to content

Instantly share code, notes, and snippets.

@orsonadams
Last active March 13, 2020 23:05
Show Gist options
  • Save orsonadams/a5fb180ef12a9d8abce2d96e3a087674 to your computer and use it in GitHub Desktop.
Save orsonadams/a5fb180ef12a9d8abce2d96e3a087674 to your computer and use it in GitHub Desktop.
Not so correct implementation of LRU Cache
# @parsethis
# Basic LRU Cache implementation using a dictionary and a queue
# Goes without saying that you'd not use this, functools.lru_cache is what you want
# TODO: not thread-safe — needs locking (see functools.lru_cache)
# hashing keys could be flattened, properly hashed
# maybe you want a doubly linked list and not a python queue
#
#
from collections import deque
from functools import wraps
def lru_cache(maxsize=0):
    """Decorator factory implementing a simple LRU cache.

    Args:
        maxsize: Maximum number of results to keep. 0 (the default)
            means the cache grows without bound and nothing is evicted.

    Returns:
        A decorator that memoizes the wrapped function's results.

    Note:
        Not thread-safe. All positional and keyword argument values
        must be hashable. In real code use functools.lru_cache.
    """
    def decorator(f):
        cache = {}       # key -> cached result
        queue = deque()  # keys ordered least- to most-recently used

        @wraps(f)
        def wrapper(*args, **kwargs):
            # Sort kwargs so f(a=1, b=2) and f(b=2, a=1) share one entry.
            key = (args, tuple(sorted(kwargs.items())))
            if key in cache:
                # Membership test (not truthiness) so falsy results
                # such as 0, "", or None also count as hits.
                if maxsize:
                    # Promote the key to the most-recently-used end.
                    queue.remove(key)
                    queue.append(key)
                return cache[key]
            # Miss: compute first, so a raising f leaves cache and
            # queue untouched.
            item = f(*args, **kwargs)
            if maxsize:
                if len(queue) >= maxsize:
                    # Evict the least-recently-used entry.
                    cache.pop(queue.popleft())
                queue.append(key)
            cache[key] = item
            return item

        return wrapper

    return decorator
@lru_cache(maxsize=2)
def func(*args, **kwargs):
    """No-op function used only to exercise the cache."""
    return

# Drive the cache: miss, hit, hit, then two misses that fill the
# two-slot cache and evict (1, 1), making the final call a miss.
for call_args in [(1, 1), (1, 1), (1, 1), (2, 5), (3, 4), (1, 1)]:
    func(*call_args)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment