
cache.py 5.0 kB

  1. """
  2. Python module wrapper for _functools C module
  3. to allow utilities written in Python to be added
  4. to the functools module.
  5. Written by Nick Coghlan <ncoghlan at gmail.com>,
  6. Raymond Hettinger <python at rcn.com>,
  7. and Łukasz Langa <lukasz at langa.pl>.
  8. Copyright (C) 2006-2013 Python Software Foundation.
  9. See C source code for _functools credits/copyright
  10. Modified from
  11. https://github.com/python/cpython/blob/3.12/Lib/functools.py
  12. """
  13. from typing import Callable, Generic, TypeVar
  14. K = TypeVar("K")
  15. T = TypeVar("T")
  16. PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
  17. class Cache(Generic[K, T]):
  18. """
  19. A generic caching mechanism that stores the results of a function call and
  20. retrieves them to avoid repeated calculations.
  21. This class implements a dictionary-based cache with a circular doubly linked
  22. list to manage the cache entries efficiently. It is designed to be generic,
  23. allowing for caching of any callable function.
  24. Parameters
  25. ----------
  26. func : Callable[[K], T]
  27. The function to be cached. This function takes an argument of type K and
  28. returns a value of type T.
  29. """
  30. def __init__(self, func: Callable[[K], T]):
  31. self.func = func
  32. self.has_init = False
  33. self.cache = False
  34. self.cache_dict = {}
  35. self.key_func = None
  36. self.max_size = 0
  37. self.hits, self.misses = 0, 0
  38. self.full = False
  39. self.root = [] # root of the circular doubly linked list
  40. self.root[:] = [self.root, self.root, None, None]
  41. def __getitem__(self, obj, *args) -> T:
  42. return self.get_from_dict(obj, *args)
  43. def clear_cache(self):
  44. """
  45. Invalidate the entire cache.
  46. """
  47. self.cache_dict.clear()
  48. def init_cache(self, obj):
  49. """
  50. Initialize the cache settings.
  51. Parameters
  52. ----------
  53. obj : Any
  54. The object containing settings for cache initialization.
  55. """
  56. if self.has_init:
  57. return
  58. self.cache = True
  59. self.cache_dict = {}
  60. self.key_func = obj.key_func
  61. self.max_size = obj.cache_size
  62. self.hits, self.misses = 0, 0
  63. self.full = False
  64. self.root = [] # root of the circular doubly linked list
  65. self.root[:] = [self.root, self.root, None, None]
  66. self.has_init = True
  67. def get_from_dict(self, obj, *args) -> T:
  68. """
  69. Retrieve a value from the cache or compute it using ``self.func``.
  70. Parameters
  71. ----------
  72. obj : Any
  73. The object to which the cached method/function belongs.
  74. *args : Any
  75. Arguments used in key generation for cache retrieval or function computation.
  76. Returns
  77. -------
  78. T
  79. The value from the cache or computed by the function.
  80. """
  81. # x is not used in cache key
  82. pred_pseudo_label, y, _x, *res_args = args
  83. cache_key = (self.key_func(pred_pseudo_label), self.key_func(y), *res_args)
  84. link = self.cache_dict.get(cache_key)
  85. if link is not None:
  86. # Move the link to the front of the circular queue
  87. link_prev, link_next, _key, result = link
  88. link_prev[NEXT] = link_next
  89. link_next[PREV] = link_prev
  90. last = self.root[PREV]
  91. last[NEXT] = self.root[PREV] = link
  92. link[PREV] = last
  93. link[NEXT] = self.root
  94. self.hits += 1
  95. return result
  96. self.misses += 1
  97. result = self.func(obj, *args)
  98. if self.full:
  99. # Use the old root to store the new key and result.
  100. oldroot = self.root
  101. oldroot[KEY] = cache_key
  102. oldroot[RESULT] = result
  103. # Empty the oldest link and make it the new root.
  104. self.root = oldroot[NEXT]
  105. oldkey = self.root[KEY]
  106. self.root[KEY] = self.root[RESULT] = None
  107. # Now update the cache dictionary.
  108. del self.cache_dict[oldkey]
  109. self.cache_dict[cache_key] = oldroot
  110. else:
  111. # Put result in a new link at the front of the queue.
  112. last = self.root[PREV]
  113. link = [last, self.root, cache_key, result]
  114. last[NEXT] = self.root[PREV] = self.cache_dict[cache_key] = link
  115. if isinstance(self.max_size, int):
  116. self.full = len(self.cache_dict) >= self.max_size
  117. return result
  118. def abl_cache():
  119. """
  120. Decorator to enable caching for a function.
  121. Returns
  122. -------
  123. Callable
  124. The wrapped function with caching capability.
  125. """
  126. def decorator(func):
  127. cache_instance = Cache(func)
  128. def wrapper(obj, *args):
  129. if obj.use_cache:
  130. cache_instance.init_cache(obj)
  131. return cache_instance.get_from_dict(obj, *args)
  132. return func(obj, *args)
  133. return wrapper
  134. return decorator
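
A minimal usage sketch (not part of cache.py; assumptions are noted in comments): abl_cache is written for methods whose owning object exposes use_cache, key_func, and cache_size attributes, which the wrapper and init_cache read, and whose positional arguments follow the (pred_pseudo_label, y, x, *res_args) layout that get_from_dict unpacks. The Reasoner class and solve method below are hypothetical placeholders chosen for illustration only.

    class Reasoner:
        def __init__(self):
            self.use_cache = True   # checked by the wrapper on every call
            self.key_func = tuple   # must turn each list argument into something hashable
            self.cache_size = 128   # upper bound on the number of cached entries

        @abl_cache()
        def solve(self, pred_pseudo_label, y, x):
            # Stand-in for an expensive computation; x is excluded from the cache key.
            return [label + offset for label, offset in zip(pred_pseudo_label, y)]


    reasoner = Reasoner()
    reasoner.solve([1, 2, 3], [0, 0, 1], "raw-input-a")  # miss: computed and stored
    reasoner.solve([1, 2, 3], [0, 0, 1], "raw-input-b")  # hit: x does not affect the key

Because the key is built only from key_func(pred_pseudo_label), key_func(y), and any trailing res_args, two calls that differ only in x share a single cache entry, which is why the second call above is served from the cache.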

An efficient Python toolkit for Abductive Learning (ABL), a novel paradigm that integrates machine learning and logical reasoning in a unified framework.