Ansel 0.0
A darktable fork - bloat + design vision
common/cache.c
/*
    This file is part of darktable,
    Copyright (C) 2011-2021 darktable developers.

    darktable is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    darktable is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with darktable. If not, see <http://www.gnu.org/licenses/>.
*/

#include "config.h"

#include "common/cache.h"
#include "common/darktable.h"
#include "common/dtpthread.h"

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

// this implements a concurrent LRU cache
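// locking design: cache->lock (a plain mutex) protects the hash table and the
// LRU list. each entry additionally carries its own rwlock, so several readers
// can share an entry while a writer holds it exclusively. entry locks are only
// ever try-locked while cache->lock is held; on contention the mutex is dropped
// and the operation retries, which avoids lock-order deadlocks between the
// cache lock and the entry locks. under ASan, payloads handed out write-locked
// are poisoned until the caller unpoisons them, so unsynchronized accesses are
// flagged early.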

void dt_cache_init(
    dt_cache_t *cache,
    size_t entry_size,
    size_t cost_quota)
{
  cache->cost = 0;
  cache->lru = 0;
  cache->entry_size = entry_size;
  cache->cost_quota = cost_quota;
  dt_pthread_mutex_init(&cache->lock, 0);
  cache->allocate = 0;
  cache->allocate_data = 0;
  cache->cleanup = 0;
  cache->cleanup_data = 0;
  cache->hashtable = g_hash_table_new(0, 0);
}

void dt_cache_cleanup(dt_cache_t *cache)
{
  g_hash_table_destroy(cache->hashtable);
  for(GList *l = cache->lru; l; l = g_list_next(l))
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;

    if(cache->cleanup)
    {
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

      cache->cleanup(cache->cleanup_data, entry);
    }
    else
      dt_free_align(entry->data);

    dt_pthread_rwlock_destroy(&entry->lock);
    g_slice_free1(sizeof(*entry), entry);
  }
  g_list_free(cache->lru);
  dt_pthread_mutex_destroy(&cache->lock);
}

int32_t dt_cache_contains(dt_cache_t *cache, const uint32_t key)
{
  dt_pthread_mutex_lock(&cache->lock);
  int32_t result = g_hash_table_contains(cache->hashtable, GINT_TO_POINTER(key));
  dt_pthread_mutex_unlock(&cache->lock);
  return result;
}

int dt_cache_for_all(
    dt_cache_t *cache,
    int (*process)(const uint32_t key, const void *data, void *user_data),
    void *user_data)
{
  dt_pthread_mutex_lock(&cache->lock);
  GHashTableIter iter;
  gpointer key, value;

  g_hash_table_iter_init(&iter, cache->hashtable);
  while(g_hash_table_iter_next(&iter, &key, &value))
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    const int err = process(GPOINTER_TO_INT(key), entry->data, user_data);
    if(err)
    {
      dt_pthread_mutex_unlock(&cache->lock);
      return err;
    }
  }
  dt_pthread_mutex_unlock(&cache->lock);
  return 0;
}
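
// example (illustrative, hypothetical callback): count all entries with
// dt_cache_for_all. the iteration runs under the cache mutex, so the callback
// should not call back into the cache:
//   static int count_cb(const uint32_t key, const void *data, void *user_data)
//   {
//     (void)key; (void)data;
//     (*(size_t *)user_data)++;
//     return 0; // non-zero would abort the iteration
//   }
//   ...
//   size_t n = 0;
//   dt_cache_for_all(cache, count_cb, &n);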

// return read locked bucket, or NULL if it's not already there.
// never attempt to allocate a new slot.
dt_cache_entry_t *dt_cache_testget(dt_cache_t *cache, const uint32_t key, char mode)
{
  gpointer orig_key, value;
  gboolean res;
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  if(res)
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    // lock the cache entry
    const int result
        = (mode == 'w') ? dt_pthread_rwlock_trywrlock(&entry->lock) : dt_pthread_rwlock_tryrdlock(&entry->lock);
    if(result)
    { // need to give up the mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
      return 0;
    }
    // bubble up in lru list:
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);

    if(mode == 'w')
    {
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

    // WARNING: do *NOT* unpoison here. it must be done by the caller!

    return entry;
  }
  dt_pthread_mutex_unlock(&cache->lock);
  return 0;
}
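
// example (illustrative): non-blocking peek at an existing entry; dt_cache_release
// is assumed to be the convenience wrapper from common/cache.h around
// dt_cache_release_with_caller below:
//   dt_cache_entry_t *e = dt_cache_testget(cache, key, 'r');
//   if(e)
//   {
//     // read e->data under the shared lock
//     dt_cache_release(cache, e);
//   }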

// if found, the entry is returned locked according to the requested mode
// ('r' for a shared read lock, 'w' for an exclusive write lock). if not,
// a new entry is allocated for the given key, which can be found using
// that key later on.
dt_cache_entry_t *dt_cache_get_with_caller(dt_cache_t *cache, const uint32_t key, char mode, const char *file, int line)
{
  gpointer orig_key, value;
  gboolean res;
  int result;
restart:
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  if(res)
  { // yay, found. lock and pass on.
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    if(mode == 'w') result = dt_pthread_rwlock_trywrlock_with_caller(&entry->lock, file, line);
    else result = dt_pthread_rwlock_tryrdlock_with_caller(&entry->lock, file, line);
    if(result)
    { // need to give up the mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
      g_usleep(5);
      goto restart;
    }
    // bubble up in lru list:
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);

#ifdef _DEBUG
    const pthread_t writer = dt_pthread_rwlock_get_writer(&entry->lock);
    if(mode == 'w')
    {
      assert(pthread_equal(writer, pthread_self()));
    }
    else
    {
      assert(!pthread_equal(writer, pthread_self()));
    }
#endif

    if(mode == 'w')
    {
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

    // WARNING: do *NOT* unpoison here. it must be done by the caller!

    return entry;
  }

  // else, not found, need to allocate.

  // first try to clean up.
  // also wait if we can't free more than the requested fill ratio.
  if(cache->cost > 0.8f * cache->cost_quota)
  {
    // need to roll back all the way to get a consistent lock state:
    dt_cache_gc(cache, 0.8f);
  }

  // here dies your 32-bit system:
  dt_cache_entry_t *entry = (dt_cache_entry_t *)g_slice_alloc(sizeof(dt_cache_entry_t));
  entry->data = 0;
  entry->data_size = cache->entry_size;
  entry->cost = 1;
  entry->link = g_list_append(0, entry);
  entry->key = key;
  entry->_lock_demoting = 0;

  if(cache->allocate)
    cache->allocate(cache->allocate_data, entry);
  else
    entry->data = dt_alloc_align(entry->data_size);

  if(!entry->data)
  {
    g_list_free(entry->link);
    g_slice_free1(sizeof(*entry), entry); // matches the g_slice_alloc() above
    dt_pthread_mutex_unlock(&cache->lock);
    return NULL;
  }

  int ret = dt_pthread_rwlock_init(&entry->lock, 0);
  if(ret) fprintf(stderr, "rwlock init: %d\n", ret);

  g_hash_table_insert(cache->hashtable, GINT_TO_POINTER(key), entry);
  assert(cache->allocate || entry->data_size);

  assert(entry->data_size);
  ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);

  // if an allocate callback is given, always return a write lock
  const int write = ((mode == 'w') || cache->allocate);

  // write lock in case the caller requests it:
  if(write) dt_pthread_rwlock_wrlock_with_caller(&entry->lock, file, line);
  else dt_pthread_rwlock_rdlock_with_caller(&entry->lock, file, line);

  cache->cost += entry->cost;

  // put at end of lru list (most recently used):
  cache->lru = g_list_concat(cache->lru, entry->link);

  dt_pthread_mutex_unlock(&cache->lock);
  // WARNING: do *NOT* unpoison here. it must be done by the caller!

  return entry;
}

int dt_cache_remove(dt_cache_t *cache, const uint32_t key)
{
  gpointer orig_key, value;
  gboolean res;
  int result;
  dt_cache_entry_t *entry;
restart:
  dt_pthread_mutex_lock(&cache->lock);

  res = g_hash_table_lookup_extended(
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
  entry = (dt_cache_entry_t *)value;
  if(!res)
  { // not found in cache, not deleting.
    dt_pthread_mutex_unlock(&cache->lock);
    return 1;
  }
  // need write lock to be able to delete:
  result = dt_pthread_rwlock_trywrlock(&entry->lock);
  if(result)
  {
    dt_pthread_mutex_unlock(&cache->lock);
    g_usleep(5);
    goto restart;
  }

  if(entry->_lock_demoting)
  {
    // oops, we are currently demoting (rw -> r) the lock on this entry in some thread. do not touch!
    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_mutex_unlock(&cache->lock);
    g_usleep(5);
    goto restart;
  }

  gboolean removed = g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(key));
  (void)removed; // make non-assert compile happy
  assert(removed);
  cache->lru = g_list_delete_link(cache->lru, entry->link);

  if(cache->cleanup)
  {
    assert(entry->data_size);
    ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

    cache->cleanup(cache->cleanup_data, entry);
  }
  else
    dt_free_align(entry->data);

  dt_pthread_rwlock_unlock(&entry->lock);
  dt_pthread_rwlock_destroy(&entry->lock);
  cache->cost -= entry->cost;
  g_slice_free1(sizeof(*entry), entry);

  dt_pthread_mutex_unlock(&cache->lock);
  return 0;
}

// best-effort garbage collection. never blocks, never fails. well, sometimes it just doesn't free anything.
void dt_cache_gc(dt_cache_t *cache, const float fill_ratio)
{
  GList *l = cache->lru;
  while(l)
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;
    assert(entry->link->data == entry);
    l = g_list_next(l); // we might remove this element, so walk to the next one while we still have the pointer..
    if(cache->cost < cache->cost_quota * fill_ratio) break;

    // if still locked by anyone else give up:
    if(dt_pthread_rwlock_trywrlock(&entry->lock)) continue;

    if(entry->_lock_demoting)
    {
      // oops, we are currently demoting (rw -> r) the lock on this entry in some thread. do not touch!
      dt_pthread_rwlock_unlock(&entry->lock);
      continue;
    }

    // delete!
    g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(entry->key));
    cache->lru = g_list_delete_link(cache->lru, entry->link);
    cache->cost -= entry->cost;

    if(cache->cleanup)
    {
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

      cache->cleanup(cache->cleanup_data, entry);
    }
    else
      dt_free_align(entry->data);

    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_rwlock_destroy(&entry->lock);
    g_slice_free1(sizeof(*entry), entry);
  }
}
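
// note: the head of cache->lru is the least-recently-used end; entries are
// moved to the tail on every access, so the gc loop above evicts in LRU order.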

void dt_cache_release_with_caller(dt_cache_t *cache, dt_cache_entry_t *entry, const char *file, int line)
{
#if((__has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)) && 1)
  // yes, this is *HIGHLY* unportable and is accessing implementation details.
#ifdef _DEBUG

# if defined(HAVE_THREAD_RWLOCK_ARCH_T_READERS)
  if(entry->lock.lock.__data.__readers <= 1)
# elif defined(HAVE_THREAD_RWLOCK_ARCH_T_NR_READERS)
  if(entry->lock.lock.__data.__nr_readers <= 1)
# else /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */
# error "No valid reader member"
# endif /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */

#else /* _DEBUG */

# if defined(HAVE_THREAD_RWLOCK_ARCH_T_READERS)
  if(entry->lock.__data.__readers <= 1)
# elif defined(HAVE_THREAD_RWLOCK_ARCH_T_NR_READERS)
  if(entry->lock.__data.__nr_readers <= 1)
# else /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */
# error "No valid reader member"
# endif /* HAVE_THREAD_RWLOCK_ARCH_T_(NR_)READERS */

#endif /* _DEBUG */
  {
    // only if there are no other readers may we poison.
    assert(entry->data_size);
    ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
  }
#endif

  dt_pthread_rwlock_unlock(&entry->lock);
}
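
// usage sketch (illustrative; `imgid` is a hypothetical key, and dt_cache_get/
// dt_cache_release are assumed to be the common/cache.h convenience wrappers
// around the _with_caller variants in this file):
//
//   dt_cache_t cache;
//   dt_cache_init(&cache, 4096, 100);  // 4 KiB per entry, quota of 100 cost units
//
//   dt_cache_entry_t *e = dt_cache_get(&cache, imgid, 'w'); // allocates on miss, write-locked
//   ASAN_UNPOISON_MEMORY_REGION(e->data, e->data_size);     // caller must unpoison (see above)
//   memset(e->data, 0, e->data_size);
//   dt_cache_release(&cache, e);                            // drops the entry lock
//
//   dt_cache_gc(&cache, 0.8f);      // best-effort trim to 80% of the cost quota
//   dt_cache_remove(&cache, imgid); // 0 on success, 1 if the key is absent
//   dt_cache_cleanup(&cache);       // frees all remaining entries and the table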

// clang-format off
// modelines: These editor modelines have been set for all relevant files by tools/update_modelines.py
// vim: shiftwidth=2 expandtab tabstop=2 cindent
// kate: tab-indents: off; indent-width 2; replace-tabs on; indent-mode cstyle; remove-trailing-spaces modified;
// clang-format on