Ansel 0.0
A darktable fork - bloat + design vision
Loading...
Searching...
No Matches
Permutohedral.h
Go to the documentation of this file.
1/*
2 This file is part of darktable,
3 Copyright (C) 2010-2011 johannes hanika.
4 Copyright (C) 2011 Bruce Guenter.
5 Copyright (C) 2011, 2014 Ulrich Pegelow.
6 Copyright (C) 2012 Richard Wonka.
7 Copyright (C) 2012, 2014, 2016 Tobias Ellinghaus.
8 Copyright (C) 2016 Roman Lebedev.
9 Copyright (C) 2020 Heiko Bauke.
10 Copyright (C) 2020 Ralf Brown.
11 Copyright (C) 2022 Martin Bařinka.
12
13 darktable is free software: you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation, either version 3 of the License, or
16 (at your option) any later version.
17
18 darktable is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with darktable. If not, see <http://www.gnu.org/licenses/>.
25*/
26/*
27 this file has been taken from ImageStack (http://code.google.com/p/imagestack/)
28 and adjusted slightly to fit darktable.
29
30 ImageStack is released under the new bsd license:
31
32Copyright (c) 2010, Andrew Adams
33All rights reserved.
34
35Redistribution and use in source and binary forms, with or without modification, are permitted provided that
36the following conditions are met:
37
38 * Redistributions of source code must retain the above copyright notice, this list of conditions and the
39following disclaimer.
40 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
41the following disclaimer in the documentation and/or other materials provided with the distribution.
42 * Neither the name of the Stanford Graphics Lab nor the names of its contributors may be used to endorse
43or promote products derived from this software without specific prior written permission.
44
45THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
46WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
48DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
50CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
51OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
52DAMAGE.
53*/
54
55#pragma once
56
57/*******************************************************************
58 * Permutohedral Lattice implementation from: *
59 * Fast High-Dimensional Filtering using the Permutohedral Lattice *
60 * Andrew Adams, Jongmin Baek, Abe Davis *
61 *******************************************************************/
62
63#include <algorithm>
64#include <math.h>
65#include <stdio.h>
66#include <stdlib.h>
67#include <string.h>
68
69#include <iostream>
70
71/*******************************************************************
72 * Hash table implementation for permutohedral lattice *
73 * *
74 * The lattice points are stored sparsely using a hash table. *
75 * The key for each point is its spatial location in the (d+1)- *
76 * dimensional space. *
77 * *
78 *******************************************************************/
79template <int KD, int VD> class HashTablePermutohedral
80{
81public:
82 // Struct for a key
83 struct Key
84 {
85 Key() = default;
86
87 Key(const Key &origin, int dim, int direction) // construct neighbor in dimension 'dim'
88 {
89 for(int i = 0; i < KD; i++) key[i] = origin.key[i] + direction;
90 key[dim] = origin.key[dim] - direction * KD;
91 setHash();
92 }
93
94 Key(const Key &) = default; // let the compiler write the copy constructor
95
96 Key &operator=(const Key &) = default;
97
98 void setKey(int idx, short val)
99 {
100 key[idx] = val;
101 }
102
103 void setHash()
104 {
105 size_t k = 0;
106 for(int i = 0; i < KD; i++)
107 {
108 k += key[i];
109 k *= 2531011;
110 }
111 hash = (unsigned)k;
112 }
113
114 bool operator==(const Key &other) const
115 {
116 if(hash != other.hash) return false;
117 for(int i = 0; i < KD; i++)
118 {
119 if(key[i] != other.key[i]) return false;
120 }
121 return true;
122 }
123
124 unsigned hash{ 0 }; // cache the hash value for this key
125 short key[KD]{}; // key is a KD-dimensional vector
126 };
127
128public:
129 // Struct for an associated value
130 struct Value
131 {
132 Value() = default;
133
135 {
136 for(int i = 0; i < VD; i++)
137 {
138 value[i] = init;
139 }
140 }
141
142 Value(const Value &) = default; // let the compiler write the copy constructor
143
144 Value &operator=(const Value &) = default;
145
146 static void clear(float *val)
147 {
148 for(int i = 0; i < VD; i++) val[i] = 0;
149 }
150
151 void setValue(int idx, short val)
152 {
153 value[idx] = val;
154 }
155
156 void addValue(int idx, short val)
157 {
158 value[idx] += val;
159 }
160
161 void add(const Value &other)
162 {
163 for(int i = 0; i < VD; i++)
164 {
165 value[i] += other.value[i];
166 }
167 }
168
169 void add(const float *other, float weight)
170 {
171 for(int i = 0; i < VD; i++)
172 {
173 value[i] += weight * other[i];
174 }
175 }
176
177 void addTo(float *dest, float weight) const
178 {
179 for(int i = 0; i < VD; i++)
180 {
181 dest[i] += weight * value[i];
182 }
183 }
184
185 void mix(const Value *left, const Value *center, const Value *right)
186 {
187 for(int i = 0; i < VD; i++)
188 {
189 value[i] = (0.25f * left->value[i] + 0.5f * center->value[i] + 0.25f * right->value[i]);
190 }
191 }
192
193 Value &operator+=(const Value &other)
194 {
195 for(int i = 0; i < VD; i++)
196 {
197 value[i] += other.value[i];
198 }
199 return *this;
200 }
201
202 float value[VD]{};
203 };
204
205public:
206 /* Constructor
207 * kd_: the dimensionality of the position vectors on the hyperplane.
208 * vd_: the dimensionality of the value vectors
209 */
211 {
212 capacity = 1 << 15;
213 capacity_bits = 0x7fff;
214 filled = 0;
215 entries = new Entry[capacity];
216 keys = new Key[maxFill()];
217 values = new Value[maxFill()]{ 0 };
218 }
219
221
223 {
224 delete[] entries;
225 delete[] keys;
226 delete[] values;
227 }
228
230
  // Returns the number of key/value vectors currently stored in the table.
  int size() const
  {
    return filled;
  }
236
  // Maximum number of entries before the table must grow (50% load factor,
  // which keeps linear-probe chains short).
  size_t maxFill() const
  {
    return capacity / 2;
  }
241
  // Returns a pointer to the keys array (indexed by the offsets handed out
  // by lookupOffset; only the first size() entries are valid).
  const Key *getKeys() const
  {
    return keys;
  }
247
248 // Returns a pointer to the values array.
250 {
251 return values;
252 }
253
254 /* Returns the index into the hash table for a given key.
255 * key: a reference to the position vector.
256 * create: a flag specifying whether an entry should be created,
257 * should an entry with the given key not found.
258 */
259 int lookupOffset(const Key &key, bool create = true)
260 {
261 size_t h = key.hash & capacity_bits;
262 // Find the entry with the given key
263 while(1)
264 {
265 Entry e = entries[h];
266 // check if the cell is empty
267 if(e.keyIdx == -1)
268 {
269 if(!create) return -1; // Return not found.
270 // Double hash table size if necessary
271 if(filled >= maxFill())
272 {
273 grow();
274 }
275 // need to create an entry. Store the given key.
276 keys[filled] = key;
278 return filled++;
279 }
280
281 // check if the cell has a matching key
282 if(keys[e.keyIdx] == key) return e.keyIdx;
283
284 // increment the bucket with wraparound
285 h = (h + 1) & capacity_bits;
286 }
287 }
288
289 /* Looks up the value vector associated with a given key vector.
290 * k : reference to the key vector to be looked up.
291 * create : true if a non-existing key should be created.
292 */
293 Value *lookup(const Key &k, bool create = true)
294 {
295 int offset = lookupOffset(k, create);
296 return (offset < 0) ? nullptr : values + offset;
297 };
298
299 /* Grows the size of the hash table */
300 void grow(int order = 1)
301 {
302 size_t oldCapacity = capacity;
303 while(order-- > 0)
304 {
305 capacity *= 2;
306 capacity_bits = (capacity_bits << 1) | 1;
307 }
308
309 // Migrate the value vectors.
310 Value *newValues = new Value[maxFill()];
311 std::copy(values, values + filled, newValues);
312 delete[] values;
313 values = newValues;
314
315 // Migrate the key vectors.
316 Key *newKeys = new Key[maxFill()];
317 std::copy(keys, keys + filled, newKeys);
318 delete[] keys;
319 keys = newKeys;
320
321 Entry *newEntries = new Entry[capacity];
322
323 // Migrate the table of indices.
324 for(size_t i = 0; i < oldCapacity; i++)
325 {
326 if(entries[i].keyIdx == -1) continue;
327 size_t h = keys[entries[i].keyIdx].hash & capacity_bits;
328 while(newEntries[h].keyIdx != -1)
329 {
330 h = (h + 1) & capacity_bits;
331 }
332 newEntries[h] = entries[i];
333 }
334 delete[] entries;
335 entries = newEntries;
336 }
337
338private:
  // Private struct for the hash table entries.
  struct Entry
  {
    int keyIdx{ -1 }; // index into keys/values arrays; -1 marks an empty bucket
  };
344
349 unsigned long capacity_bits;
350};
351
352
353/******************************************************************
354 * The algorithm class that performs the filter *
355 * *
356 * PermutohedralLattice::splat(...) and *
357 * PermutohedralLattice::slice() do almost all the work. *
358 * *
359 ******************************************************************/
360template <int D, int VD> class PermutohedralLattice
361{
362private:
363 // short-hand for types we use
365 typedef typename HashTable::Key Key;
366 typedef typename HashTable::Value Value;
367
368public:
369 /* Constructor
370 * d_ : dimensionality of key vectors
371 * vd_ : dimensionality of value vectors
372 * nData_ : number of points in the input
373 */
374 PermutohedralLattice(size_t nData_, int nThreads_ = 1) : nData(nData_), nThreads(nThreads_)
375 {
376 // Allocate storage for various arrays
377 float *scaleFactorTmp = new float[D];
378 int *canonicalTmp = new int[(D + 1) * (D + 1)];
379
380 replay = new ReplayEntry[nData];
381
382 // compute the coordinates of the canonical simplex, in which
383 // the difference between a contained point and the zero
384 // remainder vertex is always in ascending order. (See pg.4 of paper.)
385 for(int i = 0; i <= D; i++)
386 {
387 for(int j = 0; j <= D - i; j++) canonicalTmp[i * (D + 1) + j] = i;
388 for(int j = D - i + 1; j <= D; j++) canonicalTmp[i * (D + 1) + j] = i - (D + 1);
389 }
390 canonical = canonicalTmp;
391
392 // Compute parts of the rotation matrix E. (See pg.4-5 of paper.)
393 for(int i = 0; i < D; i++)
394 {
395 // the diagonal entries for normalization
396 scaleFactorTmp[i] = 1.0f / (sqrtf((float)(i + 1) * (i + 2)));
397
398 /* We presume that the user would like to do a Gaussian blur of standard deviation
399 * 1 in each dimension (or a total variance of d, summed over dimensions.)
400 * Because the total variance of the blur performed by this algorithm is not d,
401 * we must scale the space to offset this.
402 *
403 * The total variance of the algorithm is (See pg.6 and 10 of paper):
404 * [variance of splatting] + [variance of blurring] + [variance of splatting]
405 * = d(d+1)(d+1)/12 + d(d+1)(d+1)/2 + d(d+1)(d+1)/12
406 * = 2d(d+1)(d+1)/3.
407 *
408 * So we need to scale the space by (d+1)sqrt(2/3).
409 */
410 scaleFactorTmp[i] *= (D + 1) * sqrtf(2.0 / 3);
411 }
412 scaleFactor = scaleFactorTmp;
413
415 }
416
418
420 {
421 delete[] scaleFactor;
422 delete[] replay;
423 delete[] canonical;
424 delete[] hashTables;
425 }
426
428
  /* Performs splatting with given position and value vectors.
   * position     : D-dimensional position vector in input space
   * value        : VD-dimensional value vector to accumulate into the lattice
   * replay_index : slot in the replay array where the visited simplex vertices
   *                and barycentric weights are recorded for the later slice()
   * thread_index : which per-thread hash table receives the splat
   */
  void splat(float *position, float *value, size_t replay_index, int thread_index = 0) const
  {
    float elevated[D + 1];
    int greedy[D + 1];
    int rank[D + 1];
    float barycentric[D + 2];
    Key key;

    // first rotate position into the (d+1)-dimensional hyperplane
    // (coordinates sum to zero by construction)
    elevated[D] = -D * position[D - 1] * scaleFactor[D - 1];
    for(int i = D - 1; i > 0; i--)
      elevated[i]
          = (elevated[i + 1] - i * position[i - 1] * scaleFactor[i - 1] + (i + 2) * position[i] * scaleFactor[i]);
    elevated[0] = elevated[1] + 2 * position[0] * scaleFactor[0];

    // prepare to find the closest lattice points
    constexpr float scale = 1.0f / (D + 1);

    // greedily search for the closest zero-colored lattice point
    // (round each coordinate to the nearest multiple of D+1)
    int sum = 0;
    for(int i = 0; i <= D; i++)
    {
      float v = elevated[i] * scale;
      float up = ceilf(v) * (D + 1);
      float down = floorf(v) * (D + 1);

      if(up - elevated[i] < elevated[i] - down)
        greedy[i] = up;
      else
        greedy[i] = down;

      sum += greedy[i];
    }
    sum /= D + 1;

    // rank differential to find the permutation between this simplex and the canonical one.
    // (See pg. 3-4 in paper.)
    memset(rank, 0, sizeof rank);
    for(int i = 0; i < D; i++)
      for(int j = i + 1; j <= D; j++)
        if(elevated[i] - greedy[i] < elevated[j] - greedy[j])
          rank[i]++;
        else
          rank[j]++;

    if(sum > 0)
    {
      // sum too large - the point is off the hyperplane.
      // need to bring down the ones with the smallest differential
      for(int i = 0; i <= D; i++)
      {
        if(rank[i] >= D + 1 - sum)
        {
          greedy[i] -= D + 1;
          rank[i] += sum - (D + 1);
        }
        else
          rank[i] += sum;
      }
    }
    else if(sum < 0)
    {
      // sum too small - the point is off the hyperplane
      // need to bring up the ones with largest differential
      for(int i = 0; i <= D; i++)
      {
        if(rank[i] < -sum)
        {
          greedy[i] += D + 1;
          rank[i] += (D + 1) + sum;
        }
        else
          rank[i] += sum;
      }
    }

    // Compute barycentric coordinates (See pg.10 of paper.)
    memset(barycentric, 0, sizeof barycentric);
    for(int i = 0; i <= D; i++)
    {
      barycentric[D - rank[i]] += (elevated[i] - greedy[i]) * scale;
      barycentric[D + 1 - rank[i]] -= (elevated[i] - greedy[i]) * scale;
    }
    // wrap-around term closes the barycentric coordinates so they sum to 1
    barycentric[0] += 1.0f + barycentric[D + 1];

    // Splat the value into each vertex of the simplex, with barycentric weights.
    replay[replay_index].table = thread_index;
    for(int remainder = 0; remainder <= D; remainder++)
    {
      // Compute the location of the lattice point explicitly (all but the last coordinate - it's redundant
      // because they sum to zero)
      for(int i = 0; i < D; i++) key.key[i] = greedy[i] + canonical[remainder * (D + 1) + rank[i]];
      key.setHash();

      // Retrieve pointer to the value at this vertex.
      Value *val = hashTables[thread_index].lookup(key, true);

      // Accumulate values with barycentric weight.
      val->add(value, barycentric[remainder]);

      // Record this interaction to use later when slicing
      // (offsets are relative to this thread's table; merge_splat_threads
      // remaps them to table 0 afterwards)
      replay[replay_index].offset[remainder] = val - hashTables[thread_index].getValues();
      replay[replay_index].weight[remainder] = barycentric[remainder];
    }
  }
535
536 /* Merge the multiple threads' hash tables into the totals. */
538 {
539 if(nThreads <= 1) return;
540
541 /* Because growing the hash table is expensive, we want to avoid having to do it multiple times.
542 * Only a small percentage of entries in the individual hash tables have the same key, so we
543 * won't waste much space if we simply grow the destination table enough to hold the sum of the
544 * entries in the individual tables
545 */
546 size_t total_entries = hashTables[0].size();
547 for(int i = 1; i < nThreads; i++) total_entries += hashTables[i].size();
548 int order = 0;
549 while(total_entries > hashTables[0].maxFill())
550 {
551 order++;
552 total_entries /= 2;
553 }
554 if(order > 0) hashTables[0].grow(order);
555 /* Merge the multiple hash tables into one, creating an offset remap table. */
556 int **offset_remap = new int *[nThreads];
557 for(int i = 1; i < nThreads; i++)
558 {
559 const Key *oldKeys = hashTables[i].getKeys();
560 const Value *oldVals = hashTables[i].getValues();
561 const int filled = hashTables[i].size();
562 offset_remap[i] = new int[filled];
563 for(int j = 0; j < filled; j++)
564 {
565 Value *val = hashTables[0].lookup(oldKeys[j], true);
566 val->add(oldVals[j]);
567 offset_remap[i][j] = val - hashTables[0].getValues();
568 }
569 }
570
571 /* Rewrite the offsets in the replay structure from the above generated table. */
572 for(int i = 0; i < nData; i++)
573 {
574 if(replay[i].table > 0)
575 {
576 for(int dim = 0; dim <= D; dim++)
577 replay[i].offset[dim] = offset_remap[replay[i].table][replay[i].offset[dim]];
578 }
579 }
580
581 for(int i = 1; i < nThreads; i++) delete[] offset_remap[i];
582 delete[] offset_remap;
583 }
584
585 /* Performs slicing out of position vectors. Note that the barycentric weights and the simplex
586 * containing each position vector were calculated and stored in the splatting step.
587 * We may reuse this to accelerate the algorithm. (See pg. 6 in paper.)
588 */
589 void slice(float *col, size_t replay_index) const
590 {
591 const Value *base = hashTables[0].getValues();
592 Value::clear(col);
593 ReplayEntry &r = replay[replay_index];
594 for(int i = 0; i <= D; i++)
595 {
596 base[r.offset[i]].addTo(col, r.weight[i]);
597 }
598 }
599
  /* Performs a Gaussian blur along each projected axis in the hyperplane.
   * Uses double-buffering: reads from oldValue, writes to newValue, then
   * swaps after each axis. Missing neighbors contribute a zero value. */
  void blur() const
  {
    // Prepare arrays
    Value *newValue = new Value[hashTables[0].size()];
    Value *oldValue = hashTables[0].getValues();
    const Value *hashTableBase = oldValue; // remember the table's own storage
    const Key *keyBase = hashTables[0].getKeys();
    const Value zero{ 0 };

    // For each of d+1 axes,
    for(int j = 0; j <= D; j++)
    {
#ifdef _OPENMP
#pragma omp parallel for shared(j, oldValue, newValue)
#endif
      // For each vertex in the lattice,
      for(int i = 0; i < hashTables[0].size(); i++) // blur point i in dimension j
      {
        const Key &key = keyBase[i]; // keys to current vertex
        // construct keys to the neighbors along the given axis.
        Key neighbor1(key, j, +1);
        Key neighbor2(key, j, -1);

        const Value *oldVal = oldValue + i;

        const Value *vm1 = hashTables[0].lookup(neighbor1, false); // look up first neighbor
        // lookup returns a pointer into the table's storage; rebase it onto
        // whichever buffer currently holds the freshest data
        vm1 = vm1 ? vm1 - hashTableBase + oldValue : &zero;

        const Value *vp1 = hashTables[0].lookup(neighbor2, false); // look up second neighbor
        vp1 = vp1 ? vp1 - hashTableBase + oldValue : &zero;

        // Mix values of the three vertices (1/4, 1/2, 1/4 kernel)
        newValue[i].mix(vm1, oldVal, vp1);
      }
      std::swap(newValue, oldValue);
      // the freshest data is now in oldValue, and newValue is ready to be written over
    }

    // depending where we ended up, we may have to copy data
    // (odd number of swaps leaves the result in the scratch buffer)
    if(oldValue != hashTableBase)
    {
      std::copy(oldValue, oldValue + hashTables[0].size(), hashTables[0].getValues());
      delete[] oldValue;
    }
    else
    {
      delete[] newValue;
    }
  }
650
651private:
652 int nData;
654 const float *scaleFactor;
655 const int *canonical;
656
657 // slicing is done by replaying splatting (ie storing the sparse matrix)
659 {
660 // since every dimension of a lattice point gets handled by the same thread,
661 // we only need to store the id of the hash table once, instead of for each dimension
662 int table;
663 int offset[D + 1];
664 float weight[D + 1];
666
668};
669
670// clang-format off
671// modelines: These editor modelines have been set for all relevant files by tools/update_modelines.py
672// vim: shiftwidth=2 expandtab tabstop=2 cindent
673// kate: tab-indents: off; indent-width 2; replace-tabs on; indent-mode cstyle; remove-trailing-spaces modified;
674// clang-format on
675
void init(dt_imageio_module_format_t *self)
Definition avif.c:151
int position()
Definition backgroundjobs.c:82
Definition Permutohedral.h:80
HashTablePermutohedral(const HashTablePermutohedral &)=delete
const Key * getKeys() const
Definition Permutohedral.h:243
unsigned long capacity_bits
Definition Permutohedral.h:349
int size() const
Definition Permutohedral.h:232
HashTablePermutohedral & operator=(const HashTablePermutohedral &)=delete
~HashTablePermutohedral()
Definition Permutohedral.h:222
Value * values
Definition Permutohedral.h:346
HashTablePermutohedral()
Definition Permutohedral.h:210
Key * keys
Definition Permutohedral.h:345
size_t capacity
Definition Permutohedral.h:348
Entry * entries
Definition Permutohedral.h:347
size_t filled
Definition Permutohedral.h:348
size_t maxFill() const
Definition Permutohedral.h:237
Value * lookup(const Key &k, bool create=true)
Definition Permutohedral.h:293
void grow(int order=1)
Definition Permutohedral.h:300
Value * getValues() const
Definition Permutohedral.h:249
int lookupOffset(const Key &key, bool create=true)
Definition Permutohedral.h:259
Definition Permutohedral.h:361
void slice(float *col, size_t replay_index) const
Definition Permutohedral.h:589
struct PermutohedralLattice::ReplayEntry * replay
HashTable::Value Value
Definition Permutohedral.h:366
const float * scaleFactor
Definition Permutohedral.h:654
PermutohedralLattice(size_t nData_, int nThreads_=1)
Definition Permutohedral.h:374
int nThreads
Definition Permutohedral.h:653
PermutohedralLattice(const PermutohedralLattice &)=delete
void blur() const
Definition Permutohedral.h:601
~PermutohedralLattice()
Definition Permutohedral.h:419
void merge_splat_threads()
Definition Permutohedral.h:537
PermutohedralLattice & operator=(const PermutohedralLattice &)=delete
int nData
Definition Permutohedral.h:652
HashTable * hashTables
Definition Permutohedral.h:667
HashTablePermutohedral< D, VD > HashTable
Definition Permutohedral.h:364
HashTable::Key Key
Definition Permutohedral.h:365
const int * canonical
Definition Permutohedral.h:655
void splat(float *position, float *value, size_t replay_index, int thread_index=0) const
Definition Permutohedral.h:430
const float i
Definition colorspaces_inline_conversions.h:669
const float h
Definition colorspaces_inline_conversions.h:1366
const float r
Definition colorspaces_inline_conversions.h:1324
char * key
Definition common/metadata.c:60
static const dt_aligned_pixel_simd_t value
Definition darktable.h:501
static void weight(const float *c1, const float *c2, const float sharpen, dt_aligned_pixel_t weight)
Definition eaw.c:33
static const float v
Definition iop_profile.h:223
size_t size
Definition mipmap_cache.c:3
Definition Permutohedral.h:341
int keyIdx
Definition Permutohedral.h:342
Definition Permutohedral.h:84
Key(const Key &)=default
unsigned hash
Definition Permutohedral.h:124
Key & operator=(const Key &)=default
void setHash()
Definition Permutohedral.h:103
Key(const Key &origin, int dim, int direction)
Definition Permutohedral.h:87
bool operator==(const Key &other) const
Definition Permutohedral.h:114
short key[KD]
Definition Permutohedral.h:125
void setKey(int idx, short val)
Definition Permutohedral.h:98
Definition Permutohedral.h:131
void setValue(int idx, short val)
Definition Permutohedral.h:151
Value & operator+=(const Value &other)
Definition Permutohedral.h:193
void add(const Value &other)
Definition Permutohedral.h:161
Value(const Value &)=default
Value & operator=(const Value &)=default
static void clear(float *val)
Definition Permutohedral.h:146
float value[VD]
Definition Permutohedral.h:202
Value(int init)
Definition Permutohedral.h:134
void mix(const Value *left, const Value *center, const Value *right)
Definition Permutohedral.h:185
void addTo(float *dest, float weight) const
Definition Permutohedral.h:177
void addValue(int idx, short val)
Definition Permutohedral.h:156
void add(const float *other, float weight)
Definition Permutohedral.h:169
Definition Permutohedral.h:659
int table
Definition Permutohedral.h:662
float weight[D+1]
Definition Permutohedral.h:664
int offset[D+1]
Definition Permutohedral.h:663