Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2008-2017, Petr Kobalicek
3 :
4 : This software is provided 'as-is', without any express or implied
5 : warranty. In no event will the authors be held liable for any damages
6 : arising from the use of this software.
7 :
8 : Permission is granted to anyone to use this software for any purpose,
9 : including commercial applications, and to alter it and redistribute it
10 : freely, subject to the following restrictions:
11 :
12 : 1. The origin of this software must not be misrepresented; you must not
13 : claim that you wrote the original software. If you use this software
14 : in a product, an acknowledgment in the product documentation would be
15 : appreciated but is not required.
16 : 2. Altered source versions must be plainly marked as such, and must not be
17 : misrepresented as being the original software.
18 : 3. This notice may not be removed or altered from any source distribution.
19 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
20 : #ifdef __PLUMED_HAS_ASMJIT
21 : #pragma GCC diagnostic push
22 : #pragma GCC diagnostic ignored "-Wpedantic"
23 : // [AsmJit]
24 : // Complete x86/x64 JIT and Remote Assembler for C++.
25 : //
26 : // [License]
27 : // Zlib - See LICENSE.md file in the package.
28 :
29 : // [Export]
30 : #define ASMJIT_EXPORTS
31 :
32 : // [Dependencies]
33 : #include "./utils.h"
34 : #include "./zone.h"
35 :
36 : // [Api-Begin]
37 : #include "./asmjit_apibegin.h"
38 :
39 : namespace PLMD {
40 : namespace asmjit {
41 :
42 : //! Zero-size block used by a `Zone` that doesn't have any memory allocated.
43 : static const Zone::Block Zone_zeroBlock = { nullptr, nullptr, 0, { 0 } };
44 :
45 : static ASMJIT_INLINE uint32_t Zone_getAlignmentOffsetFromAlignment(uint32_t x) noexcept {
46 11664 : switch (x) {
47 : default: return 0;
48 : case 0 : return 0;
49 : case 1 : return 0;
50 0 : case 2 : return 1;
51 0 : case 4 : return 2;
52 0 : case 8 : return 3;
53 0 : case 16: return 4;
54 0 : case 32: return 5;
55 0 : case 64: return 6;
56 : }
57 : }
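// Note (illustrative only, not exercised in this file): the value returned above
// is a power-of-two shift, so a non-zero block alignment can be recovered as
// `1u << shift`.
//
//   uint32_t shift = Zone_getAlignmentOffsetFromAlignment(32); // == 5
//   uint32_t alignment = 1u << shift;                          // == 32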
58 :
59 : // ============================================================================
60 : // [asmjit::Zone - Construction / Destruction]
61 : // ============================================================================
62 :
63 11664 : Zone::Zone(uint32_t blockSize, uint32_t blockAlignment) noexcept
64 11664 : : _ptr(nullptr),
65 11664 : _end(nullptr),
66 11664 : _block(const_cast<Zone::Block*>(&Zone_zeroBlock)),
67 11664 : _blockSize(blockSize),
68 11664 : _blockAlignmentShift(Zone_getAlignmentOffsetFromAlignment(blockAlignment)) {}
69 :
70 11664 : Zone::~Zone() noexcept {
71 11664 : reset(true);
72 11664 : }
73 :
74 : // ============================================================================
75 : // [asmjit::Zone - Reset]
76 : // ============================================================================
77 :
78 17496 : void Zone::reset(bool releaseMemory) noexcept {
79 17496 : Block* cur = _block;
80 :
81 : // Nothing to do - the shared zero block is never modified or released.
82 17496 : if (cur == &Zone_zeroBlock)
83 : return;
84 :
85 11664 : if (releaseMemory) {
86 : // Since `cur` can be in the middle of the doubly-linked list, we have to
87 : // traverse in both directions (`prev` and `next`) separately.
88 7776 : Block* next = cur->next;
89 : do {
90 7780 : Block* prev = cur->prev;
91 : Internal::releaseMemory(cur);
92 : cur = prev;
93 7780 : } while (cur);
94 :
95 : cur = next;
96 7776 : while (cur) {
97 0 : next = cur->next;
98 : Internal::releaseMemory(cur);
99 : cur = next;
100 : }
101 :
102 7776 : _ptr = nullptr;
103 7776 : _end = nullptr;
104 7776 : _block = const_cast<Zone::Block*>(&Zone_zeroBlock);
105 : }
106 : else {
107 3888 : while (cur->prev)
108 : cur = cur->prev;
109 :
110 3888 : _ptr = cur->data;
111 3888 : _end = _ptr + cur->size;
112 3888 : _block = cur;
113 : }
114 : }
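// Minimal usage sketch of the two reset modes implemented above (hypothetical
// snippet, assuming the public `Zone::alloc()` wrapper declared in zone.h):
//
//   Zone zone(4096 - Zone::kZoneOverhead);
//   void* a = zone.alloc(64);   // bump-allocated from the current block
//   zone.reset(false);          // keep the first block, just rewind the cursor
//   void* b = zone.alloc(64);   // may reuse the same memory as `a`
//   zone.reset(true);           // release every block; back to Zone_zeroBlock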
115 :
116 : // ============================================================================
117 : // [asmjit::Zone - Alloc]
118 : // ============================================================================
119 :
120 7780 : void* Zone::_alloc(size_t size) noexcept {
121 7780 : Block* curBlock = _block;
122 : uint8_t* p;
123 :
124 7780 : size_t blockSize = std::max<size_t>(_blockSize, size);
125 7780 : size_t blockAlignment = getBlockAlignment();
126 :
127 : // The `_alloc()` method can only be called if there is not enough space
128 : // in the current block; see the `alloc()` implementation for more details.
129 : ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || getRemainingSize() < size);
130 :
131 : // If the `Zone` has been cleared, the current block doesn't have to be the
132 : // last one. Check whether there is a block that can be reused instead of
133 : // allocating a new one. A `next` block is completely unused, so we only
134 : // need to check its total size, not its remaining bytes.
135 7780 : Block* next = curBlock->next;
136 7780 : if (next && next->size >= size) {
137 0 : p = Utils::alignTo(next->data, blockAlignment);
138 :
139 0 : _block = next;
140 0 : _ptr = p + size;
141 0 : _end = next->data + next->size;
142 :
143 0 : return static_cast<void*>(p);
144 : }
145 :
146 : // Prevent arithmetic overflow.
147 7780 : if (ASMJIT_UNLIKELY(blockSize > (~static_cast<size_t>(0) - sizeof(Block) - blockAlignment)))
148 : return nullptr;
149 :
150 7780 : blockSize += blockAlignment;
151 7780 : Block* newBlock = static_cast<Block*>(Internal::allocMemory(sizeof(Block) + blockSize));
152 :
153 7780 : if (ASMJIT_UNLIKELY(!newBlock))
154 : return nullptr;
155 :
156 : // Align the pointer to `blockAlignment` and adjust the size of this block
157 : // accordingly. It's the same as using `blockAlignment - Utils::alignDiff()`,
158 : // just written differently.
159 7780 : p = Utils::alignTo(newBlock->data, blockAlignment);
160 7780 : newBlock->prev = nullptr;
161 7780 : newBlock->next = nullptr;
162 7780 : newBlock->size = blockSize;
163 :
164 7780 : if (curBlock != &Zone_zeroBlock) {
165 4 : newBlock->prev = curBlock;
166 4 : curBlock->next = newBlock;
167 :
168 : // This only happens if there is a next block but the requested memory
169 : // can't fit into it. In that case a new buffer is allocated and inserted
170 : // between the current block and the next one.
171 4 : if (next) {
172 0 : newBlock->next = next;
173 0 : next->prev = newBlock;
174 : }
175 : }
176 :
177 7780 : _block = newBlock;
178 7780 : _ptr = p + size;
179 7780 : _end = newBlock->data + blockSize;
180 :
181 7780 : return static_cast<void*>(p);
182 : }
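// Illustrative sketch of when `_alloc()` above is reached (hypothetical
// snippet): a request that doesn't fit into the remaining space of the current
// block allocates a new block of at least `max(_blockSize, size)` bytes.
//
//   Zone zone(4096 - Zone::kZoneOverhead);
//   void* small = zone.alloc(16);     // served from the current block
//   void* large = zone.alloc(16384);  // doesn't fit; `_alloc()` adds a block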
183 :
184 27628 : void* Zone::allocZeroed(size_t size) noexcept {
185 : void* p = alloc(size);
186 27628 : if (ASMJIT_UNLIKELY(!p)) return p;
187 27628 : return ::memset(p, 0, size);
188 : }
189 :
190 36630 : void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
191 36630 : if (ASMJIT_UNLIKELY(!data || !size)) return nullptr;
192 :
193 : ASMJIT_ASSERT(size != IntTraits<size_t>::maxValue());
194 36630 : uint8_t* m = allocT<uint8_t>(size + nullTerminate);
195 36630 : if (ASMJIT_UNLIKELY(!m)) return nullptr;
196 :
197 : ::memcpy(m, data, size);
198 36630 : if (nullTerminate) m[size] = '\0';
199 :
200 : return static_cast<void*>(m);
201 : }
202 :
203 0 : char* Zone::sformat(const char* fmt, ...) noexcept {
204 0 : if (ASMJIT_UNLIKELY(!fmt)) return nullptr;
205 :
206 : char buf[512];
207 : size_t len;
208 :
209 : va_list ap;
210 0 : va_start(ap, fmt);
211 :
212 0 : len = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap);
213 0 : buf[len++] = 0;
214 :
215 0 : va_end(ap);
216 0 : return static_cast<char*>(dup(buf, len));
217 : }
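// Hypothetical usage of the helpers above (given a `Zone zone(...)` as in the
// earlier sketches): `dup()` copies a buffer into the zone and can append a
// NUL terminator, `sformat()` formats into a local buffer and then dups it.
//
//   const char op[] = { 'a', 'd', 'd' };
//   char* s = static_cast<char*>(zone.dup(op, 3, true)); // "add" + '\0'
//   char* t = zone.sformat("op_%u", 42u);                // "op_42"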
218 :
219 : // ============================================================================
220 : // [asmjit::ZoneHeap - Helpers]
221 : // ============================================================================
222 :
223 : static bool ZoneHeap_hasDynamicBlock(ZoneHeap* self, ZoneHeap::DynamicBlock* block) noexcept {
224 : ZoneHeap::DynamicBlock* cur = self->_dynamicBlocks;
225 : while (cur) {
226 : if (cur == block)
227 : return true;
228 : cur = cur->next;
229 : }
230 : return false;
231 : }
232 :
233 : // ============================================================================
234 : // [asmjit::ZoneHeap - Init / Reset]
235 : // ============================================================================
236 :
237 9720 : void ZoneHeap::reset(Zone* zone) noexcept {
238 : // Free dynamic blocks.
239 9720 : DynamicBlock* block = _dynamicBlocks;
240 9728 : while (block) {
241 8 : DynamicBlock* next = block->next;
242 : Internal::releaseMemory(block);
243 : block = next;
244 : }
245 :
246 : // Zero the entire class and initialize to the given `zone`.
247 : ::memset(this, 0, sizeof(*this));
248 9720 : _zone = zone;
249 9720 : }
250 :
251 : // ============================================================================
252 : // [asmjit::ZoneHeap - Alloc / Release]
253 : // ============================================================================
254 :
255 79806 : void* ZoneHeap::_alloc(size_t size, size_t& allocatedSize) noexcept {
256 : ASMJIT_ASSERT(isInitialized());
257 :
258 : // We use our memory pool only if the requested block is of a reasonable size.
259 : uint32_t slot;
260 : if (_getSlotIndex(size, slot, allocatedSize)) {
261 : // Slot reuse.
262 79798 : uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
263 79798 : size = allocatedSize;
264 :
265 79798 : if (p) {
266 1676 : _slots[slot] = reinterpret_cast<Slot*>(p)->next;
267 : //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
268 1676 : return p;
269 : }
270 :
271 : // Use the Zone to allocate a new chunk for us. Before using it, check
272 : // whether there is enough room for the new chunk in the Zone; if not,
273 : // redistribute the remaining memory of the Zone's current block into slots.
274 78122 : Zone* zone = _zone;
275 : p = Utils::alignTo(zone->getCursor(), kBlockAlignment);
276 78122 : size_t remain = (p <= zone->getEnd()) ? (size_t)(zone->getEnd() - p) : size_t(0);
277 :
278 78122 : if (ASMJIT_LIKELY(remain >= size)) {
279 74234 : zone->setCursor(p + size);
280 : //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
281 74234 : return p;
282 : }
283 : else {
284 : // Distribute the remaining memory to suitable slots.
285 3888 : if (remain >= kLoGranularity) {
286 : do {
287 0 : size_t distSize = std::min<size_t>(remain, kLoMaxSize);
288 0 : uint32_t distSlot = static_cast<uint32_t>((distSize - kLoGranularity) / kLoGranularity);
289 : ASMJIT_ASSERT(distSlot < kLoCount);
290 :
291 0 : reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
292 0 : _slots[distSlot] = reinterpret_cast<Slot*>(p);
293 :
294 0 : p += distSize;
295 0 : remain -= distSize;
296 0 : } while (remain >= kLoGranularity);
297 : zone->setCursor(p);
298 : }
299 :
300 3888 : p = static_cast<uint8_t*>(zone->_alloc(size));
301 3888 : if (ASMJIT_UNLIKELY(!p)) {
302 0 : allocatedSize = 0;
303 0 : return nullptr;
304 : }
305 :
306 : //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
307 : return p;
308 : }
309 : }
310 : else {
311 : // Allocate a dynamic block.
312 : size_t overhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
313 :
314 : // Handle a possible overflow.
315 8 : if (ASMJIT_UNLIKELY(overhead >= ~static_cast<size_t>(0) - size))
316 : return nullptr;
317 :
318 8 : void* p = Internal::allocMemory(size + overhead);
319 8 : if (ASMJIT_UNLIKELY(!p)) {
320 0 : allocatedSize = 0;
321 0 : return nullptr;
322 : }
323 :
324 : // Link as the first entry of the `_dynamicBlocks` doubly-linked list.
325 : DynamicBlock* block = static_cast<DynamicBlock*>(p);
326 8 : DynamicBlock* next = _dynamicBlocks;
327 :
328 8 : if (next)
329 0 : next->prev = block;
330 :
331 8 : block->prev = nullptr;
332 8 : block->next = next;
333 8 : _dynamicBlocks = block;
334 :
335 : // Align the pointer to the guaranteed alignment and store a pointer to the
336 : // `DynamicBlock` right before it, so `_releaseDynamic()` can find it.
337 8 : p = Utils::alignTo(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
338 8 : reinterpret_cast<DynamicBlock**>(p)[-1] = block;
339 :
340 8 : allocatedSize = size;
341 : //printf("ALLOCATED DYNAMIC %p of size %d\n", p, int(size));
342 8 : return p;
343 : }
344 : }
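// Minimal ZoneHeap usage sketch (hypothetical snippet; the calls mirror the
// ones used elsewhere in this file): small requests are rounded up to a slot
// size and recycled through per-slot free lists, larger ones take the
// dynamic-block path above; `allocatedSize` reports what was really reserved.
//
//   Zone zone(4096 - Zone::kZoneOverhead);
//   ZoneHeap heap(&zone);
//   size_t got = 0;
//   void* p = heap.alloc(24, got);       // pooled; `got` >= 24
//   heap.release(p, got);                // chunk goes back to its slot
//   void* q = heap.alloc(1 << 20, got);  // above the slot range -> dynamic block
//   heap.release(q, got);                // unlinked and freed immediately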
345 :
346 3888 : void* ZoneHeap::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
347 : ASMJIT_ASSERT(isInitialized());
348 :
349 3888 : void* p = _alloc(size, allocatedSize);
350 3888 : if (ASMJIT_UNLIKELY(!p)) return p;
351 3888 : return ::memset(p, 0, allocatedSize);
352 : }
353 :
354 0 : void ZoneHeap::_releaseDynamic(void* p, size_t size) noexcept {
355 : ASMJIT_ASSERT(isInitialized());
356 : //printf("RELEASING DYNAMIC %p of size %d\n", p, int(size));
357 :
358 : // Pointer to `DynamicBlock` is stored at [-1].
359 0 : DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
360 : ASMJIT_ASSERT(ZoneHeap_hasDynamicBlock(this, block));
361 :
362 : // Unlink and free.
363 0 : DynamicBlock* prev = block->prev;
364 0 : DynamicBlock* next = block->next;
365 :
366 0 : if (prev)
367 0 : prev->next = next;
368 : else
369 0 : _dynamicBlocks = next;
370 :
371 0 : if (next)
372 0 : next->prev = prev;
373 :
374 : Internal::releaseMemory(block);
375 0 : }
376 :
377 : // ============================================================================
378 : // [asmjit::ZoneVectorBase - Helpers]
379 : // ============================================================================
380 :
381 18252 : Error ZoneVectorBase::_grow(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
382 18252 : size_t threshold = Globals::kAllocThreshold / sizeOfT;
383 18252 : size_t capacity = _capacity;
384 18252 : size_t after = _length;
385 :
386 18252 : if (ASMJIT_UNLIKELY(IntTraits<size_t>::maxValue() - n < after))
387 : return DebugUtils::errored(kErrorNoHeapMemory);
388 :
389 18252 : after += n;
390 18252 : if (capacity >= after)
391 : return kErrorOk;
392 :
393 : // ZoneVector is used as an array that holds short-lived data structures used
394 : // during code generation. The growth strategy is simple - use a small capacity
395 : // at the beginning (which suits ZoneHeap very well) and then grow faster to
396 : // prevent successive reallocations.
397 18252 : if (capacity < 4)
398 : capacity = 4;
399 6588 : else if (capacity < 8)
400 : capacity = 8;
401 3496 : else if (capacity < 16)
402 : capacity = 16;
403 1232 : else if (capacity < 64)
404 : capacity = 64;
405 : else if (capacity < 256)
406 : capacity = 256;
407 :
408 18252 : while (capacity < after) {
409 0 : if (capacity < threshold)
410 0 : capacity *= 2;
411 : else
412 0 : capacity += threshold;
413 : }
414 :
415 18252 : return _reserve(heap, sizeOfT, capacity);
416 : }
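// Worked example of the growth policy above (hypothetical; assumes
// `Globals::kAllocThreshold` is much larger than these sizes): appending
// elements one by one moves the capacity through the fixed steps
// 4, 8, 16, 64, 256 and only then starts doubling.
//
//   ZoneVector<int> v;
//   for (int i = 0; i < 300; i++)
//     v.append(&heap, i);   // capacity: 4 -> 8 -> 16 -> 64 -> 256 -> 512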
417 :
418 18252 : Error ZoneVectorBase::_reserve(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
419 18252 : size_t oldCapacity = _capacity;
420 18252 : if (oldCapacity >= n) return kErrorOk;
421 :
422 18252 : size_t nBytes = n * sizeOfT;
423 18252 : if (ASMJIT_UNLIKELY(nBytes < n))
424 : return DebugUtils::errored(kErrorNoHeapMemory);
425 :
426 : size_t allocatedBytes;
427 : uint8_t* newData = static_cast<uint8_t*>(heap->alloc(nBytes, allocatedBytes));
428 :
429 18252 : if (ASMJIT_UNLIKELY(!newData))
430 : return DebugUtils::errored(kErrorNoHeapMemory);
431 :
432 18252 : void* oldData = _data;
433 18252 : if (_length)
434 6588 : ::memcpy(newData, oldData, _length * sizeOfT);
435 :
436 18252 : if (oldData)
437 6588 : heap->release(oldData, oldCapacity * sizeOfT);
438 :
439 18252 : _capacity = allocatedBytes / sizeOfT;
440 : ASMJIT_ASSERT(_capacity >= n);
441 :
442 18252 : _data = newData;
443 18252 : return kErrorOk;
444 : }
445 :
446 3888 : Error ZoneVectorBase::_resize(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
447 3888 : size_t length = _length;
448 3888 : if (_capacity < n) {
449 1944 : ASMJIT_PROPAGATE(_grow(heap, sizeOfT, n - length));
450 : ASMJIT_ASSERT(_capacity >= n);
451 : }
452 :
453 3888 : if (length < n)
454 3888 : ::memset(static_cast<uint8_t*>(_data) + length * sizeOfT, 0, (n - length) * sizeOfT);
455 :
456 3888 : _length = n;
457 3888 : return kErrorOk;
458 : }
459 :
460 : // ============================================================================
461 : // [asmjit::ZoneBitVector - Ops]
462 : // ============================================================================
463 :
464 0 : Error ZoneBitVector::_resize(ZoneHeap* heap, size_t newLength, size_t idealCapacity, bool newBitsValue) noexcept {
465 : ASMJIT_ASSERT(idealCapacity >= newLength);
466 :
467 0 : if (newLength <= _length) {
468 : // The size after the resize is less than or equal to the current length.
469 0 : size_t idx = newLength / kBitsPerWord;
470 0 : size_t bit = newLength % kBitsPerWord;
471 :
472 : // Just clear all bits outside of the new length in the last word. There
473 : // are no bits to clear if `bit` is zero, which happens when `newLength`
474 : // is a multiple of `kBitsPerWord` like 64, 128, and so on. In that case
475 : // don't change anything, as that would mean touching bits outside of
476 : // the `_length`.
477 0 : if (bit)
478 0 : _data[idx] &= (static_cast<uintptr_t>(1) << bit) - 1U;
479 :
480 0 : _length = newLength;
481 0 : return kErrorOk;
482 : }
483 :
484 : size_t oldLength = _length;
485 0 : BitWord* data = _data;
486 :
487 0 : if (newLength > _capacity) {
488 : // Realloc needed... Calculate the minimum capacity (in bits) required.
489 : size_t minimumCapacityInBits = Utils::alignTo<size_t>(idealCapacity, kBitsPerWord);
490 : size_t allocatedCapacity;
491 :
492 0 : if (ASMJIT_UNLIKELY(minimumCapacityInBits < newLength))
493 0 : return DebugUtils::errored(kErrorNoHeapMemory);
494 :
495 : // Normalize to bytes.
496 0 : size_t minimumCapacity = minimumCapacityInBits / 8;
497 : BitWord* newData = static_cast<BitWord*>(heap->alloc(minimumCapacity, allocatedCapacity));
498 :
499 0 : if (ASMJIT_UNLIKELY(!newData))
500 : return DebugUtils::errored(kErrorNoHeapMemory);
501 :
502 : // `allocatedCapacity` now contains a number of bytes; we need bits.
503 0 : size_t allocatedCapacityInBits = allocatedCapacity * 8;
504 :
505 : // Arithmetic overflow should normally not happen. If it does, we simply
506 : // fall back from `allocatedCapacityInBits` to `minimumCapacityInBits`, as
507 : // that value is still safe to pass to `_heap->release(...)`.
508 0 : if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
509 : allocatedCapacityInBits = minimumCapacityInBits;
510 :
511 0 : if (oldLength)
512 : ::memcpy(newData, data, _wordsPerBits(oldLength));
513 :
514 0 : if (data)
515 0 : heap->release(data, _capacity / 8);
516 : data = newData;
517 :
518 0 : _data = data;
519 0 : _capacity = allocatedCapacityInBits;
520 : }
521 :
522 : // Start (of the old length) and end (of the new length) bits
523 0 : size_t idx = oldLength / kBitsPerWord;
524 0 : size_t startBit = oldLength % kBitsPerWord;
525 0 : size_t endBit = newLength % kBitsPerWord;
526 :
527 : // Set new bits to either 0 or 1. The `pattern` is used to set multiple
528 : // bits per bit-word and contains either all zeros or all ones.
529 : BitWord pattern = _patternFromBit(newBitsValue);
530 :
531 : // First initialize the last bit-word of the old length.
532 0 : if (startBit) {
533 : size_t nBits = 0;
534 :
535 0 : if (idx == (newLength / kBitsPerWord)) {
536 : // The number of bit-words is the same after the resize. In that case
537 : // we only need to set the necessary bits in the current last bit-word.
538 : ASMJIT_ASSERT(startBit < endBit);
539 0 : nBits = endBit - startBit;
540 : }
541 : else {
542 : // There will be more bit-words after the resize. In that case we don't
543 : // have to be extra careful about the last bit-word of the old length.
544 0 : nBits = kBitsPerWord - startBit;
545 : }
546 :
547 0 : data[idx++] |= pattern << nBits;
548 : }
549 :
550 : // Initialize all bit-words after the last bit-word of the old length.
551 : size_t endIdx = _wordsPerBits(newLength);
552 0 : endIdx -= static_cast<size_t>(endIdx * kBitsPerWord == newLength);
553 :
554 0 : while (idx <= endIdx)
555 0 : data[idx++] = pattern;
556 :
557 : // Clear unused bits of the last bit-word.
558 0 : if (endBit)
559 0 : data[endIdx] &= (static_cast<BitWord>(1) << endBit) - 1;
560 :
561 0 : _length = newLength;
562 0 : return kErrorOk;
563 : }
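// Illustrative usage, mirroring the unit test at the end of this file:
// growing with `newBitsValue == true` fills whole bit-words with the all-ones
// pattern and masks off bits past the new length; shrinking clears the bits
// beyond the new length in the last word.
//
//   ZoneBitVector bits;
//   bits.resize(&heap, 10, true);  // bits 0..9 set, rest of the word cleared
//   bits.resize(&heap, 4, true);   // shrink: bits 4..9 cleared again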
564 :
565 0 : Error ZoneBitVector::_append(ZoneHeap* heap, bool value) noexcept {
566 : size_t kThreshold = Globals::kAllocThreshold * 8;
567 0 : size_t newLength = _length + 1;
568 0 : size_t idealCapacity = _capacity;
569 :
570 0 : if (idealCapacity < 128)
571 : idealCapacity = 128;
572 0 : else if (idealCapacity <= kThreshold)
573 0 : idealCapacity *= 2;
574 : else
575 0 : idealCapacity += kThreshold;
576 :
577 0 : if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
578 : // It's technically impossible that `_length + 1` overflows.
579 : idealCapacity = newLength;
580 : ASMJIT_ASSERT(idealCapacity > _capacity);
581 : }
582 :
583 0 : return _resize(heap, newLength, idealCapacity, value);
584 : }
585 :
586 0 : Error ZoneBitVector::fill(size_t from, size_t to, bool value) noexcept {
587 0 : if (ASMJIT_UNLIKELY(from >= to)) {
588 0 : if (from > to)
589 : return DebugUtils::errored(kErrorInvalidArgument);
590 : else
591 0 : return kErrorOk;
592 : }
593 :
594 : ASMJIT_ASSERT(from <= _length);
595 : ASMJIT_ASSERT(to <= _length);
596 :
597 : // This is very similar to `ZoneBitVector::_fill()`; however, since we
598 : // actually set bits that are already part of the container, we need to
599 : // special-case filling with zeros and ones.
600 0 : size_t idx = from / kBitsPerWord;
601 0 : size_t startBit = from % kBitsPerWord;
602 :
603 0 : size_t endIdx = to / kBitsPerWord;
604 0 : size_t endBit = to % kBitsPerWord;
605 :
606 0 : BitWord* data = _data;
607 : ASMJIT_ASSERT(data != nullptr);
608 :
609 : // Special case for non-zero `startBit`.
610 0 : if (startBit) {
611 0 : if (idx == endIdx) {
612 : ASMJIT_ASSERT(startBit < endBit);
613 :
614 0 : size_t nBits = endBit - startBit;
615 0 : BitWord mask = ((static_cast<BitWord>(1) << nBits) - 1) << startBit;
616 :
617 0 : if (value)
618 0 : data[idx] |= mask;
619 : else
620 0 : data[idx] &= ~mask;
621 0 : return kErrorOk;
622 : }
623 : else {
624 0 : BitWord mask = (static_cast<BitWord>(0) - 1) << startBit;
625 :
626 0 : if (value)
627 0 : data[idx++] |= mask;
628 : else
629 0 : data[idx++] &= ~mask;
630 : }
631 : }
632 :
633 : // Fill all bits in case there is a gap between the current `idx` and `endIdx`.
634 0 : if (idx < endIdx) {
635 : BitWord pattern = _patternFromBit(value);
636 : do {
637 0 : data[idx++] = pattern;
638 0 : } while (idx < endIdx);
639 : }
640 :
641 : // Special case for non-zero `endBit`.
642 0 : if (endBit) {
643 0 : BitWord mask = ((static_cast<BitWord>(1) << endBit) - 1);
644 0 : if (value)
645 0 : data[endIdx] |= mask;
646 : else
647 0 : data[endIdx] &= ~mask;
648 : }
649 :
650 : return kErrorOk;
651 : }
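// Worked example of the masking above: for `fill(5, 9, true)` within a single
// bit-word, `startBit == 5` and `nBits == 4`, so the mask is
// `((BitWord(1) << 4) - 1) << 5 == 0x1E0`, i.e. bits 5..8 are set while all
// other bits of the word are left untouched.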
652 :
653 : // ============================================================================
654 : // [asmjit::ZoneHashBase - Utilities]
655 : // ============================================================================
656 :
657 0 : static uint32_t ZoneHash_getClosestPrime(uint32_t x) noexcept {
658 : static const uint32_t primeTable[] = {
659 : 23, 53, 193, 389, 769, 1543, 3079, 6151, 12289, 24593
660 : };
661 :
662 : size_t i = 0;
663 : uint32_t p;
664 :
665 : do {
666 0 : if ((p = primeTable[i]) > x)
667 : break;
668 0 : } while (++i < ASMJIT_ARRAY_SIZE(primeTable));
669 :
670 0 : return p;
671 : }
672 :
673 : // ============================================================================
674 : // [asmjit::ZoneHashBase - Reset]
675 : // ============================================================================
676 :
677 3888 : void ZoneHashBase::reset(ZoneHeap* heap) noexcept {
678 3888 : ZoneHashNode** oldData = _data;
679 3888 : if (oldData != _embedded)
680 0 : _heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
681 :
682 3888 : _heap = heap;
683 3888 : _size = 0;
684 3888 : _bucketsCount = 1;
685 3888 : _bucketsGrow = 1;
686 3888 : _data = _embedded;
687 3888 : _embedded[0] = nullptr;
688 3888 : }
689 :
690 : // ============================================================================
691 : // [asmjit::ZoneHashBase - Rehash]
692 : // ============================================================================
693 :
694 0 : void ZoneHashBase::_rehash(uint32_t newCount) noexcept {
695 : ASMJIT_ASSERT(isInitialized());
696 :
697 0 : ZoneHashNode** oldData = _data;
698 : ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
699 0 : _heap->allocZeroed(static_cast<size_t>(newCount) * sizeof(ZoneHashNode*)));
700 :
701 : // We can still store nodes in the table, but performance will degrade.
702 0 : if (ASMJIT_UNLIKELY(newData == nullptr))
703 : return;
704 :
705 : uint32_t i;
706 0 : uint32_t oldCount = _bucketsCount;
707 :
708 0 : for (i = 0; i < oldCount; i++) {
709 0 : ZoneHashNode* node = oldData[i];
710 0 : while (node) {
711 0 : ZoneHashNode* next = node->_hashNext;
712 0 : uint32_t hMod = node->_hVal % newCount;
713 :
714 0 : node->_hashNext = newData[hMod];
715 0 : newData[hMod] = node;
716 :
717 : node = next;
718 : }
719 : }
720 :
721 : // 90% is the maximum occupancy; this can't overflow since the maximum
722 : // capacity is limited to the last prime number stored in the prime table.
723 0 : if (oldData != _embedded)
724 0 : _heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
725 :
726 0 : _bucketsCount = newCount;
727 0 : _bucketsGrow = newCount * 9 / 10;
728 :
729 0 : _data = newData;
730 : }
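// Illustrative growth sequence (follows directly from the code above):
// starting from the single embedded bucket, `_put()` rehashes once the table
// passes ~90% occupancy, so `_bucketsCount` moves through the prime table as
// 1 -> 23 -> 53 -> 193 -> ..., and `_bucketsGrow` is reset to
// `newCount * 9 / 10` after every rehash.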
731 :
732 : // ============================================================================
733 : // [asmjit::ZoneHashBase - Ops]
734 : // ============================================================================
735 :
736 0 : ZoneHashNode* ZoneHashBase::_put(ZoneHashNode* node) noexcept {
737 0 : uint32_t hMod = node->_hVal % _bucketsCount;
738 0 : ZoneHashNode* next = _data[hMod];
739 :
740 0 : node->_hashNext = next;
741 0 : _data[hMod] = node;
742 :
743 0 : if (++_size >= _bucketsGrow && next) {
744 0 : uint32_t newCapacity = ZoneHash_getClosestPrime(_bucketsCount);
745 0 : if (newCapacity != _bucketsCount)
746 0 : _rehash(newCapacity);
747 : }
748 :
749 0 : return node;
750 : }
751 :
752 0 : ZoneHashNode* ZoneHashBase::_del(ZoneHashNode* node) noexcept {
753 0 : uint32_t hMod = node->_hVal % _bucketsCount;
754 :
755 0 : ZoneHashNode** pPrev = &_data[hMod];
756 0 : ZoneHashNode* p = *pPrev;
757 :
758 0 : while (p) {
759 0 : if (p == node) {
760 0 : *pPrev = p->_hashNext;
761 0 : return node;
762 : }
763 :
764 0 : pPrev = &p->_hashNext;
765 0 : p = *pPrev;
766 : }
767 :
768 : return nullptr;
769 : }
770 :
771 : // ============================================================================
772 : // [asmjit::Zone - Test]
773 : // ============================================================================
774 :
775 : #if defined(ASMJIT_TEST)
776 : UNIT(base_zonevector) {
777 : Zone zone(8096 - Zone::kZoneOverhead);
778 : ZoneHeap heap(&zone);
779 :
780 : int i;
781 : int kMax = 100000;
782 :
783 : ZoneVector<int> vec;
784 :
785 : INFO("ZoneVector<int> basic tests");
786 : EXPECT(vec.append(&heap, 0) == kErrorOk);
787 : EXPECT(vec.isEmpty() == false);
788 : EXPECT(vec.getLength() == 1);
789 : EXPECT(vec.getCapacity() >= 1);
790 : EXPECT(vec.indexOf(0) == 0);
791 : EXPECT(vec.indexOf(-11) == Globals::kInvalidIndex);
792 :
793 : vec.clear();
794 : EXPECT(vec.isEmpty());
795 : EXPECT(vec.getLength() == 0);
796 : EXPECT(vec.indexOf(0) == Globals::kInvalidIndex);
797 :
798 : for (i = 0; i < kMax; i++) {
799 : EXPECT(vec.append(&heap, i) == kErrorOk);
800 : }
801 : EXPECT(vec.isEmpty() == false);
802 : EXPECT(vec.getLength() == static_cast<size_t>(kMax));
803 : EXPECT(vec.indexOf(kMax - 1) == static_cast<size_t>(kMax - 1));
804 : }
805 :
806 : UNIT(base_ZoneBitVector) {
807 : Zone zone(8096 - Zone::kZoneOverhead);
808 : ZoneHeap heap(&zone);
809 :
810 : size_t i, count;
811 : size_t kMaxCount = 100;
812 :
813 : ZoneBitVector vec;
814 : EXPECT(vec.isEmpty());
815 : EXPECT(vec.getLength() == 0);
816 :
817 : INFO("ZoneBitVector::resize()");
818 : for (count = 1; count < kMaxCount; count++) {
819 : vec.clear();
820 : EXPECT(vec.resize(&heap, count, false) == kErrorOk);
821 : EXPECT(vec.getLength() == count);
822 :
823 : for (i = 0; i < count; i++)
824 : EXPECT(vec.getAt(i) == false);
825 :
826 : vec.clear();
827 : EXPECT(vec.resize(&heap, count, true) == kErrorOk);
828 : EXPECT(vec.getLength() == count);
829 :
830 : for (i = 0; i < count; i++)
831 : EXPECT(vec.getAt(i) == true);
832 : }
833 :
834 : INFO("ZoneBitVector::fill()");
835 : for (count = 1; count < kMaxCount; count += 2) {
836 : vec.clear();
837 : EXPECT(vec.resize(&heap, count) == kErrorOk);
838 : EXPECT(vec.getLength() == count);
839 :
840 : for (i = 0; i < (count + 1) / 2; i++) {
841 : bool value = static_cast<bool>(i & 1);
842 : EXPECT(vec.fill(i, count - i, value) == kErrorOk);
843 : }
844 :
845 : for (i = 0; i < count; i++) {
846 : EXPECT(vec.getAt(i) == static_cast<bool>(i & 1));
847 : }
848 : }
849 : }
850 :
851 : #endif // ASMJIT_TEST
852 :
853 : } // asmjit namespace
854 : } // namespace PLMD
855 :
856 : // [Api-End]
857 : #include "./asmjit_apiend.h"
858 : #pragma GCC diagnostic pop
859 : #endif // __PLUMED_HAS_ASMJIT