PoolChunked.cc
Go to the documentation of this file.
1/*
2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/*
10 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
11 */
12
#include "squid.h"
#include "mem/PoolChunked.h"
#include "mem/Stats.h"

#include <cassert>
#include <cstring>
#include <functional>
19
/* Limits and granularity used when sizing chunks (see discussion below).
 * NOTE(review): MEM_MAX_MMAP_CHUNKS is presumably the mallopt(M_MMAP_MAX)
 * bound used in the pool constructor — the call itself is truncated from
 * this listing; confirm against the full source. */
#define MEM_MAX_MMAP_CHUNKS 2048
/* chunk sizes are rounded up to multiples of this (VM page size) */
#define MEM_PAGE_SIZE 4096
/* a chunk should hold at least this many items */
#define MEM_MIN_FREE 32
#define MEM_MAX_FREE 65535 /* unsigned short is max number of items per chunk */
24
25/*
26 * Old way:
27 * xmalloc each item separately, upon free stack into idle pool array.
28 * each item is individually malloc()ed from system, imposing libmalloc
29 * overhead, and additionally we add our overhead of pointer size per item
30 * as we keep a list of pointer to free items.
31 *
32 * Chunking:
33 * xmalloc Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
34 * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
35 * MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size.
36 * Minimum Chunk size is MEM_CHUNK_SIZE (16K).
37 * A number of items fits into a single chunk, depending on item size.
38 * Maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
39 *
 * We populate each Chunk with a linked list: each node sits in the first
 * word of an item and points at the next free item. Chunk->FreeList points
 * at the first free node. Thus we keep the free-list housekeeping inside
 * the Chunk itself and avoid any per-item pointer overhead.
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed closer
 * to the list head. The head is a hotspot, servicing most requests, so
 * slow sorting occurs and Chunks in highest memory tend to become idle
 * and freeable.
50 *
51 * event is registered that runs every 15 secs and checks reference time
52 * of each idle chunk. If a chunk is not referenced for 15 secs, it is
53 * released.
54 *
55 * [If mem_idle_limit is exceeded with pools, every chunk that becomes
56 * idle is immediately considered for release, unless this is the only
57 * chunk with free items in it.] (not implemented)
58 *
59 * In cachemgr output, there are new columns for chunking. Special item,
60 * Frag, is shown to estimate approximately fragmentation of chunked
61 * pools. Fragmentation is calculated by taking amount of items in use,
62 * calculating needed amount of chunks to fit all, and then comparing to
63 * actual amount of chunks in use. Frag number, in percent, is showing
64 * how many percent of chunks are in use excessively. 100% meaning that
65 * twice the needed amount of chunks are in use.
66 * "part" item shows number of chunks partially filled. This shows how
67 * badly fragmentation is spread across all chunks.
68 *
69 * Andres Kroonmaa.
70 */
71
/* wall-clock timestamp used for chunk aging (defined elsewhere) */
extern time_t squid_curtime;

/* local prototypes */
/* orders chunks by arena address — comparator for the allChunks splay tree */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
/* locates the chunk whose arena contains a given object */
static int memCompObjChunks(void * const &, MemChunk * const &);
77
78/* Compare chunks */
79static int
80memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
81{
82 if (chunkA->objCache > chunkB->objCache)
83 return 1;
84 else if (chunkA->objCache < chunkB->objCache)
85 return -1;
86 else
87 return 0;
88}
89
90/* Compare object to chunk */
91static int
92memCompObjChunks(void *const &obj, MemChunk * const &chunk)
93{
94 /* object is lower in memory than the chunks arena */
95 if (obj < chunk->objCache)
96 return -1;
97 /* object is within the pool */
98 if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
99 return 0;
100 /* object is above the pool */
101 return 1;
102}
103
/* MemChunk constructor (per the cross-reference index, MemChunk(MemPoolChunked *)).
 * Allocates one arena, threads a free list through it, and registers the
 * chunk with its pool.
 * NOTE(review): this listing is truncated — the signature line, the bodies
 * of the doZero branches (the arena allocation), the freeList/objCache
 * initialization, the meter updates, lastref, and the allChunks insertion
 * are missing; confirm against the full source. */
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks's first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = nullptr;
    pool = aPool;

    if (pool->doZero)
    else

    /* thread an intrusive free list through the arena: each free item's
     * first word points at the next free item */
    void **Free = (void **)freeList;

    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->objectSize);
        void **nextFree = (void **)*Free;
        Free = nextFree;
    }
    /* the brand-new chunk becomes the pool's preferred source of free items */
    pool->nextFreeChunk = this;

    ++pool->chunkCount;
}
140
/* Construct a chunked pool handing out aSize-byte objects, labeled aLabel.
 * No chunks are allocated yet; they are created on first demand.
 * NOTE(review): this listing is truncated — the statement between the
 * opening brace and the #if (original line ~147, presumably the initial
 * setChunkSize call) and the body of the #if HAVE_MALLOPT section
 * (presumably a mallopt(M_MMAP_MAX, ...) call) are missing; confirm
 * against the full source. */
MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
    Mem::Allocator(aLabel, aSize),
    chunk_size(0),
    chunk_capacity(0), chunkCount(0), freeCache(nullptr), nextFreeChunk(nullptr),
    Chunks(nullptr), allChunks(Splay<MemChunk *>())
{

#if HAVE_MALLOPT && M_MMAP_MAX
#endif
}
153
/* MemChunk destructor (per the cross-reference index, original line ~154).
 * NOTE(review): truncated — only the chunk-count decrement is visible here;
 * the signature line and the accompanying arena release, splay-tree removal,
 * and meter updates are missing from this listing; confirm against the
 * full source. */
{
    -- pool->chunkCount;
}
162
/* Return one object to this pool's global free cache.
 * NOTE(review): the signature line (MemPoolChunked::push(void *obj), per
 * the index) is missing between "void" and "{" in this listing, as is one
 * statement near the end (original line ~177). */
void
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example data buffers such as used by MemBuf do
     * not really need to be cleared.. There was a condition based on
     * the object size here, but such condition is not safe.
     */
    if (doZero)
        memset(obj, 0, objectSize);
    /* push obj onto the head of the intrusive freeCache list: the object's
     * first word becomes the link to the previous head */
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
}
179
180/*
181 * Find a chunk with a free item.
182 * Create new chunk on demand if no chunk with frees found.
183 * Insert new chunk in front of lowest ram chunk, making it preferred in future,
184 * and resulting slow compaction towards lowest ram area.
185 */
/* Hand out one free object, preferring the global free cache, then the
 * per-chunk free lists, creating a new chunk only as a last resort.
 * NOTE(review): truncated — the signature line (MemPoolChunked::get(), per
 * the index), the ++countSavedAllocs referenced by the comment below, and
 * the statement removing an exhausted chunk from the free-chunk chain
 * (original line ~218) are missing from this listing. */
void *
{
    void **Free;


    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        freeCache = *Free;  /* pop the head of the intrusive free list */
        *Free = nullptr;    /* scrub the link word before handing the object out */
        return Free;
    }
    /* then try perchunk freelist chain */
    if (nextFreeChunk == nullptr) {
        /* no chunk with frees, so create new one */
        --countSavedAllocs; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in perchunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;    /* advance the chunk's own free list */
    *Free = nullptr;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime; /* chunk aging for clean() */

    if (chunk->freeList == nullptr) {
        /* last free in this chunk, so remove us from perchunk freelist chain */
    }
    return Free;
}
223
224/* just create a new chunk and place it into a good spot in the chunk chain */
/* Allocate a new chunk and splice it into the Chunks list, which is kept
 * sorted by ascending arena address so low-memory chunks stay near the
 * (hot) head and high-memory chunks drift idle and freeable.
 * NOTE(review): the signature line (MemPoolChunked::createChunk(), per the
 * index) is missing between "void" and "{" in this listing. */
void
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == nullptr) { /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    /* walk the address-sorted list to find the insertion point */
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in chain, add as last */
    chunk->next = newChunk;
}
255
/* Derive chunk_size and chunk_capacity from the requested chunksize:
 * round the size up to MEM_PAGE_SIZE multiples and clamp the per-chunk
 * item count to [1, MEM_MAX_FREE], raising it to at least MEM_MIN_FREE.
 * A no-op once chunks exist, since resizing would corrupt them.
 * NOTE(review): truncated — the signature line (MemPoolChunked::
 * setChunkSize(size_t), per the index) and the body of the
 * MEM_CHUNK_MAX_SIZE clamp (original line ~281) are missing from this
 * listing. */
void
{
    int cap;
    size_t csize = chunksize;

    if (Chunks) /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
    cap = csize / objectSize;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * objectSize > MEM_CHUNK_MAX_SIZE)
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    /* recompute the size from the clamped capacity, then re-round and
     * recompute capacity so both values stay mutually consistent */
    csize = cap * objectSize;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
    cap = csize / objectSize;

    chunk_capacity = cap;
    chunk_size = csize;
}
294
295/*
296 * warning: we do not clean this entry from Pools assuming destruction
297 * is used at the end of the program only
298 */
/* MemPoolChunked destructor (original line ~299 per the index): returns
 * idle objects home, asserts nothing is still in use, then frees every
 * chunk.
 * NOTE(review): truncated — the signature line and one statement near the
 * top (original line ~303) are missing from this listing. */
{
    MemChunk *chunk, *fchunk;

    clean(0);   /* age 0: release every idle chunk immediately */
    assert(getInUseCount() == 0);   /* objects still in use at teardown is a caller bug */

    /* free the whole chunk chain */
    chunk = Chunks;
    while ( (fchunk = chunk) != nullptr) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */

}
315
/* Mem::Allocator API: hand out one object and account for it.
 * NOTE(review): truncated — the signature line (MemPoolChunked::allocate(),
 * per the index) and one statement (original line ~320) are missing from
 * this listing. */
void *
{
    void *p = get();
    --meter.idle;   /* the object leaves the idle population... */
    ++meter.inuse;  /* ...and joins the in-use population */
    return p;
}
325
/* Mem::Allocator API: take one object back and account for it.
 * NOTE(review): truncated — the signature line (MemPoolChunked::
 * deallocate(void *), per the index) and one statement (original line
 * ~330) are missing from this listing. */
void
{
    push(obj);      /* back onto the global free cache */
    --meter.inuse;  /* mirror image of allocate()'s accounting */
    ++meter.idle;
}
334
/* Drain the pool-global freeCache, returning each cached object to the
 * free list of the chunk whose arena contains it (located via the
 * allChunks splay tree).
 * NOTE(review): truncated — the signature line (per the index,
 * MemPoolChunked::convertFreeCacheToChunkFreeCache()) and one statement
 * inside the loop (original line ~347, presumably a splay-result assert)
 * are missing from this listing. */
void
{
    void *Free;
    /*
     * OK, so we have to go through all the global freeCache and find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != nullptr) {
        MemChunk *chunk = nullptr;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        /* the object's first word is the free-list link; tell valgrind we
         * may legitimately read/write it while it is marked free */
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList; /* stuff into chunks freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime; /* chunk was just touched */
    }

}
359
360/* removes empty Chunks from pool */
/* Release chunks that have been completely idle for at least maxage
 * seconds (the first chunk is never released), then rebuild the
 * nextFreeChunk chain ordered "most filled first" (ties broken by lower
 * arena address), so future allocations pack objects into nearly-full,
 * low-memory chunks.
 * NOTE(review): truncated — the signature line (MemPoolChunked::
 * clean(time_t maxage), per the index), the statements before the first
 * scan (original lines ~370-371), and the final first-chunk fixup
 * (original lines ~419-420) are missing from this listing. */
void
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != nullptr) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = nullptr;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                /* unlink and destroy the expired, fully-idle chunk */
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = nullptr;
            }
        if (chunk->next == nullptr)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = nullptr;

    while (chunk->next) {
        chunk->next->nextFreeChunk = nullptr;
        if (chunk->next->inuse_count < chunk_capacity) {
            /* insertion sort: find where this partially-filled chunk
             * belongs in the descending-fill nextFreeChunk chain */
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from 2nd chunk. If first chunk is full, remove it */

    return;
}
424
/* True when the idle population exceeds (chunk_capacity << shift) items,
 * i.e. there are enough idle objects to fill more than 2^shift chunks.
 * NOTE(review): the signature line (MemPoolChunked::idleTrigger(int) const,
 * per the index) is missing between "bool" and "{" in this listing. */
bool
{
    return meter.idle.currentLevel() > (chunk_capacity << shift);
}
430
/* Accumulate this pool's statistics into the caller-supplied Mem::PoolStats
 * (note the += updates: callers aggregate across pools) and return the
 * number of objects currently in use.
 * NOTE(review): the signature line (MemPoolChunked::getStats(
 * Mem::PoolStats &), per the index) is missing between "size_t" and "{"
 * in this listing. */
size_t
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats.pool = this;
    stats.label = label;
    stats.meter = &meter;
    stats.obj_size = objectSize;
    stats.chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats.chunks_alloc += chunkCount;
    stats.chunks_inuse += chunkCount - chunks_free;
    stats.chunks_partial += chunks_partial;
    stats.chunks_free += chunks_free;

    stats.items_alloc += meter.alloc.currentLevel();
    stats.items_inuse += meter.inuse.currentLevel();
    stats.items_idle += meter.idle.currentLevel();

    /* fixed bookkeeping cost: pool object, chunk headers, label string */
    stats.overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(label) + 1;

    return getInUseCount();
}
469
time_t squid_curtime
Definition: stub_libtime.cc:20
static int memCompChunks(MemChunk *const &, MemChunk *const &)
Definition: PoolChunked.cc:80
#define MEM_PAGE_SIZE
Definition: PoolChunked.cc:21
#define MEM_MAX_FREE
Definition: PoolChunked.cc:23
#define MEM_MIN_FREE
Definition: PoolChunked.cc:22
#define MEM_MAX_MMAP_CHUNKS
Definition: PoolChunked.cc:20
static int memCompObjChunks(void *const &, MemChunk *const &)
Definition: PoolChunked.cc:92
#define MEM_CHUNK_MAX_SIZE
Definition: PoolChunked.h:16
#define MEM_CHUNK_SIZE
Definition: PoolChunked.h:15
#define M_MMAP_MAX
Definition: Pool.h:47
#define assert(EX)
Definition: assert.h:17
int inuse_count
Definition: PoolChunked.h:61
time_t lastref
Definition: PoolChunked.h:64
MemChunk * next
Definition: PoolChunked.h:63
void * objCache
Definition: PoolChunked.h:60
MemChunk * nextFreeChunk
Definition: PoolChunked.h:62
MemPoolChunked * pool
Definition: PoolChunked.h:65
MemChunk(MemPoolChunked *pool)
Definition: PoolChunked.cc:104
void * freeList
Definition: PoolChunked.h:59
void createChunk()
Definition: PoolChunked.cc:226
void push(void *obj)
Definition: PoolChunked.cc:164
bool idleTrigger(int) const override
Definition: PoolChunked.cc:426
void convertFreeCacheToChunkFreeCache()
Definition: PoolChunked.cc:336
void * freeCache
Definition: PoolChunked.h:47
void * allocate() override
*alloc()
Definition: PoolChunked.cc:317
friend class MemChunk
Definition: PoolChunked.h:24
MemChunk * nextFreeChunk
Definition: PoolChunked.h:48
void deallocate(void *) override
freeOne(void *)
Definition: PoolChunked.cc:327
size_t getStats(Mem::PoolStats &) override
Definition: PoolChunked.cc:432
Splay< MemChunk * > allChunks
Definition: PoolChunked.h:50
void setChunkSize(size_t) override
Definition: PoolChunked.cc:267
~MemPoolChunked() override
Definition: PoolChunked.cc:299
MemPoolChunked(const char *label, size_t obj_size)
Definition: PoolChunked.cc:141
MemChunk * Chunks
Definition: PoolChunked.h:49
void clean(time_t) override
Definition: PoolChunked.cc:362
size_t chunk_size
Definition: PoolChunked.h:44
const size_t objectSize
the size (in bytes) of objects managed by this allocator
Definition: Allocator.h:112
void flushCounters()
Definition: Allocator.h:74
int getInUseCount() const
the difference between the number of alloc() and freeOne() calls
Definition: Allocator.h:59
const char * label
brief description of objects returned by alloc()
Definition: Allocator.h:109
PoolMeter meter
statistics tracked for this allocator
Definition: Allocator.h:115
size_t countSavedAllocs
the number of malloc()/calloc() calls avoided since last flush
Definition: Allocator.h:101
ssize_t currentLevel() const
Definition: Meter.h:26
Meter idle
Definition: Meter.h:91
Meter alloc
Definition: Meter.h:89
Meter inuse
Definition: Meter.h:90
Definition: splay.h:50
Value const * find(FindValue const &, int(*compare)(FindValue const &a, Value const &b)) const
void remove(Value const &, SPLAYCMP *compare)
Definition: splay.h:332
void insert(Value const &, SPLAYCMP *compare)
Definition: splay.h:318
Memory Management.
Definition: Allocator.h:17
class Ping::pingStats_ stats
#define xfree
#define xmalloc
SQUIDCEXTERN int splayLastResult
Definition: splay.h:93
#define VALGRIND_MAKE_MEM_NOACCESS
Definition: valgrind.h:25
#define VALGRIND_MAKE_MEM_DEFINED
Definition: valgrind.h:27
void * xcalloc(size_t n, size_t sz)
Definition: xalloc.cc:71

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors