RockRebuild.cc
Go to the documentation of this file.
1 /*
2  * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 79 Disk IO Routines */
10 
11 #include "squid.h"
12 #include "base/AsyncJobCalls.h"
13 #include "fs/rock/RockDbCell.h"
14 #include "fs/rock/RockRebuild.h"
15 #include "fs/rock/RockSwapDir.h"
16 #include "fs_io.h"
17 #include "globals.h"
18 #include "ipc/StoreMap.h"
19 #include "md5.h"
20 #include "SquidTime.h"
21 #include "Store.h"
22 #include "store_rebuild.h"
23 #include "tools.h"
24 
25 #include <cerrno>
26 
27 CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);
28 
74 namespace Rock
75 {
76 
79 {
80 public:
81  LoadingFlags(): state(0), anchored(0), mapped(0), finalized(0), freed(0) {}
82 
83  /* for LoadingEntry */
84  uint8_t state:3;
85  uint8_t anchored:1;
86 
87  /* for LoadingSlot */
88  uint8_t mapped:1;
89  uint8_t finalized:1;
90  uint8_t freed:1;
91 };
92 
95 {
96 public:
97  LoadingEntry(const sfileno fileNo, LoadingParts &source);
98 
99  uint64_t &size;
100  uint32_t &version;
101 
104 
105  /* LoadingFlags::state */
106  State state() const { return static_cast<State>(flags.state); }
107  void state(State aState) const { flags.state = aState; }
108 
109  /* LoadingFlags::anchored */
110  bool anchored() const { return flags.anchored; }
111  void anchored(const bool beAnchored) { flags.anchored = beAnchored; }
112 
113 private:
115 };
116 
119 {
120 public:
121  LoadingSlot(const SlotId slotId, LoadingParts &source);
122 
125 
126  /* LoadingFlags::mapped */
127  bool mapped() const { return flags.mapped; }
128  void mapped(const bool beMapped) { flags.mapped = beMapped; }
129 
130  /* LoadingFlags::finalized */
131  bool finalized() const { return flags.finalized; }
132  void finalized(const bool beFinalized) { flags.finalized = beFinalized; }
133 
134  /* LoadingFlags::freed */
135  bool freed() const { return flags.freed; }
136  void freed(const bool beFreed) { flags.freed = beFreed; }
137 
138  bool used() const { return freed() || mapped() || more != -1; }
139 
140 private:
142 };
143 
147 {
148 public:
149  LoadingParts(int dbSlotLimit, int dbEntryLimit);
150  LoadingParts(LoadingParts&&) = delete; // paranoid (often too huge to copy)
151 
152 private:
153  friend class LoadingEntry;
154  friend class LoadingSlot;
155 
156  /* Anti-padding storage. With millions of entries, padding matters! */
157 
158  /* indexed by sfileno */
159  std::vector<uint64_t> sizes;
160  std::vector<uint32_t> versions;
161 
162  /* indexed by SlotId */
163  std::vector<Ipc::StoreMapSliceId> mores;
164 
165  /* entry flags are indexed by sfileno; slot flags -- by SlotId */
166  std::vector<LoadingFlags> flags;
167 };
168 
169 } /* namespace Rock */
170 
171 /* LoadingEntry */
172 
174  size(source.sizes.at(fileNo)),
175  version(source.versions.at(fileNo)),
176  flags(source.flags.at(fileNo))
177 {
178 }
179 
180 /* LoadingSlot */
181 
183  more(source.mores.at(slotId)),
184  flags(source.flags.at(slotId))
185 {
186 }
187 
188 /* LoadingParts */
189 
/// Allocates all per-entry and per-slot rebuild state up front.
/// NOTE(review): the in-class declaration names these parameters in the
/// opposite order -- LoadingParts(int dbSlotLimit, int dbEntryLimit). The
/// caller passes (dbEntryLimit, dbSlotLimit), which matches this definition,
/// so behavior is correct, but the declaration's names are misleading and
/// should be swapped to match -- confirm against the class declaration.
Rock::LoadingParts::LoadingParts(const int dbEntryLimit, const int dbSlotLimit):
    sizes(dbEntryLimit, 0), // LoadingEntry::size starts at zero payload seen
    versions(dbEntryLimit, 0), // LoadingEntry::version
    mores(dbSlotLimit, -1), // LoadingSlot::more; -1 means "no chained slot"
    flags(dbSlotLimit) // indexed by sfileno for entries and SlotId for slots
{
    assert(sizes.size() == versions.size()); // every entry has both fields
    assert(sizes.size() <= mores.size()); // every entry needs slot(s)
    assert(mores.size() == flags.size()); // every slot needs a set of flags
}
200 
201 /* Rebuild */
202 
203 Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
204  sd(dir),
205  parts(nullptr),
206  dbSize(0),
207  dbSlotSize(0),
208  dbSlotLimit(0),
209  dbEntryLimit(0),
210  fd(-1),
211  dbOffset(0),
212  loadingPos(0),
213  validationPos(0)
214 {
215  assert(sd);
216  dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
221 }
222 
224 {
225  if (fd >= 0)
226  file_close(fd);
227  delete parts;
228 }
229 
231 void
233 {
234  // in SMP mode, only the disker is responsible for populating the map
235  if (UsingSmp() && !IamDiskProcess()) {
236  debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
237  sd->index << " from " << sd->filePath);
238  mustStop("non-disker");
239  return;
240  }
241 
242  debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
243  " from " << sd->filePath);
244 
245  fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
246  if (fd < 0)
247  failure("cannot open db", errno);
248 
249  char hdrBuf[SwapDir::HeaderSize];
250  if (read(fd, hdrBuf, sizeof(hdrBuf)) != SwapDir::HeaderSize)
251  failure("cannot read db header", errno);
252 
253  // slot prefix of SM_PAGE_SIZE should fit both core entry header and ours
254  assert(sizeof(DbCellHeader) < SM_PAGE_SIZE);
256 
257  dbOffset = SwapDir::HeaderSize;
258 
259  parts = new LoadingParts(dbEntryLimit, dbSlotLimit);
260 
261  checkpoint();
262 }
263 
265 void
267 {
268  if (!done())
269  eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
270 }
271 
272 bool
274 {
275  return loadingPos >= dbSlotLimit;
276 }
277 
278 bool
280 {
281  // paranoid slot checking is only enabled with squid -S
282  return validationPos >= dbEntryLimit +
283  (opt_store_doublecheck ? dbSlotLimit : 0);
284 }
285 
286 bool
288 {
289  return doneLoading() && doneValidating() && AsyncJob::doneAll();
290 }
291 
292 void
294 {
295  // use async call to enable job call protection that time events lack
296  CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
297 }
298 
299 void
301 {
302  if (!doneLoading())
303  loadingSteps();
304  else
305  validationSteps();
306 
307  checkpoint();
308 }
309 
310 void
312 {
313  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
314  dbOffset << " <= " << dbSize);
315 
316  // Balance our desire to maximize the number of entries processed at once
317  // (and, hence, minimize overheads and total rebuild time) with a
318  // requirement to also process Coordinator events, disk I/Os, etc.
319  const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
320  const timeval loopStart = current_time;
321 
322  int loaded = 0;
323  while (!doneLoading()) {
324  loadOneSlot();
325  dbOffset += dbSlotSize;
326  ++loadingPos;
327  ++loaded;
328 
329  if (counts.scancount % 1000 == 0)
330  storeRebuildProgress(sd->index, dbSlotLimit, counts.scancount);
331 
333  continue; // skip "few entries at a time" check below
334 
335  getCurrentTime();
336  const double elapsedMsec = tvSubMsec(loopStart, current_time);
337  if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
338  debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
339  elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
340  break;
341  }
342  }
343 }
344 
347 {
348  Must(0 <= fileNo && fileNo < dbEntryLimit);
349  return LoadingEntry(fileNo, *parts);
350 }
351 
354 {
355  Must(0 <= slotId && slotId < dbSlotLimit);
356  Must(slotId <= loadingPos); // cannot look ahead
357  return LoadingSlot(slotId, *parts);
358 }
359 
360 void
362 {
363  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
364  dbOffset << " <= " << dbSize);
365 
366  ++counts.scancount;
367 
368  if (lseek(fd, dbOffset, SEEK_SET) < 0)
369  failure("cannot seek to db entry", errno);
370 
371  buf.reset();
372 
373  if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
374  return;
375 
376  const SlotId slotId = loadingPos;
377 
378  // get our header
379  DbCellHeader header;
380  if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
381  debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
382  "Ignoring truncated " << buf.contentSize() << "-byte " <<
383  "cache entry meta data at " << dbOffset);
384  freeUnusedSlot(slotId, true);
385  return;
386  }
387  memcpy(&header, buf.content(), sizeof(header));
388  if (header.empty()) {
389  freeUnusedSlot(slotId, false);
390  return;
391  }
392  if (!header.sane(dbSlotSize, dbSlotLimit)) {
393  debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
394  "Ignoring malformed cache entry meta data at " << dbOffset);
395  freeUnusedSlot(slotId, true);
396  return;
397  }
398  buf.consume(sizeof(header)); // optimize to avoid memmove()
399 
400  useNewSlot(slotId, header);
401 }
402 
404 bool
406 {
408  StoreEntry loadedE;
409  const uint64_t knownSize = header.entrySize > 0 ?
410  header.entrySize : anchor.basics.swap_file_sz.load();
411  if (!storeRebuildParseEntry(buf, loadedE, key, counts, knownSize))
412  return false;
413 
414  // the entry size may be unknown, but if it is known, it is authoritative
415 
416  debugs(47, 8, "importing basics for entry " << fileno <<
417  " inode.entrySize: " << header.entrySize <<
418  " swap_file_sz: " << loadedE.swap_file_sz);
419  anchor.set(loadedE);
420 
421  // we have not validated whether all db cells for this entry were loaded
423 
424  // loadedE->dump(5);
425 
426  return true;
427 }
428 
429 void
431 {
432  debugs(47, 5, sd->index << " validating from " << validationPos);
433 
434  // see loadingSteps() for the rationale; TODO: avoid duplication
435  const int maxSpentMsec = 50; // keep small: validation does not do I/O
436  const timeval loopStart = current_time;
437 
438  int validated = 0;
439  while (!doneValidating()) {
440  if (validationPos < dbEntryLimit)
441  validateOneEntry(validationPos);
442  else
443  validateOneSlot(validationPos - dbEntryLimit);
444  ++validationPos;
445  ++validated;
446 
447  if (validationPos % 1000 == 0)
448  debugs(20, 2, "validated: " << validationPos);
449 
451  continue; // skip "few entries at a time" check below
452 
453  getCurrentTime();
454  const double elapsedMsec = tvSubMsec(loopStart, current_time);
455  if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
456  debugs(47, 5, "pausing after " << validated << " entries in " <<
457  elapsedMsec << "ms; " << (elapsedMsec/validated) << "ms per entry");
458  break;
459  }
460  }
461 }
462 
465 void
467 {
468  // walk all map-linked slots, starting from inode, and mark each
469  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileNo);
470  Must(le.size > 0); // paranoid
471  uint64_t mappedSize = 0;
472  SlotId slotId = anchor.start;
473  while (slotId >= 0 && mappedSize < le.size) {
474  LoadingSlot slot = loadingSlot(slotId); // throws if we have not loaded that slot
475  Must(!slot.finalized()); // no loops or stealing from other entries
476  Must(slot.mapped()); // all our slots should be in the sd->map
477  Must(!slot.freed()); // all our slots should still be present
478  slot.finalized(true);
479 
480  Ipc::StoreMapSlice &mapSlice = sd->map->writeableSlice(fileNo, slotId);
481  Must(mapSlice.size > 0); // paranoid
482  mappedSize += mapSlice.size;
483  slotId = mapSlice.next;
484  }
485  /* no hodgepodge entries: one entry - one full chain and no leftovers */
486  Must(slotId < 0);
487  Must(mappedSize == le.size);
488 
489  if (!anchor.basics.swap_file_sz)
490  anchor.basics.swap_file_sz = le.size;
493  sd->map->closeForWriting(fileNo);
494  ++counts.objcount;
495 }
496 
499 void
501 {
502  try {
503  finalizeOrThrow(fileNo, le);
504  } catch (const std::exception &ex) {
505  freeBadEntry(fileNo, ex.what());
506  }
507 }
508 
509 void
511 {
512  LoadingEntry entry = loadingEntry(fileNo);
513  switch (entry.state()) {
514 
516  finalizeOrFree(fileNo, entry);
517  break;
518 
519  case LoadingEntry::leEmpty: // no entry hashed to this position
520  case LoadingEntry::leLoaded: // we have already unlocked this entry
521  case LoadingEntry::leCorrupted: // we have already removed this entry
522  case LoadingEntry::leIgnored: // we have already discarded this entry
523  break;
524  }
525 }
526 
527 void
529 {
530  const LoadingSlot slot = loadingSlot(slotId);
531  // there should not be any unprocessed slots left
532  Must(slot.freed() || (slot.mapped() && slot.finalized()));
533 }
534 
537 void
538 Rock::Rebuild::freeBadEntry(const sfileno fileno, const char *eDescription)
539 {
540  debugs(47, 2, "cache_dir #" << sd->index << ' ' << eDescription <<
541  " entry " << fileno << " is ignored during rebuild");
542 
543  LoadingEntry le = loadingEntry(fileno);
545 
546  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileno);
547  assert(anchor.start < 0 || le.size > 0);
548  for (SlotId slotId = anchor.start; slotId >= 0;) {
549  const SlotId next = loadingSlot(slotId).more;
550  freeSlot(slotId, true);
551  slotId = next;
552  }
553 
554  sd->map->forgetWritingEntry(fileno);
555 }
556 
557 void
559 {
560  debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
564 }
565 
566 void
567 Rock::Rebuild::failure(const char *msg, int errNo)
568 {
569  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
570  dbOffset << " <= " << dbSize);
571 
572  if (errNo)
573  debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
574  debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");
575 
576  assert(sd);
577  fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
578  sd->index, sd->filePath, msg);
579 }
580 
582 void
583 Rock::Rebuild::freeSlot(const SlotId slotId, const bool invalid)
584 {
585  debugs(47,5, sd->index << " frees slot " << slotId);
586  LoadingSlot slot = loadingSlot(slotId);
587  assert(!slot.freed());
588  slot.freed(true);
589 
590  if (invalid) {
591  ++counts.invalid;
592  //sd->unlink(fileno); leave garbage on disk, it should not hurt
593  }
594 
595  Ipc::Mem::PageId pageId;
596  pageId.pool = sd->index+1;
597  pageId.number = slotId+1;
598  sd->freeSlots->push(pageId);
599 }
600 
602 void
603 Rock::Rebuild::freeUnusedSlot(const SlotId slotId, const bool invalid)
604 {
605  LoadingSlot slot = loadingSlot(slotId);
606  // mapped slots must be freed via freeBadEntry() to keep the map in sync
607  assert(!slot.mapped());
608  freeSlot(slotId, invalid);
609 }
610 
612 void
613 Rock::Rebuild::mapSlot(const SlotId slotId, const DbCellHeader &header)
614 {
615  LoadingSlot slot = loadingSlot(slotId);
616  assert(!slot.mapped());
617  assert(!slot.freed());
618  slot.mapped(true);
619 
620  Ipc::StoreMapSlice slice;
621  slice.next = header.nextSlot;
622  slice.size = header.payloadSize;
623  sd->map->importSlice(slotId, slice);
624 }
625 
626 template <class SlotIdType> // accommodates atomic and simple SlotIds.
627 void
628 Rock::Rebuild::chainSlots(SlotIdType &from, const SlotId to)
629 {
630  LoadingSlot slot = loadingSlot(to);
631  assert(slot.more < 0);
632  slot.more = from; // may still be unset
633  from = to;
634 }
635 
638 void
639 Rock::Rebuild::addSlotToEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
640 {
641  LoadingEntry le = loadingEntry(fileno);
642  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileno);
643 
644  debugs(47,9, "adding " << slotId << " to entry " << fileno);
645  // we do not need to preserve the order
646  if (le.anchored()) {
647  LoadingSlot inode = loadingSlot(anchor.start);
648  chainSlots(inode.more, slotId);
649  } else {
650  chainSlots(anchor.start, slotId);
651  }
652 
653  le.size += header.payloadSize; // must precede freeBadEntry() calls
654 
655  if (header.firstSlot == slotId) {
656  debugs(47,5, "added inode");
657 
658  if (le.anchored()) { // we have already added another inode slot
659  freeBadEntry(fileno, "inode conflict");
660  ++counts.clashcount;
661  return;
662  }
663 
664  le.anchored(true);
665 
666  if (!importEntry(anchor, fileno, header)) {
667  freeBadEntry(fileno, "corrupted metainfo");
668  return;
669  }
670 
671  // set total entry size and/or check it for consistency
672  if (const uint64_t totalSize = header.entrySize) {
673  assert(totalSize != static_cast<uint64_t>(-1));
674  if (!anchor.basics.swap_file_sz) {
675  anchor.basics.swap_file_sz = totalSize;
676  assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
677  } else if (totalSize != anchor.basics.swap_file_sz) {
678  freeBadEntry(fileno, "size mismatch");
679  return;
680  }
681  }
682  }
683 
684  const uint64_t totalSize = anchor.basics.swap_file_sz; // may be 0/unknown
685 
686  if (totalSize > 0 && le.size > totalSize) { // overflow
687  debugs(47, 8, "overflow: " << le.size << " > " << totalSize);
688  freeBadEntry(fileno, "overflowing");
689  return;
690  }
691 
692  mapSlot(slotId, header);
693  if (totalSize > 0 && le.size == totalSize)
694  finalizeOrFree(fileno, le); // entry is probably fully loaded now
695 }
696 
698 void
700 {
701  anchor.setKey(reinterpret_cast<const cache_key*>(header.key));
702  assert(header.firstSlot >= 0);
703  anchor.start = -1; // addSlotToEntry() will set it
704 
705  assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
706 
707  LoadingEntry le = loadingEntry(fileno);
709  le.version = header.version;
710  le.size = 0;
711 }
712 
714 void
715 Rock::Rebuild::startNewEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
716 {
717  // A miss may have been stored at our fileno while we were loading other
718  // slots from disk. We ought to preserve that entry because it is fresher.
719  const bool overwriteExisting = false;
720  if (Ipc::StoreMap::Anchor *anchor = sd->map->openForWritingAt(fileno, overwriteExisting)) {
721  primeNewEntry(*anchor, fileno, header);
722  addSlotToEntry(fileno, slotId, header); // may fail
723  assert(anchor->basics.swap_file_sz != static_cast<uint64_t>(-1));
724  } else {
725  // A new from-network entry is occupying our map slot; let it be, but
726  // save us from the trouble of going through the above motions again.
727  LoadingEntry le = loadingEntry(fileno);
729  freeUnusedSlot(slotId, false);
730  }
731 }
732 
734 bool
735 Rock::Rebuild::sameEntry(const sfileno fileno, const DbCellHeader &header) const
736 {
737  // Header updates always result in multi-start chains and often
738  // result in multi-version chains so we can only compare the keys.
739  const Ipc::StoreMap::Anchor &anchor = sd->map->writeableEntry(fileno);
740  return anchor.sameKey(reinterpret_cast<const cache_key*>(header.key));
741 }
742 
744 void
745 Rock::Rebuild::useNewSlot(const SlotId slotId, const DbCellHeader &header)
746 {
747  const cache_key *const key =
748  reinterpret_cast<const cache_key*>(header.key);
749  const sfileno fileno = sd->map->fileNoByKey(key);
750  assert(0 <= fileno && fileno < dbEntryLimit);
751 
752  LoadingEntry le = loadingEntry(fileno);
753  debugs(47,9, "entry " << fileno << " state: " << le.state() << ", inode: " <<
754  header.firstSlot << ", size: " << header.payloadSize);
755 
756  switch (le.state()) {
757 
758  case LoadingEntry::leEmpty: {
759  startNewEntry(fileno, slotId, header);
760  break;
761  }
762 
764  if (sameEntry(fileno, header)) {
765  addSlotToEntry(fileno, slotId, header); // may fail
766  } else {
767  // either the loading chain or this slot is stale;
768  // be conservative and ignore both (and any future ones)
769  freeBadEntry(fileno, "duplicated");
770  freeUnusedSlot(slotId, true);
771  ++counts.dupcount;
772  }
773  break;
774  }
775 
776  case LoadingEntry::leLoaded: {
777  // either the previously loaded chain or this slot is stale;
778  // be conservative and ignore both (and any future ones)
780  sd->map->freeEntry(fileno); // may not be immediately successful
781  freeUnusedSlot(slotId, true);
782  ++counts.dupcount;
783  break;
784  }
785 
787  // previously seen slots messed things up so we must ignore this one
788  freeUnusedSlot(slotId, true);
789  break;
790  }
791 
793  // already replaced by a fresher or colliding from-network entry
794  freeUnusedSlot(slotId, false);
795  break;
796  }
797  }
798 }
799 
void addSlotToEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
Definition: RockRebuild.cc:639
uint8_t anchored
whether we loaded the inode slot for this entry
Definition: RockRebuild.cc:85
SwapDir * sd
Definition: RockRebuild.h:75
virtual bool doneAll() const
whether positive goal has been reached
Definition: AsyncJob.cc:96
void finalized(const bool beFinalized)
Definition: RockRebuild.cc:132
bool doneValidating() const
Definition: RockRebuild.cc:279
void mapped(const bool beMapped)
Definition: RockRebuild.cc:128
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:139
std::atomic< uint64_t > swap_file_sz
Definition: StoreMap.h:102
virtual void swanSong() override
Definition: RockRebuild.cc:558
#define CBDATA_NAMESPACED_CLASS_INIT(namespace, type)
Definition: cbdata.h:326
#define assert(EX)
Definition: assert.h:17
void finalizeOrThrow(const sfileno fileNo, LoadingEntry &le)
Definition: RockRebuild.cc:466
void useNewSlot(const SlotId slotId, const DbCellHeader &header)
handle freshly loaded (and validated) db slot header
Definition: RockRebuild.cc:745
struct Ipc::StoreMapAnchor::Basics basics
void finalizeOrFree(const sfileno fileNo, LoadingEntry &le)
Definition: RockRebuild.cc:500
void loadOneSlot()
Definition: RockRebuild.cc:361
std::vector< Ipc::StoreMapSliceId > mores
LoadingSlot::more for all slots.
Definition: RockRebuild.cc:163
#define CallJobHere(debugSection, debugLevel, job, Class, method)
Definition: AsyncJobCalls.h:57
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
Definition: StoreMap.h:46
smart StoreEntry-level info pointer (hides anti-padding LoadingParts arrays)
Definition: RockRebuild.cc:94
void storeRebuildComplete(StoreRebuildData *dc)
bool storeRebuildParseEntry(MemBuf &buf, StoreEntry &tmpe, cache_key *key, StoreRebuildData &stats, uint64_t expectedSize)
parses entry buffer and validates entry metadata; fills e on success
virtual ~Rebuild() override
Definition: RockRebuild.cc:223
std::atomic< Size > size
slice contents size
Definition: StoreMap.h:45
unsigned char cache_key
Store key.
Definition: forward.h:29
State
possible store entry states during index rebuild
Definition: RockRebuild.cc:103
bool freed() const
Definition: RockRebuild.cc:135
void state(State aState) const
Definition: RockRebuild.cc:107
static int version
uint64_t slotSize
all db slots are of this size
Definition: RockSwapDir.h:79
uint64_t entrySize
total entry content size or zero if still unknown
Definition: RockDbCell.h:42
int file_open(const char *path, int mode)
Definition: fs_io.cc:46
void checkpoint()
continues after a pause if not done
Definition: RockRebuild.cc:266
sfileno SlotId
db cell number, starting with cell 0 (always occupied by the db header)
Definition: forward.h:30
#define Must(condition)
Like assert() but throws an exception instead of aborting the process.
Definition: TextException.h:69
void loadingSteps()
Definition: RockRebuild.cc:311
#define DBG_CRITICAL
Definition: Debug.h:45
sfileno nextSlot
slot ID of the next slot occupied by the entry
Definition: RockDbCell.h:46
uint32_t pool
page pool ID within Squid
Definition: Page.h:33
struct timeval current_time
Definition: stub_time.cc:15
int opt_foreground_rebuild
LoadingEntry loadingEntry(const sfileno fileNo)
Definition: RockRebuild.cc:346
bool importEntry(Ipc::StoreMapAnchor &anchor, const sfileno slotId, const DbCellHeader &header)
parse StoreEntry basics and add them to the map, returning true on success
Definition: RockRebuild.cc:405
void primeNewEntry(Ipc::StoreMapAnchor &anchor, const sfileno fileno, const DbCellHeader &header)
initialize housekeeping information for a newly accepted entry
Definition: RockRebuild.cc:699
void fatalf(const char *fmt,...)
Definition: fatal.cc:68
uint8_t freed
whether the slot was given to the map as free space
Definition: RockRebuild.cc:90
uint8_t mapped
whether the slot was added to a mapped entry
Definition: RockRebuild.cc:88
uint64_t & size
payload seen so far
Definition: RockRebuild.cc:99
low-level anti-padding storage class for LoadingEntry and LoadingSlot flags
Definition: RockRebuild.cc:78
bool anchored() const
Definition: RockRebuild.cc:110
Rebuild(SwapDir *dir)
Definition: RockRebuild.cc:203
bool sameKey(const cache_key *const aKey) const
Definition: StoreMap.cc:770
smart db slot-level info pointer (hides anti-padding LoadingParts arrays)
Definition: RockRebuild.cc:118
uint64_t swap_file_sz
Definition: Store.h:206
int tvSubMsec(struct timeval, struct timeval)
Definition: stub_time.cc:20
uint32_t number
page number within the segment
Definition: Page.h:35
const char * xstrerr(int error)
Definition: xstrerror.cc:83
LoadingFlags & flags
entry flags (see the above accessors) are ours
Definition: RockRebuild.cc:114
void const char HLPCB void * data
Definition: stub_helper.cc:16
int64_t dbSize
Definition: RockRebuild.h:78
bool sane(const size_t slotSize, int slotLimit) const
whether this slot is not corrupted
Definition: RockDbCell.h:33
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Debug.h:124
#define DBG_IMPORTANT
Definition: Debug.h:46
std::vector< uint64_t > sizes
LoadingEntry::size for all entries.
Definition: RockRebuild.cc:159
uint32_t payloadSize
slot contents size, always positive
Definition: RockDbCell.h:43
void anchored(const bool beAnchored)
Definition: RockRebuild.cc:111
static StoreRebuildData counts
#define SQUID_MD5_DIGEST_LENGTH
Definition: md5.h:66
#define EBIT_CLR(flag, bit)
Definition: defines.h:106
LoadingParts(int dbSlotLimit, int dbEntryLimit)
Definition: RockRebuild.cc:190
static const int64_t HeaderSize
on-disk db header size
Definition: RockSwapDir.h:148
bool doneLoading() const
Definition: RockRebuild.cc:273
int dbSlotSize
the size of a db cell, including the cell header
Definition: RockRebuild.h:79
bool sameEntry(const sfileno fileno, const DbCellHeader &header) const
does the header belong to the fileno entry being loaded?
Definition: RockRebuild.cc:735
static void Steps(void *data)
Definition: RockRebuild.cc:293
bool used() const
Definition: RockRebuild.cc:138
int32_t StoreMapSliceId
Definition: StoreMap.h:24
uint64_t key[2]
StoreEntry key.
Definition: RockDbCell.h:41
int64_t diskOffsetLimit() const
Definition: RockSwapDir.cc:707
void freeBadEntry(const sfileno fileno, const char *eDescription)
Definition: RockRebuild.cc:538
uint8_t state
current entry state (one of the LoadingEntry::State values)
Definition: RockRebuild.cc:84
bool empty() const
true iff no entry occupies this slot
Definition: RockDbCell.h:28
signed_int32_t sfileno
Definition: forward.h:22
std::vector< uint32_t > versions
LoadingEntry::version for all entries.
Definition: RockRebuild.cc:160
int opt_store_doublecheck
#define SM_PAGE_SIZE
Definition: defines.h:102
uint32_t & version
DbCellHeader::version to distinguish same-URL chains.
Definition: RockRebuild.cc:100
bool UsingSmp()
Whether there should be more than one worker process running.
Definition: tools.cc:658
virtual bool doneAll() const override
whether positive goal has been reached
Definition: RockRebuild.cc:287
void const char * buf
Definition: stub_helper.cc:16
std::ostream & HERE(std::ostream &s)
Definition: Debug.h:153
Ipc::StoreMapSliceId & more
another slot in some chain belonging to the same entry (unordered!)
Definition: RockRebuild.cc:124
void startNewEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
handle a slot from an entry that we have not seen before
Definition: RockRebuild.cc:715
time_t getCurrentTime(void)
Get current time.
void eventAdd(const char *name, EVH *func, void *arg, double when, int weight, bool cbdata)
Definition: event.cc:109
void validateOneEntry(const sfileno fileNo)
Definition: RockRebuild.cc:510
void freed(const bool beFreed)
Definition: RockRebuild.cc:136
void chainSlots(SlotIdType &from, const SlotId to)
Definition: RockRebuild.cc:628
#define EBIT_SET(flag, bit)
Definition: defines.h:105
LoadingSlot(const SlotId slotId, LoadingParts &source)
Definition: RockRebuild.cc:182
int dbSlotLimit
total number of db cells
Definition: RockRebuild.h:80
int64_t entryLimitActual() const
max number of possible entries in db
Definition: RockSwapDir.cc:206
void validateOneSlot(const SlotId slotId)
Definition: RockRebuild.cc:528
int dbEntryLimit
maximum number of entries that can be stored in db
Definition: RockRebuild.h:81
int64_t slotLimitActual() const
total number of slots in this db
Definition: RockSwapDir.cc:197
ssize_t mb_size_t
Definition: MemBuf.h:17
Shared memory page identifier, address, or handler.
Definition: Page.h:21
virtual void start() override
prepares and initiates entry loading sequence
Definition: RockRebuild.cc:232
LoadingSlot loadingSlot(const SlotId slotId)
Definition: RockRebuild.cc:353
void storeRebuildProgress(int sd_index, int total, int sofar)
void validationSteps()
Definition: RockRebuild.cc:430
void mapSlot(const SlotId slotId, const DbCellHeader &header)
adds slot to the entry chain in the map
Definition: RockRebuild.cc:613
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
Definition: StoreMap.cc:777
State state() const
Definition: RockRebuild.cc:106
uint8_t finalized
whether finalizeOrThrow() has scanned the slot
Definition: RockRebuild.cc:89
bool IamDiskProcess() STUB_RETVAL_NOP(false) bool InDaemonMode() STUB_RETVAL_NOP(false) bool UsingSmp() STUB_RETVAL_NOP(false) bool IamCoordinatorProcess() STUB_RETVAL(false) bool IamPrimaryProcess() STUB_RETVAL(false) int NumberOfKids() STUB_RETVAL(0) void setMaxFD(void) STUB void setSystemLimits(void) STUB void squid_signal(int
whether the current process is dedicated to managing a cache_dir
#define O_BINARY
Definition: defines.h:204
void failure(const char *msg, int errNo=0)
Definition: RockRebuild.cc:567
void freeSlot(const SlotId slotId, const bool invalid)
adds slot to the free slot index
Definition: RockRebuild.cc:583
LoadingFlags & flags
slot flags (see the above accessors) are ours
Definition: RockRebuild.cc:141
std::vector< LoadingFlags > flags
all LoadingEntry and LoadingSlot flags
Definition: RockRebuild.cc:166
void file_close(int fd)
Definition: fs_io.cc:76
std::atomic< StoreMapSliceId > start
where the chain of StoreEntry slices begins [app]
Definition: StoreMap.h:108
bool storeRebuildLoadEntry(int fd, int diskIndex, MemBuf &buf, StoreRebuildData &)
loads entry from disk; fills supplied memory buffer on success
uint32_t version
detects conflicts among same-key entries
Definition: RockDbCell.h:44
LoadingEntry(const sfileno fileNo, LoadingParts &source)
Definition: RockRebuild.cc:173
sfileno firstSlot
slot ID of the first slot occupied by the entry
Definition: RockDbCell.h:45
bool finalized() const
Definition: RockRebuild.cc:131
int size
Definition: ModDevPoll.cc:77
bool mapped() const
Definition: RockRebuild.cc:127
void freeUnusedSlot(const SlotId slotId, const bool invalid)
freeSlot() for never-been-mapped slots
Definition: RockRebuild.cc:603
void setKey(const cache_key *const aKey)
Definition: StoreMap.cc:763

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors