aiops.cc
Go to the documentation of this file.
1/*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/* DEBUG: section 43 AIOPS */
10
11#ifndef _REENTRANT
12#error "_REENTRANT MUST be defined to build squid async io support."
13#endif
14
15#include "squid.h"
17#include "DiskThreads.h"
18#include "SquidConfig.h"
19#include "Store.h"
20
21/*
22 * struct stat and squidaio_xstrdup use explicit pool alloc()/freeOne().
23 * XXX: convert to MEMPROXY_CLASS() API
24 */
25#include "mem/Pool.h"
26
27#include <cerrno>
28#include <csignal>
29#include <sys/stat.h>
30#include <fcntl.h>
31#include <pthread.h>
32#include <dirent.h>
33#if HAVE_SCHED_H
34#include <sched.h>
35#endif
36
37#define RIDICULOUS_LENGTH 4096
 38
/*
 * NOTE(review): Doxygen extraction dropped several lines here (the
 * _squidaio_thread_status enum body and a number of struct fields) --
 * verify against upstream src/DiskIO/DiskThreads/aiops.cc.
 */
 45};
 47
/* One queued async I/O operation: its parameters and its outcome. */
 48typedef struct squidaio_request_t {
 49
 53 char *path;
 54 int oflag;
 56 int fd;
 57 char *bufferp;
 58 size_t buflen;
 59 off_t offset;
 60 int whence;
 61 int ret; /* syscall return value; copied into resultp->aio_return */
 62 int err; /* saved errno; copied into resultp->aio_errno */
 63
 64 struct stat *tmpstatp; /* worker-side stat buffer, copied to statp on success */
 65
 66 struct stat *statp; /* caller-supplied destination for stat results */
 69
/* Mutex/condvar-protected FIFO shared between main thread and workers. */
 71 pthread_mutex_t mutex;
 72 pthread_cond_t cond;
 74 squidaio_request_t *volatile *volatile tailp; /* tail pointer for O(1) append */
 75 unsigned long requests; /* total requests ever enqueued */
 76 unsigned long blocked; /* main failed to lock the queue */
 78
 80
/* Per-worker-thread bookkeeping (squidaio_thread_t). */
 83 pthread_t thread;
 85
 87 unsigned long requests; /* requests serviced by this thread */
 88};
89
/* Worker thread entry point; one instance runs per pool thread. */
 92void *squidaio_thread_loop(void *);
 99#if AIO_OPENDIR
 100static void *squidaio_do_opendir(squidaio_request_t *);
 101#endif
 103static void squidaio_poll_queues(void);
 104
 105static squidaio_thread_t *threads = nullptr;
 106static int squidaio_initialised = 0;
 107
/* Buffer-size tiers for the pooled I/O buffers (bytes). */
 108#define AIO_LARGE_BUFS 16384
 109#define AIO_MEDIUM_BUFS AIO_LARGE_BUFS >> 1
 110#define AIO_SMALL_BUFS AIO_LARGE_BUFS >> 2
 111#define AIO_TINY_BUFS AIO_LARGE_BUFS >> 3
 112#define AIO_MICRO_BUFS 128
 113
 114static MemAllocator *squidaio_large_bufs = nullptr; /* 16K */
 115static MemAllocator *squidaio_medium_bufs = nullptr; /* 8K */
 116static MemAllocator *squidaio_small_bufs = nullptr; /* 4K */
 117static MemAllocator *squidaio_tiny_bufs = nullptr; /* 2K */
 118static MemAllocator *squidaio_micro_bufs = nullptr; /* 128 bytes (AIO_MICRO_BUFS), not 128K */
 119
 120static int request_queue_len = 0;
 124
/*
 * NOTE(review): the initializers of request_queue2 and done_requests
 * below are truncated by the Doxygen dump (struct bodies missing) --
 * verify against upstream. Both are main-thread-only overflow/result
 * lists keyed off a head pointer plus tail pointer-to-pointer.
 */
 125static struct {
 127}
 128
 130
 131 nullptr, &request_queue2.head
 134
 135static struct {
 137}
 138
 140
 141 nullptr, &done_requests.head
 143static pthread_attr_t globattr;
 144#if HAVE_SCHED_H
 145
 146static struct sched_param globsched;
 147#endif
 148static pthread_t main_thread;
149
/*
 * Map a requested size to the smallest pooled-buffer tier that fits,
 * or nullptr when the size exceeds AIO_LARGE_BUFS (heap fallback).
 * NOTE(review): the dump dropped the signature line and the
 * "return squidaio_medium_bufs;" branch body -- verify upstream.
 */
 150static MemAllocator *
 152{
 153 if (size <= AIO_LARGE_BUFS) {
 154 if (size <= AIO_MICRO_BUFS)
 155 return squidaio_micro_bufs;
 156 else if (size <= AIO_TINY_BUFS)
 157 return squidaio_tiny_bufs;
 158 else if (size <= AIO_SMALL_BUFS)
 159 return squidaio_small_bufs;
 160 else if (size <= AIO_MEDIUM_BUFS)
 162 else
 163 return squidaio_large_bufs;
 164 }
 165
 166 return nullptr;
 167}
 168
/*
 * Allocate size bytes from the matching pool tier, or xmalloc when no
 * tier fits. Pair with squidaio_xfree(p, size) using the same size.
 */
 169void *
 171{
 172 void *p;
 173 MemAllocator *pool;
 174
 175 if ((pool = squidaio_get_pool(size)) != nullptr) {
 176 p = pool->alloc();
 177 } else
 178 p = xmalloc(size);
 179
 180 return p;
 181}
182
183static char *
184squidaio_xstrdup(const char *str)
185{
186 char *p;
187 int len = strlen(str) + 1;
188
189 p = (char *)squidaio_xmalloc(len);
190 strncpy(p, str, len);
191
192 return p;
193}
194
195void
196squidaio_xfree(void *p, int size)
197{
198 MemAllocator *pool;
199
200 if ((pool = squidaio_get_pool(size)) != nullptr) {
201 pool->freeOne(p);
202 } else
203 xfree(p);
204}
205
/*
 * Free a string allocated via squidaio_xstrdup(); recomputes the
 * original allocation size from strlen to pick the right pool tier.
 * NOTE(review): the signature line "squidaio_xstrfree(char *str)"
 * (original line 207) is missing from this dump.
 */
 206static void
 208{
 209 MemAllocator *pool;
 210 int len = strlen(str) + 1;
 211
 212 if ((pool = squidaio_get_pool(len)) != nullptr) {
 213 pool->freeOne(str);
 214 } else
 215 xfree(str);
 216}
217
/*
 * One-time module initialisation: thread attributes/scheduling, the
 * request and done queues, CommIO notification pipes, NUMTHREADS worker
 * threads, and the per-tier buffer pools.
 * NOTE(review): the dump dropped several lines (the already-initialised
 * guard, queue tailp/requests/blocked setup, thread-pool alloc of
 * threadp, CommIO::Initialize, and squidaio_initialised = 1) --
 * verify against upstream before relying on the exact sequence.
 */
 218void
 220{
 221 int i;
 222 squidaio_thread_t *threadp;
 223
 225 return;
 226
 227 pthread_attr_init(&globattr);
 228
 229#if HAVE_PTHREAD_ATTR_SETSCOPE
 230
 231 pthread_attr_setscope(&globattr, PTHREAD_SCOPE_SYSTEM);
 232
 233#endif
 234#if HAVE_SCHED_H
 235
 236 globsched.sched_priority = 1;
 237
 238#endif
 239
 240 main_thread = pthread_self();
 241
 242#if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
 243
 244 pthread_setschedparam(main_thread, SCHED_OTHER, &globsched);
 245
 246#endif
 247#if HAVE_SCHED_H
 248
/* Workers get a slightly higher priority than the main thread. */
 249 globsched.sched_priority = 2;
 250
 251#endif
 252#if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
 253
 254 pthread_attr_setschedparam(&globattr, &globsched);
 255
 256#endif
 257
 258 /* Give each thread a smaller 256KB stack, should be more than sufficient */
 259 pthread_attr_setstacksize(&globattr, 256 * 1024);
 260
 261 /* Initialize request queue */
 262 if (pthread_mutex_init(&(request_queue.mutex), nullptr))
 263 fatal("Failed to create mutex");
 264
 265 if (pthread_cond_init(&(request_queue.cond), nullptr))
 266 fatal("Failed to create condition variable");
 267
 268 request_queue.head = nullptr;
 269
 271
 273
 275
 276 /* Initialize done queue */
 277 if (pthread_mutex_init(&(done_queue.mutex), nullptr))
 278 fatal("Failed to create mutex");
 279
 280 if (pthread_cond_init(&(done_queue.cond), nullptr))
 281 fatal("Failed to create condition variable");
 282
 283 done_queue.head = nullptr;
 284
 286
 288
 290
 291 // Initialize the thread I/O pipes before creating any threads
 292 // see bug 3189 comment 5 about race conditions.
 294
 295 /* Create threads and get them to sit in their wait loop */
 297
 298 assert(NUMTHREADS != 0);
 299
 300 for (i = 0; i < NUMTHREADS; ++i) {
 302 threadp->status = _THREAD_STARTING;
 303 threadp->current_req = nullptr;
 304 threadp->requests = 0;
 305 threadp->next = threads;
 306 threads = threadp;
 307
 308 if (pthread_create(&threadp->thread, &globattr, squidaio_thread_loop, threadp)) {
/* A failed thread stays on the list marked _THREAD_FAILED; it is
 * counted by squidaio_stats() but never services requests. */
 309 fprintf(stderr, "Thread creation failed\n");
 310 threadp->status = _THREAD_FAILED;
 311 continue;
 312 }
 313 }
 314
 315 /* Create request pool */
 317
 318 squidaio_large_bufs = memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS);
 319
 320 squidaio_medium_bufs = memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS);
 321
 322 squidaio_small_bufs = memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS);
 323
 324 squidaio_tiny_bufs = memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS);
 325
 326 squidaio_micro_bufs = memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS);
 327
 329}
330
/*
 * Drain all pending requests (busy-polling the queues like
 * squidaio_sync does), then close the CommIO notification pipe.
 * NOTE(review): the initialised guard, the squidaio_poll_queues()
 * loop body, and the teardown lines are missing from this dump.
 */
 331void
 333{
 335 return;
 336
 337 /* This is the same as in squidaio_sync */
 338 do {
 340 } while (request_queue_len > 0);
 341
 343
 345}
346
/*
 * Worker thread main loop: block squid's signals, then forever pop one
 * request off request_queue (condvar wait), dispatch it to the matching
 * squidaio_do_* handler, and append the finished request to done_queue.
 * NOTE(review): the dump dropped the request-dequeue line, the
 * request_queue.tailp reset, the per-op squidaio_do_* calls, the
 * done-queue append and the CommIO::NotifyIOCompleted() call --
 * verify against upstream.
 */
 347void *
 349{
 350 squidaio_thread_t *threadp = (squidaio_thread_t *)ptr;
 352 sigset_t newSig;
 353
 354 /*
 355 * Make sure to ignore signals which may possibly get sent to
 356 * the parent squid thread. Causes havoc with mutex's and
 357 * condition waits otherwise
 358 */
 359
 360 sigemptyset(&newSig);
 361 sigaddset(&newSig, SIGPIPE);
 362 sigaddset(&newSig, SIGCHLD);
 363#if defined(_SQUID_LINUX_THREADS_)
 364
 365 sigaddset(&newSig, SIGQUIT);
 366 sigaddset(&newSig, SIGTRAP);
 367#else
 368
 369 sigaddset(&newSig, SIGUSR1);
 370 sigaddset(&newSig, SIGUSR2);
 371#endif
 372
 373 sigaddset(&newSig, SIGHUP);
 374 sigaddset(&newSig, SIGTERM);
 375 sigaddset(&newSig, SIGINT);
 376 sigaddset(&newSig, SIGALRM);
 377 pthread_sigmask(SIG_BLOCK, &newSig, nullptr);
 378
 379 while (1) {
 380 threadp->current_req = request = nullptr;
 381 request = nullptr;
 382 /* Get a request to process */
 383 threadp->status = _THREAD_WAITING;
 384 pthread_mutex_lock(&request_queue.mutex);
 385
 386 while (!request_queue.head) {
 387 pthread_cond_wait(&request_queue.cond, &request_queue.mutex);
 388 }
 389
 391
 392 if (request)
 393 request_queue.head = request->next;
 394
 395 if (!request_queue.head)
 397
 398 pthread_mutex_unlock(&request_queue.mutex);
 399
 400 /* process the request */
 401 threadp->status = _THREAD_BUSY;
 402
 403 request->next = nullptr;
 404
 405 threadp->current_req = request;
 406
 407 errno = 0;
 408
 409 if (!request->cancelled) {
 410 switch (request->request_type) {
 411
 412 case _AIO_OP_OPEN:
 414 break;
 415
 416 case _AIO_OP_READ:
 418 break;
 419
 420 case _AIO_OP_WRITE:
 422 break;
 423
 424 case _AIO_OP_CLOSE:
 426 break;
 427
 428 case _AIO_OP_UNLINK:
 430 break;
 431
 432#if AIO_OPENDIR /* Opendir not implemented yet */
 433
 434 case _AIO_OP_OPENDIR:
 435 squidaio_do_opendir(request);
 436 break;
 437#endif
 438
 439 case _AIO_OP_STAT:
 441 break;
 442
 443 default:
/* Unknown op type: fail the request rather than crash the worker. */
 444 request->ret = -1;
 445 request->err = EINVAL;
 446 break;
 447 }
 448 } else { /* cancelled */
 449 request->ret = -1;
 450 request->err = EINTR;
 451 }
 452
 453 threadp->status = _THREAD_DONE;
 454 /* put the request in the done queue */
 455 pthread_mutex_lock(&done_queue.mutex);
 457 done_queue.tailp = &request->next;
 458 pthread_mutex_unlock(&done_queue.mutex);
 460 ++ threadp->requests;
 461 } /* while forever */
 462
 463 return nullptr;
 464} /* squidaio_thread_loop */
465
/*
 * Hand a prepared request to the worker pool. Uses trylock so the main
 * thread never blocks: on contention the request parks on the lockless
 * (main-thread-only) request_queue2 overflow list, which is drained
 * into request_queue on the next successful lock. Also emits
 * congestion/overload warnings and, past RIDICULOUS_LENGTH, forces a
 * blocking squidaio_sync().
 * NOTE(review): several lines are missing from this dump (queue-append
 * of the overflow list, the actual enqueue "*request_queue.tailp",
 * request_queue_len accounting, and the overload condition guards).
 */
 466static void
 468{
 469 static int high_start = 0;
 470 debugs(43, 9, "squidaio_queue_request: " << request << " type=" << request->request_type << " result=" << request->resultp);
 471 /* Mark it as not executed (failing result, no error) */
 472 request->ret = -1;
 473 request->err = 0;
 474 /* Internal housekeeping */
 476 request->resultp->_data = request;
 477 /* Play some tricks with the request_queue2 queue */
 478 request->next = nullptr;
 479
 480 if (pthread_mutex_trylock(&request_queue.mutex) == 0) {
 481 if (request_queue2.head) {
 482 /* Grab blocked requests */
 485 }
 486
 487 /* Enqueue request */
 489
 490 request_queue.tailp = &request->next;
 491
 492 pthread_cond_signal(&request_queue.cond);
 493
 494 pthread_mutex_unlock(&request_queue.mutex);
 495
 496 if (request_queue2.head) {
 497 /* Clear queue of blocked requests */
 498 request_queue2.head = nullptr;
 499 request_queue2.tailp = &request_queue2.head;
 500 }
 501 } else {
 502 /* Oops, the request queue is blocked, use request_queue2 */
 503 *request_queue2.tailp = request;
 504 request_queue2.tailp = &request->next;
 505 }
 506
 507 if (request_queue2.head) {
/* Rate-limited warning: doubles the reporting threshold each time. */
 508 static uint64_t filter = 0;
 509 static uint64_t filter_limit = 8192;
 510
 511 if (++filter >= filter_limit) {
 512 filter_limit += filter;
 513 filter = 0;
 514 debugs(43, DBG_IMPORTANT, "WARNING: squidaio_queue_request: Queue congestion (growing to " << filter_limit << ")");
 515 }
 516 }
 517
 518 /* Warn if out of threads */
 520 static int last_warn = 0;
 521 static int queue_high, queue_low;
 522
 523 if (high_start == 0) {
 524 high_start = squid_curtime;
 525 queue_high = request_queue_len;
 526 queue_low = request_queue_len;
 527 }
 528
 529 if (request_queue_len > queue_high)
 530 queue_high = request_queue_len;
 531
 532 if (request_queue_len < queue_low)
 533 queue_low = request_queue_len;
 534
 535 if (squid_curtime >= (last_warn + 15) &&
 536 squid_curtime >= (high_start + 5)) {
 537 debugs(43, DBG_IMPORTANT, "WARNING: squidaio_queue_request: Disk I/O overloading");
 538
 539 if (squid_curtime >= (high_start + 15))
 540 debugs(43, DBG_IMPORTANT, "squidaio_queue_request: Queue Length: current=" <<
 541 request_queue_len << ", high=" << queue_high <<
 542 ", low=" << queue_low << ", duration=" <<
 543 (long int) (squid_curtime - high_start));
 544
 545 last_warn = squid_curtime;
 546 }
 547 } else {
 548 high_start = 0;
 549 }
 550
 551 /* Warn if seriously overloaded */
 553 debugs(43, DBG_CRITICAL, "squidaio_queue_request: Async request queue growing uncontrollably!");
 554 debugs(43, DBG_CRITICAL, "squidaio_queue_request: Syncing pending I/O operations.. (blocking)");
 556 debugs(43, DBG_CRITICAL, "squidaio_queue_request: Synced");
 557 }
 558} /* squidaio_queue_request */
559
/*
 * Finalise a completed request: copy results back to the caller's
 * structures (unless cancelled), release per-op resources (path
 * strings, temp stat buffer), and undo side effects of cancelled
 * open/close operations so no file descriptor leaks.
 * NOTE(review): the signature line (560...) and the final request-pool
 * free (line 620) are missing from this dump.
 */
 560static void
 562{
 563 squidaio_result_t *resultp = requestp->resultp;
 564 int cancelled = requestp->cancelled;
 565
 566 /* Free allocated structures and copy data back to user space if the */
 567 /* request hasn't been cancelled */
 568
 569 switch (requestp->request_type) {
 570
 571 case _AIO_OP_STAT:
 572
 573 if (!cancelled && requestp->ret == 0)
 574 memcpy(requestp->statp, requestp->tmpstatp, sizeof(struct stat));
 575
 576 squidaio_xfree(requestp->tmpstatp, sizeof(struct stat));
 577
 578 squidaio_xstrfree(requestp->path);
 579
 580 break;
 581
 582 case _AIO_OP_OPEN:
 583 if (cancelled && requestp->ret >= 0)
 584 /* The open() was cancelled but completed */
 585 close(requestp->ret);
 586
 587 squidaio_xstrfree(requestp->path);
 588
 589 break;
 590
 591 case _AIO_OP_CLOSE:
 592 if (cancelled && requestp->ret < 0)
 593 /* The close() was cancelled and never got executed */
 594 close(requestp->fd);
 595
 596 break;
 597
 598 case _AIO_OP_UNLINK:
 599
 600 case _AIO_OP_OPENDIR:
 601 squidaio_xstrfree(requestp->path);
 602
 603 break;
 604
 605 case _AIO_OP_READ:
 606 break;
 607
 608 case _AIO_OP_WRITE:
 609 break;
 610
 611 default:
 612 break;
 613 }
 614
 615 if (resultp != nullptr && !cancelled) {
 616 resultp->aio_return = requestp->ret;
 617 resultp->aio_errno = requestp->err;
 618 }
 619
 621} /* squidaio_cleanup_request */
622
/*
 * Best-effort cancellation: marks the in-flight request (looked up via
 * resultp->_data) as cancelled and detaches it from the caller's
 * result. Returns 0 on success, 1 if no matching request was found.
 * The worker still runs/finishes the request; cleanup undoes its side
 * effects. NOTE(review): the signature line and the
 * "request = resultp->_data" lookup line are missing from this dump.
 */
 623int
 625{
 627
 628 if (request && request->resultp == resultp) {
 629 debugs(43, 9, "squidaio_cancel: " << request << " type=" << request->request_type << " result=" << request->resultp);
 630 request->cancelled = 1;
 631 request->resultp = nullptr;
 632 resultp->_data = nullptr;
 633 resultp->result_type = _AIO_OP_NONE;
 634 return 0;
 635 }
 636
 637 return 1;
 638} /* squidaio_cancel */
639
/*
 * Queue an async open(path, oflag, mode); completion is reported via
 * resultp through squidaio_poll_done(). Always returns 0 (queued).
 * NOTE(review): the request-pool allocation of requestp (line 646) is
 * missing from this dump.
 */
 640int
 641squidaio_open(const char *path, int oflag, mode_t mode, squidaio_result_t * resultp)
 642{
 644 squidaio_request_t *requestp;
 645
 647
 648 requestp->path = (char *) squidaio_xstrdup(path);
 649
 650 requestp->oflag = oflag;
 651
 652 requestp->mode = mode;
 653
 654 requestp->resultp = resultp;
 655
 656 requestp->request_type = _AIO_OP_OPEN;
 657
 658 requestp->cancelled = 0;
 659
 660 resultp->result_type = _AIO_OP_OPEN;
 661
 662 squidaio_queue_request(requestp);
 663
 664 return 0;
 665}
 666
/* Worker-side open(2); stores result and errno on the request. */
 667static void
 669{
 670 requestp->ret = open(requestp->path, requestp->oflag, requestp->mode);
 671 requestp->err = errno;
 672}
673
/*
 * Queue an async positioned read into the caller's buffer bufp (which
 * must stay valid until completion). Always returns 0 (queued).
 * NOTE(review): the request-pool allocation line is missing from this
 * dump.
 */
 674int
 675squidaio_read(int fd, char *bufp, size_t bufs, off_t offset, int whence, squidaio_result_t * resultp)
 676{
 677 squidaio_request_t *requestp;
 678
 680
 681 requestp->fd = fd;
 682
 683 requestp->bufferp = bufp;
 684
 685 requestp->buflen = bufs;
 686
 687 requestp->offset = offset;
 688
 689 requestp->whence = whence;
 690
 691 requestp->resultp = resultp;
 692
 693 requestp->request_type = _AIO_OP_READ;
 694
 695 requestp->cancelled = 0;
 696
 697 resultp->result_type = _AIO_OP_READ;
 698
 699 squidaio_queue_request(requestp);
 700
 701 return 0;
 702}
 703
/* Worker-side seek+read; on lseek failure ret is -1 and err holds the
 * lseek errno. */
 704static void
 706{
 707 if (lseek(requestp->fd, requestp->offset, requestp->whence) >= 0)
 708 requestp->ret = read(requestp->fd, requestp->bufferp, requestp->buflen);
 709 else
 710 requestp->ret = -1;
 711 requestp->err = errno;
 712}
713
/*
 * Queue an async write from the caller's buffer (must stay valid until
 * completion). Always returns 0 (queued).
 * NOTE(review): the request-pool allocation line is missing from this
 * dump.
 */
 714int
 715squidaio_write(int fd, char *bufp, size_t bufs, off_t offset, int whence, squidaio_result_t * resultp)
 716{
 717 squidaio_request_t *requestp;
 718
 720
 721 requestp->fd = fd;
 722
 723 requestp->bufferp = bufp;
 724
 725 requestp->buflen = bufs;
 726
 727 requestp->offset = offset;
 728
 729 requestp->whence = whence;
 730
 731 requestp->resultp = resultp;
 732
 733 requestp->request_type = _AIO_OP_WRITE;
 734
 735 requestp->cancelled = 0;
 736
 737 resultp->result_type = _AIO_OP_WRITE;
 738
 739 squidaio_queue_request(requestp);
 740
 741 return 0;
 742}
 743
/*
 * Worker-side write(2). NOTE(review): unlike squidaio_do_read there is
 * no lseek here, so offset/whence are ignored and the write lands at
 * the fd's current position -- presumably callers open with O_APPEND;
 * verify against upstream callers.
 */
 744static void
 746{
 747 requestp->ret = write(requestp->fd, requestp->bufferp, requestp->buflen);
 748 requestp->err = errno;
 749}
750
/*
 * Queue an async close(fd). Always returns 0 (queued). If cancelled,
 * squidaio_cleanup_request still closes the fd so it cannot leak.
 * NOTE(review): the signature line and the request-pool allocation
 * line are missing from this dump.
 */
 751int
 753{
 754 squidaio_request_t *requestp;
 755
 757
 758 requestp->fd = fd;
 759
 760 requestp->resultp = resultp;
 761
 762 requestp->request_type = _AIO_OP_CLOSE;
 763
 764 requestp->cancelled = 0;
 765
 766 resultp->result_type = _AIO_OP_CLOSE;
 767
 768 squidaio_queue_request(requestp);
 769
 770 return 0;
 771}
 772
/* Worker-side close(2); stores result and errno on the request. */
 773static void
 775{
 776 requestp->ret = close(requestp->fd);
 777 requestp->err = errno;
 778}
779
/*
 * Queue an async stat(path). The worker fills a private tmpstatp; the
 * result is copied into the caller's sb only on successful,
 * non-cancelled completion (see squidaio_cleanup_request). Always
 * returns 0 (queued). NOTE(review): the request-pool allocation line
 * is missing from this dump.
 */
 780int
 781
 782squidaio_stat(const char *path, struct stat *sb, squidaio_result_t * resultp)
 783{
 785 squidaio_request_t *requestp;
 786
 788
 789 requestp->path = (char *) squidaio_xstrdup(path);
 790
 791 requestp->statp = sb;
 792
 793 requestp->tmpstatp = (struct stat *) squidaio_xmalloc(sizeof(struct stat));
 794
 795 requestp->resultp = resultp;
 796
 797 requestp->request_type = _AIO_OP_STAT;
 798
 799 requestp->cancelled = 0;
 800
 801 resultp->result_type = _AIO_OP_STAT;
 802
 803 squidaio_queue_request(requestp);
 804
 805 return 0;
 806}
 807
/* Worker-side stat(2) into the request's private buffer. */
 808static void
 810{
 811 requestp->ret = stat(requestp->path, requestp->tmpstatp);
 812 requestp->err = errno;
 813}
814
/*
 * Queue an async unlink(path). Always returns 0 (queued).
 * NOTE(review): the request-pool allocation line is missing from this
 * dump.
 */
 815int
 816squidaio_unlink(const char *path, squidaio_result_t * resultp)
 817{
 819 squidaio_request_t *requestp;
 820
 822
 823 requestp->path = squidaio_xstrdup(path);
 824
 825 requestp->resultp = resultp;
 826
 827 requestp->request_type = _AIO_OP_UNLINK;
 828
 829 requestp->cancelled = 0;
 830
 831 resultp->result_type = _AIO_OP_UNLINK;
 832
 833 squidaio_queue_request(requestp);
 834
 835 return 0;
 836}
 837
/* Worker-side unlink(2); stores result and errno on the request. */
 838static void
 840{
 841 requestp->ret = unlink(requestp->path);
 842 requestp->err = errno;
 843}
844
 845#if AIO_OPENDIR
 846/* XXX squidaio_opendir NOT implemented yet.. */
 847
/* Stub: allocates a request and tags the result but never queues it;
 * always returns -1. Compiled out unless AIO_OPENDIR is set. */
 848int
 849squidaio_opendir(const char *path, squidaio_result_t * resultp)
 850{
 851 squidaio_request_t *requestp;
 852 int len;
 853
 854 requestp = squidaio_request_pool->alloc();
 855
 856 resultp->result_type = _AIO_OP_OPENDIR;
 857
 858 return -1;
 859}
 860
/* Stub worker handler: intentionally a no-op. */
 861static void
 862squidaio_do_opendir(squidaio_request_t * requestp)
 863{
 864 /* NOT IMPLEMENTED */
 865}
 866
 867#endif
868
/*
 * Main-thread housekeeping: flush the request_queue2 overflow list into
 * the shared request queue (trylock, never blocks), then move any
 * finished requests from done_queue onto the main-thread-only
 * done_requests list for squidaio_poll_done() to consume.
 * NOTE(review): the signature line, the overflow-append lines, the
 * done_queue.tailp reset and request_queue_len accounting are missing
 * from this dump.
 */
 869static void
 871{
 872 /* kick "overflow" request queue */
 873
 874 if (request_queue2.head &&
 875 pthread_mutex_trylock(&request_queue.mutex) == 0) {
 878 pthread_cond_signal(&request_queue.cond);
 879 pthread_mutex_unlock(&request_queue.mutex);
 880 request_queue2.head = nullptr;
 881 request_queue2.tailp = &request_queue2.head;
 882 }
 883
 884 /* poll done queue */
 885 if (done_queue.head && pthread_mutex_trylock(&done_queue.mutex) == 0) {
 886
 887 struct squidaio_request_t *requests = done_queue.head;
 888 done_queue.head = nullptr;
 890 pthread_mutex_unlock(&done_queue.mutex);
 891 *done_requests.tailp = requests;
 893
/* Walk to the new tail of the transferred chain. */
 894 while (requests->next) {
 895 requests = requests->next;
 897 }
 898
 899 done_requests.tailp = &requests->next;
 900 }
 901}
902
/*
 * Pop the next completed, non-cancelled request and return its result
 * descriptor, or nullptr when nothing is ready. Polls the shared
 * queues once (at most) per call; cancelled completions are cleaned up
 * and skipped via AIO_REPOLL. NOTE(review): the signature line, local
 * declarations (request/resultp), the squidaio_poll_queues() call and
 * the cleanup/debug calls around line 935-939 are missing from this
 * dump.
 */
 905{
 908 int cancelled;
 909 int polled = 0;
 910
 911AIO_REPOLL:
 912 request = done_requests.head;
 913
 914 if (request == nullptr && !polled) {
 917 polled = 1;
 918 request = done_requests.head;
 919 }
 920
 921 if (!request) {
 922 return nullptr;
 923 }
 924
 925 debugs(43, 9, "squidaio_poll_done: " << request << " type=" << request->request_type << " result=" << request->resultp);
 926 done_requests.head = request->next;
 927
 928 if (!done_requests.head)
 929 done_requests.tailp = &done_requests.head;
 930
 931 resultp = request->resultp;
 932
 933 cancelled = request->cancelled;
 934
 936
 937 debugs(43, 5, "DONE: " << request->ret << " -> " << request->err);
 938
 940
 941 if (cancelled)
 942 goto AIO_REPOLL;
 943
 944 return resultp;
 945} /* squidaio_poll_done */
946
/* Pending work = queued requests plus (at most one reported) finished
 * result awaiting pickup. NOTE(review): signature lines for all three
 * functions below are missing from this dump. */
 947int
 949{
 950 return request_queue_len + (done_requests.head ? 1 : 0);
 951}
 952
/* Busy-drain the request queue (blocking); the poll call inside the
 * loop (line 959) and the return (line 962) are missing from this
 * dump. */
 953int
 955{
 956 /* XXX This might take a while if the queue is large.. */
 957
 958 do {
 960 } while (request_queue_len > 0);
 961
 963}
 964
/* Current depth of the worker request queue. */
 965int
 967{
 968 return request_queue_len;
 969}
970
/*
 * Emit a level-5 debug line describing a request, keyed by operation
 * type. NOTE(review): the signature line
 * "squidaio_debug(squidaio_request_t * request)" (line 972) is missing
 * from this dump.
 */
 971static void
 973{
 974 switch (request->request_type) {
 975
 976 case _AIO_OP_OPEN:
 977 debugs(43, 5, "OPEN of " << request->path << " to FD " << request->ret);
 978 break;
 979
 980 case _AIO_OP_READ:
 981 debugs(43, 5, "READ on fd: " << request->fd);
 982 break;
 983
 984 case _AIO_OP_WRITE:
 985 debugs(43, 5, "WRITE on fd: " << request->fd);
 986 break;
 987
 988 case _AIO_OP_CLOSE:
 989 debugs(43, 5, "CLOSE of fd: " << request->fd);
 990 break;
 991
 992 case _AIO_OP_UNLINK:
 993 debugs(43, 5, "UNLINK of " << request->path);
 994 break;
 995
 996 default:
 997 break;
 998 }
 999}
1000
/*
 * Append a per-thread request-count table to a cache-manager report.
 * NOTE(review): the signature line and the not-initialised guard
 * (line 1007) are missing from this dump. The loop assumes the threads
 * list holds exactly NUMTHREADS entries (built by squidaio_init).
 */
 1001void
 1003{
 1004 squidaio_thread_t *threadp;
 1005 int i;
 1006
 1008 return;
 1009
 1010 storeAppendPrintf(sentry, "\n\nThreads Status:\n");
 1011
 1012 storeAppendPrintf(sentry, "#\tID\t# Requests\n");
 1013
 1014 threadp = threads;
 1015
 1016 for (i = 0; i < NUMTHREADS; ++i) {
 1017 storeAppendPrintf(sentry, "%i\t0x%lx\t%ld\n", i + 1, (unsigned long)threadp->thread, threadp->requests);
 1018 threadp = threadp->next;
 1019 }
 1020}
1021
int squidaio_opendir(const char *, squidaio_result_t *)
enum _squidaio_request_type squidaio_request_type
Definition: DiskThreads.h:55
@ _AIO_OP_OPENDIR
Definition: DiskThreads.h:52
@ _AIO_OP_NONE
Definition: DiskThreads.h:46
@ _AIO_OP_WRITE
Definition: DiskThreads.h:49
@ _AIO_OP_UNLINK
Definition: DiskThreads.h:51
@ _AIO_OP_OPEN
Definition: DiskThreads.h:47
@ _AIO_OP_READ
Definition: DiskThreads.h:48
@ _AIO_OP_CLOSE
Definition: DiskThreads.h:50
@ _AIO_OP_STAT
Definition: DiskThreads.h:53
#define MAGIC1
Definition: DiskThreads.h:34
#define NUMTHREADS
Definition: DiskThreads.h:30
int size
Definition: ModDevPoll.cc:75
time_t squid_curtime
Definition: stub_libtime.cc:20
static void squidaio_debug(squidaio_request_t *)
Definition: aiops.cc:972
int squidaio_stat(const char *path, struct stat *sb, squidaio_result_t *resultp)
Definition: aiops.cc:782
#define AIO_TINY_BUFS
Definition: aiops.cc:111
static pthread_attr_t globattr
Definition: aiops.cc:143
int squidaio_unlink(const char *path, squidaio_result_t *resultp)
Definition: aiops.cc:816
int squidaio_operations_pending(void)
Definition: aiops.cc:948
squidaio_request_t * head
Definition: aiops.cc:126
static squidaio_request_queue_t request_queue
Definition: aiops.cc:123
static void squidaio_cleanup_request(squidaio_request_t *)
Definition: aiops.cc:561
static void squidaio_poll_queues(void)
Definition: aiops.cc:870
#define AIO_MEDIUM_BUFS
Definition: aiops.cc:109
static char * squidaio_xstrdup(const char *str)
Definition: aiops.cc:184
void * squidaio_thread_loop(void *)
Definition: aiops.cc:348
static void squidaio_do_stat(squidaio_request_t *)
Definition: aiops.cc:809
int squidaio_open(const char *path, int oflag, mode_t mode, squidaio_result_t *resultp)
Definition: aiops.cc:641
static squidaio_thread_t * threads
Definition: aiops.cc:105
static MemAllocator * squidaio_small_bufs
Definition: aiops.cc:116
static pthread_t main_thread
Definition: aiops.cc:148
struct squidaio_request_t squidaio_request_t
static void squidaio_do_open(squidaio_request_t *)
Definition: aiops.cc:668
void squidaio_init(void)
Definition: aiops.cc:219
static MemAllocator * squidaio_micro_bufs
Definition: aiops.cc:118
_squidaio_thread_status
Definition: aiops.cc:39
@ _THREAD_BUSY
Definition: aiops.cc:42
@ _THREAD_FAILED
Definition: aiops.cc:43
@ _THREAD_DONE
Definition: aiops.cc:44
@ _THREAD_WAITING
Definition: aiops.cc:41
@ _THREAD_STARTING
Definition: aiops.cc:40
static struct @49 done_requests
#define RIDICULOUS_LENGTH
Definition: aiops.cc:37
static squidaio_request_queue_t done_queue
Definition: aiops.cc:133
int squidaio_read(int fd, char *bufp, size_t bufs, off_t offset, int whence, squidaio_result_t *resultp)
Definition: aiops.cc:675
#define AIO_LARGE_BUFS
Definition: aiops.cc:108
static void squidaio_xstrfree(char *str)
Definition: aiops.cc:207
static void squidaio_do_close(squidaio_request_t *)
Definition: aiops.cc:774
static MemAllocator * squidaio_get_pool(int size)
Definition: aiops.cc:151
void squidaio_stats(StoreEntry *sentry)
Definition: aiops.cc:1002
static MemAllocator * squidaio_large_bufs
Definition: aiops.cc:114
void squidaio_shutdown(void)
Definition: aiops.cc:332
static struct @48 request_queue2
squidaio_result_t * squidaio_poll_done(void)
Definition: aiops.cc:904
enum _squidaio_thread_status squidaio_thread_status
Definition: aiops.cc:46
static MemAllocator * squidaio_thread_pool
Definition: aiops.cc:122
static MemAllocator * squidaio_tiny_bufs
Definition: aiops.cc:117
#define AIO_MICRO_BUFS
Definition: aiops.cc:112
static void squidaio_queue_request(squidaio_request_t *)
Definition: aiops.cc:467
squidaio_request_t ** tailp
Definition: aiops.cc:126
struct squidaio_request_queue_t squidaio_request_queue_t
static MemAllocator * squidaio_request_pool
Definition: aiops.cc:121
static void squidaio_do_read(squidaio_request_t *)
Definition: aiops.cc:705
int squidaio_close(int fd, squidaio_result_t *resultp)
Definition: aiops.cc:752
int squidaio_write(int fd, char *bufp, size_t bufs, off_t offset, int whence, squidaio_result_t *resultp)
Definition: aiops.cc:715
static void squidaio_do_write(squidaio_request_t *)
Definition: aiops.cc:745
void squidaio_xfree(void *p, int size)
Definition: aiops.cc:196
static void squidaio_do_unlink(squidaio_request_t *)
Definition: aiops.cc:839
int squidaio_sync(void)
Definition: aiops.cc:954
int squidaio_get_queue_len(void)
Definition: aiops.cc:966
int squidaio_cancel(squidaio_result_t *resultp)
Definition: aiops.cc:624
static int request_queue_len
Definition: aiops.cc:120
static int squidaio_initialised
Definition: aiops.cc:106
void * squidaio_xmalloc(int size)
Definition: aiops.cc:170
#define AIO_SMALL_BUFS
Definition: aiops.cc:110
static MemAllocator * squidaio_medium_bufs
Definition: aiops.cc:115
#define assert(EX)
Definition: assert.h:19
static void NotifyIOCompleted()
Definition: CommIO.h:36
static void NotifyIOClose()
Definition: CommIO.cc:38
static void ResetNotifications()
Definition: CommIO.cc:69
static void Initialize()
Definition: CommIO.cc:19
virtual void freeOne(void *)=0
virtual void * alloc()=0
enum _squidaio_request_type result_type
Definition: DiskThreads.h:64
#define DBG_IMPORTANT
Definition: Stream.h:41
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Stream.h:196
#define DBG_CRITICAL
Definition: Stream.h:40
void fatal(const char *message)
Definition: fatal.cc:28
#define memPoolCreate
Definition: Pool.h:325
#define xfree
#define xmalloc
static struct stat sb
Definition: squidclient.cc:71
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:828
unsigned long blocked
Definition: aiops.cc:76
squidaio_request_t *volatile head
Definition: aiops.cc:73
squidaio_request_t *volatile *volatile tailp
Definition: aiops.cc:74
pthread_cond_t cond
Definition: aiops.cc:72
pthread_mutex_t mutex
Definition: aiops.cc:71
unsigned long requests
Definition: aiops.cc:75
squidaio_result_t * resultp
Definition: aiops.cc:67
struct stat * tmpstatp
Definition: aiops.cc:64
struct squidaio_request_t * next
Definition: aiops.cc:50
struct stat * statp
Definition: aiops.cc:66
size_t buflen
Definition: aiops.cc:58
squidaio_request_type request_type
Definition: aiops.cc:51
char * bufferp
Definition: aiops.cc:57
squidaio_thread_t * next
Definition: aiops.cc:82
pthread_t thread
Definition: aiops.cc:83
struct squidaio_request_t * current_req
Definition: aiops.cc:86
squidaio_thread_status status
Definition: aiops.cc:84
unsigned long requests
Definition: aiops.cc:87
struct _request * request(char *urlin)
Definition: tcp-banger2.c:291
unsigned short mode_t
Definition: types.h:150

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors