1.2b20-1: Async I/O [updated patch]

From: Henrik Nordstrom <hno@dont-contact.us>
Date: Fri, 08 May 1998 22:57:42 +0000

Oops ;-)

Here is a largely reworked patch for async I/O. It completely replaces
my previous async-io patch (which was messy and broken).

* The read offset was not preserved, corrupting deferred hits on
pipelined connections. Reads and writes now carry an explicit offset
(the convention is sketched after this list).

* Main <-> I/O thread communication is done using a single request
pointer, optionally protected by a mutex (see the handoff sketch after
this list).

* The CPU spin situation is solved in two possible ways: either by
properly protecting the condition variable with a mutex, or by stepping
the timeout forward on each expiry instead of relying on squid_curtime
(see the wait-loop sketch after this list).

* Use a 50 msec select timeout for ASYNC_IO to keep things flowing
nicely.

* Check for completed threads when the request queue grows. Completed
requests are put on a FIFO queue until processed from the main loop,
so that finished threads can be reused more quickly (see the queue
sketch after this list).

* Fixed a silly bug in my last version of this patch. No idea how the
last patch could pass my tests...

* Avoid pointer aliasing. Some compilers/optimisations have trouble with
this (the old code was probably safe, but...).

* Some minor code cleanups.

* This patch is not messed up ;-)

* Removed compiler warnings.

- No more SIGCONT signal hack or cross-level hints. This was a bad move
for both performance and code design.

- Mutex signalling is optional (-DAIO_PROPER_MUTEX). Using a volatile
pointer should be safe on most, if not all, platforms (see the handoff
sketch below).
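
For reference, the new offset convention: aioRead() and aioWrite() now
take an offset argument. A non-negative offset means an absolute file
position (SEEK_SET); -1 means "continue at the current position" for
reads and "append" for writes. A minimal sketch of the read side as the
worker thread executes it (sketch_read_at is an illustrative name, not
the patch's; compare aio_do_read in the patch below):

    #include <sys/types.h>
    #include <unistd.h>

    /* Seek first, then read, as aio_do_read does.
     * offset >= 0: read at that absolute position (SEEK_SET).
     * offset < 0: don't move, continue where the fd is (SEEK_CUR). */
    static ssize_t
    sketch_read_at(int fd, char *buf, size_t len, off_t offset)
    {
        lseek(fd, offset >= 0 ? offset : 0,
              offset >= 0 ? SEEK_SET : SEEK_CUR);
        return read(fd, buf, len);
    }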
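
The main <-> thread handoff boils down to the pattern below (a sketch
with illustrative names; the real fields live in aio_thread_t).
Without -DAIO_PROPER_MUTEX, dispatch and completion detection are a
plain store and a plain read of a volatile pointer; with it, the same
accesses happen under the thread's mutex:

    #include <pthread.h>
    #include <stddef.h>

    struct work;                    /* the request being handed over */

    struct worker {
        pthread_cond_t cond;
        pthread_mutex_t mutex;      /* only used with -DAIO_PROPER_MUTEX */
        struct work *volatile req;  /* set by main, cleared by the thread */
    };

    /* Main thread: hand one request to an idle worker and wake it up. */
    static void
    dispatch(struct worker *w, struct work *r)
    {
        w->req = r;
        pthread_cond_signal(&w->cond);
    }

    /* Main thread: the worker signals completion by clearing ->req,
     * so polling for a finished thread is just a pointer read. */
    static int
    is_done(struct worker *w)
    {
        return w->req == NULL;
    }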
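
On the worker side, the non-mutex wait loop looks roughly like this
(a sketch; the patch's version is in aio_thread_loop). The timed wait
closes the race where the request is posted between the NULL check and
the condition wait, and since the worker steps the deadline itself
instead of re-reading a clock the main thread maintains, it cannot
degenerate into a busy spin when the main thread is suspended (paging)
and squid_curtime goes stale:

    #include <pthread.h>
    #include <stddef.h>
    #include <time.h>

    struct work;

    /* Block until *reqp becomes non-NULL. */
    static void
    wait_for_request(struct work *volatile *reqp,
                     pthread_cond_t *cond, pthread_mutex_t *mutex)
    {
        struct timespec deadline;
        deadline.tv_sec = time(NULL) + 1;  /* first recheck comes quickly */
        deadline.tv_nsec = 0;
        pthread_mutex_lock(mutex);
        while (*reqp == NULL) {
            /* Wakes on the signal or at the deadline, whichever comes
             * first; either way we recheck the pointer. */
            pthread_cond_timedwait(cond, mutex, &deadline);
            deadline.tv_sec += 3;          /* step our own clock */
        }
        pthread_mutex_unlock(mutex);
    }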
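
The completion queue is a singly linked list with head and tail
pointers: aio_poll_threads() appends each finished thread's request at
the tail and immediately returns the thread to the idle list, and
aio_poll_done() later drains the queue from the main loop. A minimal
sketch of the discipline (trimmed names, not the patch's exact code):

    #include <stddef.h>

    struct request {
        struct request *next;
        /* ... operation and result fields ... */
    };

    static struct request *done_head = NULL;
    static struct request *done_tail = NULL;

    /* Park a completed request until the main loop gets to it. */
    static void
    done_enqueue(struct request *r)
    {
        r->next = NULL;
        if (done_tail != NULL)
            done_tail->next = r;
        else
            done_head = r;
        done_tail = r;
    }

    /* Main loop: collect one completed request. */
    static struct request *
    done_dequeue(void)
    {
        struct request *r = done_head;
        if (r != NULL && (done_head = r->next) == NULL)
            done_tail = NULL;
        return r;
    }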

This patch has been tested quite heavily and seems to perform well.

I still haven't found the cause of another async-io problem: when
async-io is enabled, Squid only caches about half of the requests in my
tests, while without async-io everything gets cached as expected. I have
verified that this problem is present in an unpatched b20-1 and is not
caused by this patch or by any of the other 1.2b20-1 changes I have made.

Comments are welcome, as always.

/Henrik

[Attachment: squid-1.2.beta20-1.asyncio.patch]

Index: squid/src/protos.h
diff -u squid/src/protos.h:1.1.1.19 squid/src/protos.h:1.1.1.19.2.3
--- squid/src/protos.h:1.1.1.19 Sat Apr 25 14:47:53 1998
+++ squid/src/protos.h Mon May 4 22:42:54 1998
@@ -48,8 +48,8 @@
 extern void aioCancel(int, void *);
 extern void aioOpen(const char *, int, mode_t, AIOCB *, void *, void *);
 extern void aioClose(int);
-extern void aioWrite(int, char *, int, AIOCB *, void *);
-extern void aioRead(int, char *, int, AIOCB *, void *);
+extern void aioWrite(int, int offset, char *, int size, AIOCB *, void *);
+extern void aioRead(int, int offset, char *, int size, AIOCB *, void *);
 extern void aioStat(char *, struct stat *, AIOCB *, void *, void *);
 extern void aioUnlink(const char *, AIOCB *, void *);
 extern void aioCheckCallbacks(void);
Index: squid/src/comm.c
diff -u squid/src/comm.c:1.1.1.19 squid/src/comm.c:1.1.1.19.2.2
--- squid/src/comm.c:1.1.1.19 Sat Apr 25 14:47:46 1998
+++ squid/src/comm.c Thu May 7 19:59:53 1998
@@ -1066,8 +1066,13 @@
         if (nfds == 0)
             return COMM_SHUTDOWN;
         for (;;) {
+#if USE_ASYNC_IO
+ poll_time.tv_sec = 0;
+ poll_time.tv_usec = sec > 0 ? 50 : 0;
+#else
             poll_time.tv_sec = sec > 0 ? 1 : 0;
             poll_time.tv_usec = 0;
+#endif
             num = select(maxfd, &readfds, &writefds, NULL, &poll_time);
             Counter.select_loops++;
             if (num >= 0)
Index: squid/src/aiops.c
diff -u squid/src/aiops.c:1.1.1.7 squid/src/aiops.c:1.1.1.7.8.6
--- squid/src/aiops.c:1.1.1.7 Tue Mar 24 20:00:11 1998
+++ squid/src/aiops.c Fri May 8 22:22:30 1998
@@ -1,4 +1,3 @@
-
 /*
  * $Id$
  *
@@ -43,22 +42,27 @@
 #define NUMTHREADS 16
 #define RIDICULOUS_LENGTH 4096
 
-#define _THREAD_STARTING 0
-#define _THREAD_WAITING 1
-#define _THREAD_BUSY 2
-#define _THREAD_FAILED 3
-
-
-#define _AIO_OP_OPEN 0
-#define _AIO_OP_READ 1
-#define _AIO_OP_WRITE 2
-#define _AIO_OP_CLOSE 3
-#define _AIO_OP_UNLINK 4
-#define _AIO_OP_OPENDIR 5
-#define _AIO_OP_STAT 6
+enum _aio_thread_status {
+ _THREAD_STARTING=0,
+ _THREAD_WAITING,
+ _THREAD_BUSY,
+ _THREAD_FAILED,
+ _THREAD_DONE
+};
+
+enum _aio_request_type {
+ _AIO_OP_NONE=0,
+ _AIO_OP_OPEN,
+ _AIO_OP_READ,
+ _AIO_OP_WRITE,
+ _AIO_OP_CLOSE,
+ _AIO_OP_UNLINK,
+ _AIO_OP_OPENDIR,
+ _AIO_OP_STAT
+};
 
 typedef struct aio_request_t {
- int request_type;
+ enum _aio_request_type request_type;
     int cancelled;
     char *path;
     int oflag;
@@ -80,11 +84,11 @@
 
 typedef struct aio_thread_t {
     pthread_t thread;
- int status;
+ enum _aio_thread_status status;
     pthread_mutex_t mutex; /* Mutex for testing condition variable */
     pthread_cond_t cond; /* Condition variable */
- struct aio_request_t *req;
- struct aio_request_t *donereq;
+ struct aio_request_t *volatile req; /* set by main, cleared by thread */
+ struct aio_request_t *processed_req; /* reminder to main */
     struct aio_thread_t *next;
 } aio_thread_t;
 
@@ -99,22 +103,21 @@
 aio_result_t *aio_poll_done();
 
 static void aio_init(void);
-static void aio_free_thread(aio_thread_t *);
-static void aio_cleanup_and_free(aio_thread_t *);
 static void aio_queue_request(aio_request_t *);
 static void aio_process_request_queue(void);
 static void aio_cleanup_request(aio_request_t *);
 static void *aio_thread_loop(void *);
-static void aio_thread_open(aio_thread_t *);
-static void aio_thread_read(aio_thread_t *);
-static void aio_thread_write(aio_thread_t *);
-static void aio_thread_close(aio_thread_t *);
-static void aio_thread_stat(aio_thread_t *);
-static void aio_thread_unlink(aio_thread_t *);
+static void aio_do_open(aio_request_t *);
+static void aio_do_read(aio_request_t *);
+static void aio_do_write(aio_request_t *);
+static void aio_do_close(aio_request_t *);
+static void aio_do_stat(aio_request_t *);
+static void aio_do_unlink(aio_request_t *);
 #if AIO_OPENDIR
-static void *aio_thread_opendir(void *);
+static void aio_do_opendir(aio_request_t *);
 #endif
 static void aio_debug(aio_request_t *);
+static void aio_poll_threads(void);
 
 static aio_thread_t thread[NUMTHREADS];
 static int aio_initialised = 0;
@@ -124,17 +127,19 @@
 static int num_free_requests = 0;
 static aio_request_t *request_queue_head = NULL;
 static aio_request_t *request_queue_tail = NULL;
+static aio_request_t *request_done_head = NULL;
+static aio_request_t *request_done_tail = NULL;
 static aio_thread_t *wait_threads = NULL;
 static aio_thread_t *busy_threads_head = NULL;
 static aio_thread_t *busy_threads_tail = NULL;
 static pthread_attr_t globattr;
 static struct sched_param globsched;
+static pthread_t main_thread;
 
 static void
 aio_init(void)
 {
     int i;
- pthread_t self;
     aio_thread_t *threadp;
 
     if (aio_initialised)
@@ -143,8 +148,8 @@
     pthread_attr_init(&globattr);
     pthread_attr_setscope(&globattr, PTHREAD_SCOPE_SYSTEM);
     globsched.sched_priority = 1;
- self = pthread_self();
- pthread_setschedparam(self, SCHED_OTHER, &globsched);
+ main_thread = pthread_self();
+ pthread_setschedparam(main_thread, SCHED_OTHER, &globsched);
     globsched.sched_priority = 2;
     pthread_attr_setschedparam(&globattr, &globsched);
 
@@ -162,7 +167,7 @@
             continue;
         }
         threadp->req = NULL;
- threadp->donereq = NULL;
+ threadp->processed_req = NULL;
         if (pthread_create(&(threadp->thread), &globattr, aio_thread_loop, threadp)) {
             fprintf(stderr, "Thread creation failed\n");
             threadp->status = _THREAD_FAILED;
@@ -170,6 +175,9 @@
         }
         threadp->next = wait_threads;
         wait_threads = threadp;
+#if AIO_PROPER_MUTEX
+ pthread_mutex_lock(&threadp->mutex);
+#endif
     }
 
     aio_initialised = 1;
@@ -181,9 +189,10 @@
 {
     aio_thread_t *threadp = (aio_thread_t *) ptr;
     aio_request_t *request;
- struct timespec abstime;
- int ret;
     sigset_t new;
+#if !AIO_PROPER_MUTEX
+ struct timespec wait_time;
+#endif
 
     /* Make sure to ignore signals which may possibly get sent to the parent */
     /* squid thread. Causes havoc with mutex's and condition waits otherwise */
@@ -199,54 +208,66 @@
     sigaddset(&new, SIGALRM);
     pthread_sigmask(SIG_BLOCK, &new, NULL);
 
+ pthread_mutex_lock(&threadp->mutex);
     while (1) {
- /* BELOW is done because Solaris 2.5.1 doesn't support semaphores!!! */
- /* Use timed wait to avoid race where thread context switches after */
- /* threadp->status gets set but before the condition wait happens. */
- /* In that case, a race occurs when the parent signals the condition */
- /* but this thread will never receive it. Recheck every 2-3 secs. */
- /* Also provides bonus of keeping thread contexts hot in CPU cache */
- /* (ie. faster thread reactions) at slight expense of CPU time. */
+#if AIO_PROPER_MUTEX
+ while (threadp->req == NULL) {
+ threadp->status = _THREAD_WAITING;
+ pthread_cond_wait(&(threadp->cond), &(threadp->mutex));
+ }
+#else
+ /* The timed wait closes the race condition where
+ * ->req is set between the check and pthread_cond_wait.
+ * The thread steps its own clock on each timeout, to avoid a CPU
+ * spin situation if the main thread is suspended (paging) and
+ * squid_curtime is not being updated in a timely manner.
+ */
+ wait_time.tv_sec = squid_curtime + 1; /* little quicker first time */
+ wait_time.tv_nsec = 0;
         while (threadp->req == NULL) {
- abstime.tv_sec = squid_curtime + 3;
- abstime.tv_nsec = 0;
             threadp->status = _THREAD_WAITING;
- ret = pthread_cond_timedwait(&(threadp->cond),
- &(threadp->mutex),
- &abstime);
+ pthread_cond_timedwait(&(threadp->cond), &(threadp->mutex),
+ &wait_time);
+ wait_time.tv_sec += 3; /* then wait 3 seconds between each check */
         }
+#endif
         request = threadp->req;
- switch (request->request_type) {
- case _AIO_OP_OPEN:
- aio_thread_open(threadp);
- break;
- case _AIO_OP_READ:
- aio_thread_read(threadp);
- break;
- case _AIO_OP_WRITE:
- aio_thread_write(threadp);
- break;
- case _AIO_OP_CLOSE:
- aio_thread_close(threadp);
- break;
- case _AIO_OP_UNLINK:
- aio_thread_unlink(threadp);
- break;
-#if AIO_OPENDIR
- /* Opendir not implemented yet */
- case _AIO_OP_OPENDIR:
- aio_thread_opendir(threadp);
- break;
+ errno = 0;
+ if (!request->cancelled) {
+ switch (request->request_type) {
+ case _AIO_OP_OPEN:
+ aio_do_open(request);
+ break;
+ case _AIO_OP_READ:
+ aio_do_read(request);
+ break;
+ case _AIO_OP_WRITE:
+ aio_do_write(request);
+ break;
+ case _AIO_OP_CLOSE:
+ aio_do_close(request);
+ break;
+ case _AIO_OP_UNLINK:
+ aio_do_unlink(request);
+ break;
+#if AIO_OPENDIR /* Opendir not implemented yet */
+ case _AIO_OP_OPENDIR:
+ aio_do_opendir(request);
+ break;
 #endif
- case _AIO_OP_STAT:
- aio_thread_stat(threadp);
- break;
- default:
- threadp->donereq->ret = -1;
- threadp->donereq->err = EINVAL;
- break;
+ case _AIO_OP_STAT:
+ aio_do_stat(request);
+ break;
+ default:
+ request->ret = -1;
+ request->err = EINVAL;
+ break;
+ }
+ } else { /* cancelled */
+ request->ret = -1;
+ request->err = EINTR;
         }
- threadp->req = NULL;
+ threadp->req = NULL; /* tells main thread that we are done */
     } /* while */
 } /* aio_thread_loop */
 
@@ -259,6 +280,7 @@
     if ((req = free_requests) != NULL) {
         free_requests = req->next;
         num_free_requests--;
+ req->next = NULL;
         return req;
     }
     return (aio_request_t *) xmalloc(sizeof(aio_request_t));
@@ -272,11 +294,13 @@
     /* it reflects the sort of load the squid server will experience. A */
     /* higher load will mean a need for more threads, which will in turn mean */
     /* a need for a bigger free request pool. */
+ /* Threads <-> requests are now partially asynchronous, so use NUMTHREADS * 2 */
 
- if (num_free_requests >= NUMTHREADS) {
+ if (num_free_requests >= NUMTHREADS * 2) {
         xfree(req);
         return;
     }
+ memset(req, 0, sizeof(*req));
     req->next = free_requests;
     free_requests = req;
     num_free_requests++;
@@ -286,8 +310,6 @@
 static void
 aio_do_request(aio_request_t * requestp)
 {
- aio_thread_t *threadp;
-
     if (wait_threads == NULL && busy_threads_head == NULL) {
         fprintf(stderr, "PANIC: No threads to service requests with!\n");
         exit(-1);
@@ -312,9 +334,12 @@
         request_queue_tail = requestp;
     }
     requestp->next = NULL;
- if (++request_queue_len > NUMTHREADS) {
+ request_queue_len += 1;
+ if (wait_threads == NULL)
+ aio_poll_threads();
+ if (wait_threads == NULL) {
         if (squid_curtime > (last_warn + 15)) {
- debug(43, 1) ("aio_queue_request: WARNING - Async request queue growing: Length = %d\n", request_queue_len);
+ debug(43, 1) ("aio_queue_request: WARNING - Out of service threads. Queue Length = %d\n", request_queue_len);
             debug(43, 1) ("aio_queue_request: Perhaps you should increase NUMTHREADS in aiops.c\n");
             debug(43, 1) ("aio_queue_request: First %d items on request queue\n", NUMTHREADS);
             rp = request_queue_head;
@@ -367,8 +392,9 @@
             return;
 
         requestp = request_queue_head;
- if ((request_queue_head = request_queue_head->next) == NULL)
+ if ((request_queue_head = requestp->next) == NULL)
             request_queue_tail = NULL;
+ requestp->next = NULL;
         request_queue_len--;
 
         if (requestp->cancelled) {
@@ -376,19 +402,21 @@
             continue;
         }
         threadp = wait_threads;
- wait_threads = wait_threads->next;
+ wait_threads = threadp->next;
+ threadp->next = NULL;
 
- threadp->req = requestp;
- threadp->donereq = requestp;
         if (busy_threads_head != NULL)
             busy_threads_tail->next = threadp;
         else
             busy_threads_head = threadp;
         busy_threads_tail = threadp;
- threadp->next = NULL;
 
         threadp->status = _THREAD_BUSY;
+ threadp->req = threadp->processed_req = requestp;
         pthread_cond_signal(&(threadp->cond));
+#if AIO_PROPER_MUTEX
+ pthread_mutex_unlock(&threadp->mutex);
+#endif
     }
 } /* aio_process_request_queue */
 
@@ -420,7 +448,7 @@
     default:
         break;
     }
- if (!cancelled) {
+ if (resultp != NULL && !cancelled) {
         resultp->aio_return = requestp->ret;
         resultp->aio_errno = requestp->err;
     }
@@ -433,15 +461,26 @@
 {
     aio_thread_t *threadp;
     aio_request_t *requestp;
- int ret;
 
     for (threadp = busy_threads_head; threadp != NULL; threadp = threadp->next)
- if (threadp->donereq->resultp == resultp)
- threadp->donereq->cancelled = 1;
+ if (threadp->processed_req->resultp == resultp) {
+ threadp->processed_req->cancelled = 1;
+ threadp->processed_req->resultp = NULL;
+ return 0;
+ }
     for (requestp = request_queue_head; requestp != NULL; requestp = requestp->next)
- if (requestp->resultp == resultp)
+ if (requestp->resultp == resultp) {
             requestp->cancelled = 1;
- return 0;
+ requestp->resultp = NULL;
+ return 0;
+ }
+ for (requestp = request_done_head; requestp != NULL; requestp = requestp->next)
+ if (requestp->resultp == resultp) {
+ requestp->cancelled = 1;
+ requestp->resultp = NULL;
+ return 0;
+ }
+ return 1;
 } /* aio_cancel */
 
 
@@ -476,10 +515,8 @@
 
 
 static void
-aio_thread_open(aio_thread_t * threadp)
+aio_do_open(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     requestp->ret = open(requestp->path, requestp->oflag, requestp->mode);
     requestp->err = errno;
 }
@@ -516,10 +553,8 @@
 
 
 static void
-aio_thread_read(aio_thread_t * threadp)
+aio_do_read(aio_request_t * requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     lseek(requestp->fd, requestp->offset, requestp->whence);
     requestp->ret = read(requestp->fd, requestp->tmpbufp, requestp->buflen);
     requestp->err = errno;
@@ -557,10 +592,8 @@
 
 
 static void
-aio_thread_write(aio_thread_t * threadp)
+aio_do_write(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     requestp->ret = write(requestp->fd, requestp->tmpbufp, requestp->buflen);
     requestp->err = errno;
 }
@@ -588,10 +621,8 @@
 
 
 static void
-aio_thread_close(aio_thread_t * threadp)
+aio_do_close(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     requestp->ret = close(requestp->fd);
     requestp->err = errno;
 }
@@ -633,10 +664,8 @@
 
 
 static void
-aio_thread_stat(aio_thread_t * threadp)
+aio_do_stat(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     requestp->ret = stat(requestp->path, requestp->tmpstatp);
     requestp->err = errno;
 }
@@ -671,10 +700,8 @@
 
 
 static void
-aio_thread_unlink(aio_thread_t * threadp)
+aio_do_unlink(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
-
     requestp->ret = unlink(requestp->path);
     requestp->err = errno;
 }
@@ -698,60 +725,96 @@
     return -1;
 }
 
-static void *
-aio_thread_opendir(aio_thread_t * threadp)
+static void
+aio_do_opendir(aio_request_t *requestp)
 {
- aio_request_t *requestp = threadp->req;
- aio_result_t *resultp = requestp->resultp;
-
- return threadp;
+ /* NOT IMPLEMENTED */
 }
 #endif
 
 
-aio_result_t *
-aio_poll_done()
+static void
+aio_poll_threads(void)
 {
     aio_thread_t *prev;
     aio_thread_t *threadp;
     aio_request_t *requestp;
+
+ do { /* while found completed thread */
+ prev = NULL;
+ threadp = busy_threads_head;
+ while (threadp) {
+ debug(43, 3) ("%d: %d -> %d\n",
+ threadp->thread,
+ threadp->processed_req->request_type, /* ->req may already be NULL */
+ threadp->status);
+#if AIO_PROPER_MUTEX
+ if (threadp->req == NULL)
+ if (pthread_mutex_trylock(&threadp->mutex) == 0)
+ break;
+#else
+ if (threadp->req == NULL)
+ break;
+#endif
+ prev = threadp;
+ threadp = threadp->next;
+ }
+ if (threadp == NULL)
+ break; /* no more completed threads; dispatch queued work below */
+
+ if (prev == NULL)
+ busy_threads_head = busy_threads_head->next;
+ else
+ prev->next = threadp->next;
+
+ if (busy_threads_tail == threadp)
+ busy_threads_tail = prev;
+
+ requestp = threadp->processed_req;
+ threadp->processed_req = NULL;
+
+ threadp->next = wait_threads;
+ wait_threads = threadp;
+
+ if (request_done_tail != NULL)
+ request_done_tail->next = requestp;
+ else
+ request_done_head = requestp;
+ request_done_tail = requestp;
+ } while(threadp);
+
+ aio_process_request_queue();
+} /* aio_poll_threads */
+
+aio_result_t *
+aio_poll_done()
+{
+ aio_request_t *requestp, *prev;
     aio_result_t *resultp;
     int cancelled;
 
   AIO_REPOLL:
+ aio_poll_threads();
+ if (request_done_head == NULL) {
+ return NULL;
+ }
     prev = NULL;
- threadp = busy_threads_head;
- while (threadp) {
- debug(43, 3) ("%d: %d -> %d\n",
- threadp->thread,
- threadp->donereq->request_type,
- threadp->status);
- if (!threadp->req)
- break;
- prev = threadp;
- threadp = threadp->next;
+ requestp = request_done_head;
+ while (requestp->next) {
+ prev = requestp;
+ requestp = requestp->next;
     }
- if (threadp == NULL)
- return NULL;
-
     if (prev == NULL)
- busy_threads_head = busy_threads_head->next;
+ request_done_head = requestp->next;
     else
- prev->next = threadp->next;
+ prev->next = requestp->next;
+ request_done_tail = prev;
 
- if (busy_threads_tail == threadp)
- busy_threads_tail = prev;
-
- requestp = threadp->donereq;
- threadp->donereq = NULL;
     resultp = requestp->resultp;
+ cancelled = requestp->cancelled;
     aio_debug(requestp);
     debug(43, 3) ("DONE: %d -> %d\n", requestp->ret, requestp->err);
- threadp->next = wait_threads;
- wait_threads = threadp;
- cancelled = requestp->cancelled;
     aio_cleanup_request(requestp);
- aio_process_request_queue();
     if (cancelled)
         goto AIO_REPOLL;
     return resultp;
Index: squid/src/async_io.c
diff -u squid/src/async_io.c:1.1.1.4 squid/src/async_io.c:1.1.1.4.16.1
--- squid/src/async_io.c:1.1.1.4 Thu Feb 5 22:44:58 1998
+++ squid/src/async_io.c Mon May 4 22:45:23 1998
@@ -189,9 +189,10 @@
 
 
 void
-aioWrite(int fd, char *bufp, int len, AIOCB * callback, void *callback_data)
+aioWrite(int fd, int offset, char *bufp, int len, AIOCB * callback, void *callback_data)
 {
     aio_ctrl_t *ctrlp;
+ int seekmode;
 
     if (!initialised)
         aioInit();
@@ -216,7 +217,13 @@
     ctrlp->done_handler = callback;
     ctrlp->done_handler_data = callback_data;
     ctrlp->operation = _AIO_WRITE;
- if (aio_write(fd, bufp, len, 0, SEEK_END, &(ctrlp->result)) < 0) {
+ if (offset >= 0)
+ seekmode = SEEK_SET;
+ else {
+ seekmode = SEEK_END;
+ offset = 0;
+ }
+ if (aio_write(fd, bufp, len, offset, seekmode, &(ctrlp->result)) < 0) {
         if (errno == ENOMEM || errno == EAGAIN || errno == EINVAL)
             errno = EWOULDBLOCK;
         if (callback)
@@ -231,9 +238,10 @@
 
 
 void
-aioRead(int fd, char *bufp, int len, AIOCB * callback, void *callback_data)
+aioRead(int fd, int offset, char *bufp, int len, AIOCB * callback, void *callback_data)
 {
     aio_ctrl_t *ctrlp;
+ int seekmode;
 
     if (!initialised)
         aioInit();
@@ -258,7 +266,13 @@
     ctrlp->done_handler = callback;
     ctrlp->done_handler_data = callback_data;
     ctrlp->operation = _AIO_READ;
- if (aio_read(fd, bufp, len, 0, SEEK_CUR, &(ctrlp->result)) < 0) {
+ if (offset >= 0)
+ seekmode = SEEK_SET;
+ else {
+ seekmode = SEEK_CUR;
+ offset = 0;
+ }
+ if (aio_read(fd, bufp, len, offset, seekmode, &(ctrlp->result)) < 0) {
         if (errno == ENOMEM || errno == EAGAIN || errno == EINVAL)
             errno = EWOULDBLOCK;
         if (callback)
Index: squid/src/disk.c
diff -u squid/src/disk.c:1.1.1.10 squid/src/disk.c:1.1.1.10.6.1
--- squid/src/disk.c:1.1.1.10 Wed Apr 8 22:07:11 1998
+++ squid/src/disk.c Mon May 4 22:46:46 1998
@@ -241,6 +241,7 @@
     debug(6, 3) ("diskHandleWrite: FD %d\n", fd);
     /* We need to combine subsequent write requests after the first */
     /* But only if we don't need to seek() in betwen them, ugh! */
+ /* XXX This currently ignores any seeks (file_offset) */
     if (fdd->write_q->next != NULL && fdd->write_q->next->next != NULL) {
         len = 0;
         for (q = fdd->write_q->next; q != NULL; q = q->next)
@@ -273,6 +274,7 @@
     assert(fdd->write_q->len > fdd->write_q->buf_offset);
 #if USE_ASYNC_IO
     aioWrite(fd,
+ -1, /* seek offset, -1 == append */
         fdd->write_q->buf + fdd->write_q->buf_offset,
         fdd->write_q->len - fdd->write_q->buf_offset,
         diskHandleWriteComplete,
@@ -480,6 +482,7 @@
     ctrlp->data = ctrl_dat;
 #if USE_ASYNC_IO
     aioRead(fd,
+ ctrl_dat->offset,
         ctrl_dat->buf,
         ctrl_dat->req_len,
         diskHandleReadComplete,
