Remove the HttpStateData::orig_request member

When the request is going to a cache peer (i.e., _peer is set), the HttpStateData constructor creates a new, special HttpRequest, overwriting the request pointer that the parent (ServerStateData) constructor had set to fwd->request. This special HttpRequest carries the proper urlpath (which may differ from the original HttpRequest's), sets the host (HttpRequest::SetHost/GetHost) to the peer hostname, inherits flags, protocol and method, and sets HttpRequest::flags.proxying. This was probably done originally just to handle the differences in urlpath and host, but the result is that HttpStateData holds two HttpRequest objects whose respective roles are unclear.

This also causes several bugs:

1) Bug 2621: http://bugs.squid-cache.org/show_bug.cgi?id=2621

2) The request_header_access configuration parameter does not work when sending requests to parent proxies.

3) Squid may cache replies to requests carrying "Cache-Control: no-store" when a parent cache is used. HttpStateData::cacheableReply contains the following code:

       if (request && request->cache_control &&
           EBIT_TEST(request->cache_control->mask, CC_NO_STORE) &&
           !REFRESH_OVERRIDE(ignore_no_store))
           return 0;

   When the request is destined for a parent cache, request->cache_control does not exist, because the special proxy HttpRequest never copied it. The check above is meant to stop us from caching replies to no-store requests, but it is silently skipped when a parent cache is used, so such replies may be cached. (A small self-contained sketch at the end of this message illustrates the effect.)

4) Inside the HttpStateData::sendRequest() method:

       if (neighborType(_peer, request) == PEER_SIBLING && !_peer->options.allow_miss)
           flags.only_if_cached = 1;

   If I am not wrong, parent caches that have been configured as "sibling" for specific domains using the neighbor_type_domain parameter are not taken into account here.

This patch removes the HttpStateData::orig_request member and uses only the HttpStateData::request member.

=== modified file 'src/htcp.cc' --- src/htcp.cc 2011-05-13 08:13:01 +0000 +++ src/htcp.cc 2011-06-24 13:00:34 +0000 @@ -1572,41 +1572,41 @@ HttpHeader hdr(hoRequest); Packer pa; MemBuf mb; http_state_flags flags; if (!Comm::IsConnOpen(htcpIncomingConn)) return 0; old_squid_format = p->options.htcp_oldsquid; memset(&flags, '\0', sizeof(flags)); snprintf(vbuf, sizeof(vbuf), "%d/%d", req->http_ver.major, req->http_ver.minor); stuff.op = HTCP_TST; stuff.rr = RR_REQUEST; stuff.f1 = 1; stuff.response = 0; stuff.msg_id = ++msg_id_counter; stuff.S.method = (char *) RequestMethodStr(req->method); stuff.S.uri = (char *) e->url(); stuff.S.version = vbuf; - HttpStateData::httpBuildRequestHeader(req, req, e, &hdr, flags); + HttpStateData::httpBuildRequestHeader(req, e, &hdr, flags); mb.init(); packerToMemInit(&pa, &mb); hdr.packInto(&pa); hdr.clean(); packerClean(&pa); stuff.S.req_hdrs = mb.buf; pktlen = htcpBuildPacket(pkt, sizeof(pkt), &stuff); mb.clean(); if (!pktlen) { debugs(31, 3, "htcpQuery: htcpBuildPacket() failed"); return -1; } htcpSend(pkt, (int) pktlen, p->in_addr); queried_id[stuff.msg_id % N_QUERIED_KEYS] = stuff.msg_id; save_key = queried_keys[stuff.msg_id % N_QUERIED_KEYS]; storeKeyCopy(save_key, (const cache_key *)e->key); queried_addr[stuff.msg_id % N_QUERIED_KEYS] = p->in_addr; debugs(31, 3, "htcpQuery: key (" << save_key << ") " << storeKeyText(save_key)); @@ -1643,41 +1643,41 @@ stuff.msg_id = ++msg_id_counter; switch (reason) { case HTCP_CLR_INVALIDATION: stuff.reason = 1; break; default: stuff.reason = 0; break; } stuff.S.method = (char *) RequestMethodStr(req->method); if (e == NULL || e->mem_obj == NULL) { if (uri == NULL) { return; } stuff.S.uri =
xstrdup(uri); } else { stuff.S.uri = (char *) e->url(); } stuff.S.version = vbuf; if (reason != HTCP_CLR_INVALIDATION) { - HttpStateData::httpBuildRequestHeader(req, req, e, &hdr, flags); + HttpStateData::httpBuildRequestHeader(req, e, &hdr, flags); mb.init(); packerToMemInit(&pa, &mb); hdr.packInto(&pa); hdr.clean(); packerClean(&pa); stuff.S.req_hdrs = mb.buf; } else { stuff.S.req_hdrs = NULL; } pktlen = htcpBuildPacket(pkt, sizeof(pkt), &stuff); if (reason != HTCP_CLR_INVALIDATION) { mb.clean(); } if (e == NULL) { xfree(stuff.S.uri); } if (!pktlen) { debugs(31, 3, "htcpClear: htcpBuildPacket() failed"); return; } === modified file 'src/http.cc' --- src/http.cc 2011-06-23 21:03:57 +0000 +++ src/http.cc 2011-06-24 15:25:05 +0000 @@ -66,123 +66,98 @@ #include "protos.h" #include "rfc1738.h" #include "SquidTime.h" #include "Store.h" #define SQUID_ENTER_THROWING_CODE() try { #define SQUID_EXIT_THROWING_CODE(status) \ status = true; \ } \ catch (const std::exception &e) { \ debugs (11, 1, "Exception error:" << e.what()); \ status = false; \ } CBDATA_CLASS_INIT(HttpStateData); static const char *const crlf = "\r\n"; static void httpMaybeRemovePublic(StoreEntry *, http_status); -static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request, +static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags); HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"), ServerStateData(theFwdState), lastChunk(0), header_bytes_read(0), reply_bytes_read(0), body_bytes_truncated(0), httpChunkDecoder(NULL) { debugs(11,5,HERE << "HttpStateData " << this << " created"); ignoreCacheControl = false; surrogateNoStore = false; serverConnection = fwd->serverConnection(); readBuf = new MemBuf; readBuf->init(16*1024, 256*1024); - orig_request = HTTPMSGLOCK(fwd->request); // reset peer response time stats for %hier.peer_http_request_sent.tv_sec = 0; - orig_request->hier.peer_http_request_sent.tv_usec = 0; + request->hier.peer_http_request_sent.tv_sec = 0; + request->hier.peer_http_request_sent.tv_usec = 0; if (fwd->serverConnection() != NULL) _peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */ if (_peer) { - const char *url; - - if (_peer->options.originserver) - url = orig_request->urlpath.termedBuf(); - else - url = entry->url(); - - HttpRequest * proxy_req = new HttpRequest(orig_request->method, orig_request->protocol, url); - - proxy_req->SetHost(_peer->host); - - proxy_req->port = _peer->http_port; - - proxy_req->flags = orig_request->flags; - - proxy_req->lastmod = orig_request->lastmod; - - proxy_req->flags.proxying = 1; - - HTTPMSGUNLOCK(request); - - request = HTTPMSGLOCK(proxy_req); - + request->flags.proxying = 1; /* * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here. * We might end up getting the object from somewhere else if, * for example, the request to this neighbor fails. 
*/ if (_peer->options.proxy_only) entry->releaseRequest(); #if USE_DELAY_POOLS entry->setNoDelay(_peer->options.no_delay); #endif } /* * register the handler to free HTTP state data when the FD closes */ typedef CommCbMemFunT Dialer; closeHandler = JobCallback(9, 5, Dialer, this, HttpStateData::httpStateConnClosed); comm_add_close_handler(serverConnection->fd, closeHandler); } HttpStateData::~HttpStateData() { /* * don't forget that ~ServerStateData() gets called automatically */ if (!readBuf->isNull()) readBuf->clean(); delete readBuf; if (httpChunkDecoder) delete httpChunkDecoder; - HTTPMSGUNLOCK(orig_request); - cbdataReferenceDone(_peer); debugs(11,5, HERE << "HttpStateData " << this << " destroyed; " << serverConnection); } const Comm::ConnectionPointer & HttpStateData::dataConnection() const { return serverConnection; } /* static void httpStateFree(int fd, void *data) { HttpStateData *httpState = static_cast(data); debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data); delete httpState; }*/ @@ -716,96 +691,96 @@ header_bytes_read = headersEnd(readBuf->content(), readBuf->contentSize()); readBuf->consume(header_bytes_read); } newrep->removeStaleWarnings(); if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->sline.status >= 100 && newrep->sline.status < 200) { handle1xx(newrep); ctx_exit(ctx); return; } flags.chunked = 0; if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) { flags.chunked = 1; httpChunkDecoder = new ChunkedCodingParser; } if (!peerSupportsConnectionPinning()) - orig_request->flags.connection_auth_disabled = 1; + request->flags.connection_auth_disabled = 1; HttpReply *vrep = setVirginReply(newrep); flags.headers_parsed = 1; keepaliveAccounting(vrep); checkDateSkew(vrep); processSurrogateControl (vrep); /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header * Parse the header and remove all referenced headers */ - orig_request->hier.peer_reply_status = newrep->sline.status; + request->hier.peer_reply_status = newrep->sline.status; ctx_exit(ctx); } /// ignore or start forwarding the 1xx response (a.k.a., control message) void HttpStateData::handle1xx(HttpReply *reply) { HttpMsgPointerT msg(reply); // will destroy reply if unused // one 1xx at a time: we must not be called while waiting for previous 1xx Must(!flags.handling1xx); flags.handling1xx = true; - if (!orig_request->canHandle1xx()) { + if (!request->canHandle1xx()) { debugs(11, 2, HERE << "ignoring client-unsupported 1xx"); proceedAfter1xx(); return; } #if USE_HTTP_VIOLATIONS // check whether the 1xx response forwarding is allowed by squid.conf if (Config.accessList.reply) { ACLFilledChecklist ch(Config.accessList.reply, originalRequest(), NULL); ch.reply = HTTPMSGLOCK(reply); if (!ch.fastCheck()) { // TODO: support slow lookups? debugs(11, 3, HERE << "ignoring denied 1xx"); proceedAfter1xx(); return; } } #endif // USE_HTTP_VIOLATIONS debugs(11, 2, HERE << "forwarding 1xx to client"); // the Sink will use this to call us back after writing 1xx to the client typedef NullaryMemFunT CbDialer; const AsyncCall::Pointer cb = JobCallback(11, 3, CbDialer, this, HttpStateData::proceedAfter1xx); - CallJobHere1(11, 4, orig_request->clientConnectionManager, ConnStateData, + CallJobHere1(11, 4, request->clientConnectionManager, ConnStateData, ConnStateData::sendControlMsg, HttpControlMsg(msg, cb)); // If the call is not fired, then the Sink is gone, and HttpStateData // will terminate due to an aborted store entry or another similar error. 
// If we get stuck, it is not handle1xx fault if we could get stuck // for similar reasons without a 1xx response. } /// restores state and resumes processing after 1xx is ignored or forwarded void HttpStateData::proceedAfter1xx() { Must(flags.handling1xx); debugs(11, 2, HERE << "consuming " << header_bytes_read << " header and " << reply_bytes_read << " body bytes read after 1xx"); header_bytes_read = 0; reply_bytes_read = 0; CallJobHere(11, 3, this, HttpStateData, HttpStateData::processReply); } @@ -882,41 +857,41 @@ Ctx ctx = ctx_enter(entry->mem_obj->url); HttpReply *rep = finalReply(); if (rep->sline.status == HTTP_PARTIAL_CONTENT && rep->content_range) currentOffset = rep->content_range->spec.offset; entry->timestampsSet(); /* Check if object is cacheable or not based on reply code */ debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep->sline.status); if (neighbors_do_private_keys) httpMaybeRemovePublic(entry, rep->sline.status); if (rep->header.has(HDR_VARY) #if X_ACCELERATOR_VARY || rep->header.has(HDR_X_ACCELERATOR_VARY) #endif ) { - const char *vary = httpMakeVaryMark(orig_request, rep); + const char *vary = httpMakeVaryMark(request, rep); if (!vary) { entry->makePrivate(); if (!fwd->reforwardableStatus(rep->sline.status)) EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT); goto no_cache; } entry->mem_obj->vary_headers = xstrdup(vary); } /* * If its not a reply that we will re-forward, then * allow the client to get it. */ if (!fwd->reforwardableStatus(rep->sline.status)) EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT); switch (cacheableReply()) { @@ -967,41 +942,41 @@ { const HttpReply *rep = virginReply(); /** \par * If the reply wants to close the connection, it takes precedence */ if (httpHeaderHasConnDir(&rep->header, "close")) return COMPLETE_NONPERSISTENT_MSG; /** \par * If we didn't send a keep-alive request header, then this * can not be a persistent connection. */ if (!flags.keepalive) return COMPLETE_NONPERSISTENT_MSG; /** \par * If we haven't sent the whole request then this can not be a persistent * connection. */ if (!flags.request_sent) { - debugs(11, 2, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(orig_request->method) << " " << entry->url() << "\"" ); + debugs(11, 2, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(request->method) << " " << entry->url() << "\"" ); return COMPLETE_NONPERSISTENT_MSG; } /** \par * What does the reply have to say about keep-alive? */ /** \bug XXX BUG? * If the origin server (HTTP/1.0) does not send a keep-alive * header, but keeps the connection open anyway, what happens? * We'll return here and http.c waits for an EOF before changing * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT * and an error status code, and we might have to wait until * the server times out the socket. 
*/ if (!rep->keep_alive) return COMPLETE_NONPERSISTENT_MSG; return COMPLETE_PERSISTENT_MSG; } @@ -1119,42 +1094,42 @@ // update I/O stats if (len > 0) { readBuf->appended(len); reply_bytes_read += len; #if USE_DELAY_POOLS DelayId delayId = entry->mem_obj->mostBytesAllowed(); delayId.bytesIn(len); #endif kb_incr(&statCounter.server.all.kbytes_in, len); kb_incr(&statCounter.server.http.kbytes_in, len); IOStats.Http.reads++; for (clen = len - 1, bin = 0; clen; bin++) clen >>= 1; IOStats.Http.read_hist[bin]++; // update peer response time stats (%hier.peer_http_request_sent; - orig_request->hier.peer_response_time = + const timeval &sent = request->hier.peer_http_request_sent; + request->hier.peer_response_time = sent.tv_sec ? tvSubMsec(sent, current_time) : -1; } /** \par * Here the RFC says we should ignore whitespace between replies, but we can't as * doing so breaks HTTP/0.9 replies beginning with witespace, and in addition * the response splitting countermeasures is extremely likely to trigger on this, * not allowing connection reuse in the first place. */ #if DONT_DO_THIS if (!flags.headers_parsed && len > 0 && fd_table[serverConnection->fd].uses > 1) { /* Skip whitespace between replies */ while (len > 0 && xisspace(*buf)) memmove(buf, buf + 1, len--); if (len == 0) { /* Continue to read... */ /* Timeout NOT increased. This whitespace was from previous reply */ flags.do_next_read = 1; @@ -1232,62 +1207,62 @@ if (!flags.headers_parsed && !eof) { debugs(11, 9, HERE << "needs more at " << readBuf->contentSize()); flags.do_next_read = 1; /** \retval false If we have not finished parsing the headers and may get more data. * Schedules more reads to retrieve the missing data. */ maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename return false; } /** If we are done with parsing, check for errors */ err_type error = ERR_NONE; if (flags.headers_parsed) { // parsed headers, possibly with errors // check for header parsing errors if (HttpReply *vrep = virginReply()) { const http_status s = vrep->sline.status; const HttpVersion &v = vrep->sline.version; if (s == HTTP_INVALID_HEADER && v != HttpVersion(0,9)) { - debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() ); + debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() ); error = ERR_INVALID_RESP; } else if (s == HTTP_HEADER_TOO_LARGE) { fwd->dontRetry(true); error = ERR_TOO_BIG; } else { return true; // done parsing, got reply, and no error } } else { // parsed headers but got no reply - debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() ); + debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() ); error = ERR_INVALID_RESP; } } else { assert(eof); if (readBuf->hasContent()) { error = ERR_INVALID_RESP; - debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() ); + debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() 
); } else { error = ERR_ZERO_SIZE_OBJECT; - debugs(11, (orig_request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << - entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() ); + debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << + entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() ); } } assert(error != ERR_NONE); entry->reset(); fwd->fail(errorCon(error, HTTP_BAD_GATEWAY, fwd->request)); flags.do_next_read = 0; serverConnection->close(); return false; // quit on error } /** truncate what we read if we read too much so that writeReplyBody() writes no more than what we should have read */ void HttpStateData::truncateVirginBody() { assert(flags.headers_parsed); HttpReply *vrep = virginReply(); int64_t clen = -1; @@ -1402,55 +1377,55 @@ if (flags.keepalive_broken) { commSetConnTimeout(serverConnection, 10, nil); } else { commSetConnTimeout(serverConnection, Config.Timeout.read, nil); } flags.do_next_read = 1; } break; case COMPLETE_PERSISTENT_MSG: debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection); /* yes we have to clear all these! */ commUnsetConnTimeout(serverConnection); flags.do_next_read = 0; comm_remove_close_handler(serverConnection->fd, closeHandler); closeHandler = NULL; fwd->unregister(serverConnection); - if (orig_request->flags.spoof_client_ip) - client_addr = orig_request->client_addr; + if (request->flags.spoof_client_ip) + client_addr = request->client_addr; if (request->flags.pinned) { ispinned = true; } else if (request->flags.connection_auth && request->flags.auth_sent) { ispinned = true; } - if (orig_request->pinnedConnection() && ispinned) { - orig_request->pinnedConnection()->pinConnection(serverConnection, orig_request, _peer, + if (request->pinnedConnection() && ispinned) { + request->pinnedConnection()->pinConnection(serverConnection, request, _peer, (request->flags.connection_auth != 0)); } else { - fwd->pconnPush(serverConnection, request->GetHost()); + fwd->pconnPush(serverConnection, request->peer_host ? request->peer_host : request->GetHost()); } serverConnection = NULL; serverComplete(); return; case COMPLETE_NONPERSISTENT_MSG: debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection); serverComplete(); return; } maybeReadVirginBody(); } void HttpStateData::maybeReadVirginBody() { // we may need to grow the buffer if headers do not fit const int minRead = flags.headers_parsed ? 0 :1024; @@ -1512,439 +1487,436 @@ /// successfully wrote the entire request (including body, last-chunk, etc.) void HttpStateData::sendComplete() { /* * Set the read timeout here because it hasn't been set yet. * We only set the read timeout after the request has been * fully written to the server-side. If we start the timeout * after connection establishment, then we are likely to hit * the timeout for POST/PUT requests that have very large * request bodies. */ typedef CommCbMemFunT TimeoutDialer; AsyncCall::Pointer timeoutCall = JobCallback(11, 5, TimeoutDialer, this, HttpStateData::httpTimeout); commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall); flags.request_sent = 1; - orig_request->hier.peer_http_request_sent = current_time; + request->hier.peer_http_request_sent = current_time; } // Close the HTTP server connection. Used by serverComplete(). 
void HttpStateData::closeServer() { debugs(11,5, HERE << "closing HTTP server " << serverConnection << " this " << this); if (Comm::IsConnOpen(serverConnection)) { fwd->unregister(serverConnection); comm_remove_close_handler(serverConnection->fd, closeHandler); closeHandler = NULL; serverConnection->close(); } } bool HttpStateData::doneWithServer() const { return !Comm::IsConnOpen(serverConnection); } /* * Fixup authentication request headers for special cases */ static void -httpFixupAuthentication(HttpRequest * request, HttpRequest * orig_request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags) +httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags) { http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION; /* Nothing to do unless we are forwarding to a peer */ if (!request->flags.proxying) return; /* Needs to be explicitly enabled */ - if (!orig_request->peer_login) + if (!request->peer_login) return; /* Maybe already dealt with? */ if (hdr_out->has(header)) return; /* Nothing to do here for PASSTHRU */ - if (strcmp(orig_request->peer_login, "PASSTHRU") == 0) + if (strcmp(request->peer_login, "PASSTHRU") == 0) return; /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */ - if (flags.originpeer && strcmp(orig_request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) { + if (flags.originpeer && strcmp(request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) { const char *auth = hdr_in->getStr(HDR_PROXY_AUTHORIZATION); if (auth && strncasecmp(auth, "basic ", 6) == 0) { hdr_out->putStr(header, auth); return; } } /* Special mode to pass the username to the upstream cache */ - if (*orig_request->peer_login == '*') { + if (*request->peer_login == '*') { char loginbuf[256]; const char *username = "-"; - if (orig_request->extacl_user.size()) - username = orig_request->extacl_user.termedBuf(); + if (request->extacl_user.size()) + username = request->extacl_user.termedBuf(); #if USE_AUTH - else if (orig_request->auth_user_request != NULL) - username = orig_request->auth_user_request->username(); + else if (request->auth_user_request != NULL) + username = request->auth_user_request->username(); #endif - snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, orig_request->peer_login + 1); + snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, request->peer_login + 1); httpHeaderPutStrf(hdr_out, header, "Basic %s", old_base64_encode(loginbuf)); return; } /* external_acl provided credentials */ - if (orig_request->extacl_user.size() && orig_request->extacl_passwd.size() && - (strcmp(orig_request->peer_login, "PASS") == 0 || - strcmp(orig_request->peer_login, "PROXYPASS") == 0)) { + if (request->extacl_user.size() && request->extacl_passwd.size() && + (strcmp(request->peer_login, "PASS") == 0 || + strcmp(request->peer_login, "PROXYPASS") == 0)) { char loginbuf[256]; snprintf(loginbuf, sizeof(loginbuf), SQUIDSTRINGPH ":" SQUIDSTRINGPH, - SQUIDSTRINGPRINT(orig_request->extacl_user), - SQUIDSTRINGPRINT(orig_request->extacl_passwd)); + SQUIDSTRINGPRINT(request->extacl_user), + SQUIDSTRINGPRINT(request->extacl_passwd)); httpHeaderPutStrf(hdr_out, header, "Basic %s", old_base64_encode(loginbuf)); return; } /* Kerberos login to peer */ #if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI - if (strncmp(orig_request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) { + if 
(strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) { char *Token=NULL; char *PrincipalName=NULL,*p; - if ((p=strchr(orig_request->peer_login,':')) != NULL ) { + if ((p=strchr(request->peer_login,':')) != NULL ) { PrincipalName=++p; } - Token = peer_proxy_negotiate_auth(PrincipalName,request->peer_host); + Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host); if (Token) { httpHeaderPutStrf(hdr_out, HDR_PROXY_AUTHORIZATION, "Negotiate %s",Token); } return; } #endif /* HAVE_KRB5 && HAVE_GSSAPI */ httpHeaderPutStrf(hdr_out, header, "Basic %s", - old_base64_encode(orig_request->peer_login)); + old_base64_encode(request->peer_login)); return; } /* * build request headers and append them to a given MemBuf * used by buildRequestPrefix() * note: initialised the HttpHeader, the caller is responsible for Clean()-ing */ void HttpStateData::httpBuildRequestHeader(HttpRequest * request, - HttpRequest * orig_request, StoreEntry * entry, HttpHeader * hdr_out, const http_state_flags flags) { /* building buffer for complex strings */ #define BBUF_SZ (MAX_URL+32) LOCAL_ARRAY(char, bbuf, BBUF_SZ); LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN); - const HttpHeader *hdr_in = &orig_request->header; + const HttpHeader *hdr_in = &request->header; const HttpHeaderEntry *e = NULL; HttpHeaderPos pos = HttpHeaderInitPos; assert (hdr_out->owner == hoRequest); /* append our IMS header */ if (request->lastmod > -1) hdr_out->putTime(HDR_IF_MODIFIED_SINCE, request->lastmod); - bool we_do_ranges = decideIfWeDoRanges (orig_request); + bool we_do_ranges = decideIfWeDoRanges (request); String strConnection (hdr_in->getList(HDR_CONNECTION)); while ((e = hdr_in->getEntry(&pos))) - copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, orig_request, hdr_out, we_do_ranges, flags); + copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, hdr_out, we_do_ranges, flags); /* Abstraction break: We should interpret multipart/byterange responses * into offset-length data, and this works around our inability to do so. 
*/ - if (!we_do_ranges && orig_request->multipartRangeRequest()) { + if (!we_do_ranges && request->multipartRangeRequest()) { /* don't cache the result */ - orig_request->flags.cachable = 0; + request->flags.cachable = 0; /* pretend it's not a range request */ - delete orig_request->range; - orig_request->range = NULL; - orig_request->flags.range = 0; + delete request->range; + request->range = NULL; + request->flags.range = 0; } /* append Via */ if (Config.onoff.via) { String strVia; strVia = hdr_in->getList(HDR_VIA); snprintf(bbuf, BBUF_SZ, "%d.%d %s", - orig_request->http_ver.major, - orig_request->http_ver.minor, ThisCache); + request->http_ver.major, + request->http_ver.minor, ThisCache); strListAdd(&strVia, bbuf, ','); hdr_out->putStr(HDR_VIA, strVia.termedBuf()); strVia.clean(); } - if (orig_request->flags.accelerated) { + if (request->flags.accelerated) { /* Append Surrogate-Capabilities */ String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY)); #if USE_SQUID_ESI snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id); #else snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id); #endif strListAdd(&strSurrogate, bbuf, ','); hdr_out->putStr(HDR_SURROGATE_CAPABILITY, strSurrogate.termedBuf()); } /** \pre Handle X-Forwarded-For */ if (strcmp(opt_forwarded_for, "delete") != 0) { String strFwd = hdr_in->getList(HDR_X_FORWARDED_FOR); if (strFwd.size() > 65536/2) { // There is probably a forwarding loop with Via detection disabled. // If we do nothing, String will assert on overflow soon. // TODO: Terminate all transactions with huge XFF? strFwd = "error"; static int warnedCount = 0; if (warnedCount++ < 100) { - const char *url = entry ? entry->url() : urlCanonical(orig_request); + const char *url = entry ? entry->url() : urlCanonical(request); debugs(11, 1, "Warning: likely forwarding loop with " << url); } } if (strcmp(opt_forwarded_for, "on") == 0) { /** If set to ON - append client IP or 'unknown'. */ - if ( orig_request->client_addr.IsNoAddr() ) + if ( request->client_addr.IsNoAddr() ) strListAdd(&strFwd, "unknown", ','); else - strListAdd(&strFwd, orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ','); + strListAdd(&strFwd, request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ','); } else if (strcmp(opt_forwarded_for, "off") == 0) { /** If set to OFF - append 'unknown'. */ strListAdd(&strFwd, "unknown", ','); } else if (strcmp(opt_forwarded_for, "transparent") == 0) { /** If set to TRANSPARENT - pass through unchanged. */ } else if (strcmp(opt_forwarded_for, "truncate") == 0) { /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */ - if ( orig_request->client_addr.IsNoAddr() ) + if ( request->client_addr.IsNoAddr() ) strFwd = "unknown"; else - strFwd = orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN); + strFwd = request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN); } if (strFwd.size() > 0) hdr_out->putStr(HDR_X_FORWARDED_FOR, strFwd.termedBuf()); } /** If set to DELETE - do not copy through. 
*/ /* append Host if not there already */ if (!hdr_out->has(HDR_HOST)) { - if (orig_request->peer_domain) { - hdr_out->putStr(HDR_HOST, orig_request->peer_domain); - } else if (orig_request->port == urlDefaultPort(orig_request->protocol)) { + if (request->peer_domain) { + hdr_out->putStr(HDR_HOST, request->peer_domain); + } else if (request->port == urlDefaultPort(request->protocol)) { /* use port# only if not default */ - hdr_out->putStr(HDR_HOST, orig_request->GetHost()); + hdr_out->putStr(HDR_HOST, request->GetHost()); } else { httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d", - orig_request->GetHost(), - (int) orig_request->port); + request->GetHost(), + (int) request->port); } } /* append Authorization if known in URL, not in header and going direct */ if (!hdr_out->has(HDR_AUTHORIZATION)) { - if (!request->flags.proxying && *request->login) { + if (!request->flags.proxying && request->login && *request->login) { httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s", old_base64_encode(request->login)); } } /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */ - httpFixupAuthentication(request, orig_request, hdr_in, hdr_out, flags); + httpFixupAuthentication(request, hdr_in, hdr_out, flags); /* append Cache-Control, add max-age if not there already */ { HttpHdrCc *cc = hdr_in->getCc(); if (!cc) cc = httpHdrCcCreate(); #if 0 /* see bug 2330 */ /* Set no-cache if determined needed but not found */ - if (orig_request->flags.nocache) + if (request->flags.nocache) EBIT_SET(cc->mask, CC_NO_CACHE); #endif /* Add max-age only without no-cache */ if (!EBIT_TEST(cc->mask, CC_MAX_AGE) && !EBIT_TEST(cc->mask, CC_NO_CACHE)) { const char *url = - entry ? entry->url() : urlCanonical(orig_request); + entry ? entry->url() : urlCanonical(request); httpHdrCcSetMaxAge(cc, getMaxAge(url)); - if (request->urlpath.size()) - assert(strstr(url, request->urlpath.termedBuf())); } /* Enforce sibling relations */ if (flags.only_if_cached) EBIT_SET(cc->mask, CC_ONLY_IF_CACHED); hdr_out->putCc(cc); httpHdrCcDestroy(cc); } /* maybe append Connection: keep-alive */ if (flags.keepalive) { hdr_out->putStr(HDR_CONNECTION, "keep-alive"); } /* append Front-End-Https */ if (flags.front_end_https) { if (flags.front_end_https == 1 || request->protocol == AnyP::PROTO_HTTPS) hdr_out->putStr(HDR_FRONT_END_HTTPS, "On"); } if (flags.chunked_request) { // Do not just copy the original value so that if the client-side // starts decode other encodings, this code may remain valid. hdr_out->putStr(HDR_TRANSFER_ENCODING, "chunked"); } /* Now mangle the headers. */ if (Config2.onoff.mangle_request_headers) httpHdrMangleList(hdr_out, request, ROR_REQUEST); strConnection.clean(); } /** * Decides whether a particular header may be cloned from the received Clients request * to our outgoing fetch request. */ void -copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags) +copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags) { debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value ); switch (e->id) { /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. 
*/ case HDR_PROXY_AUTHORIZATION: /** \par Proxy-Authorization: * Only pass on proxy authentication to peers for which * authentication forwarding is explicitly enabled */ - if (!flags.originpeer && flags.proxying && orig_request->peer_login && - (strcmp(orig_request->peer_login, "PASS") == 0 || - strcmp(orig_request->peer_login, "PROXYPASS") == 0 || - strcmp(orig_request->peer_login, "PASSTHRU") == 0)) { + if (!flags.originpeer && flags.proxying && request->peer_login && + (strcmp(request->peer_login, "PASS") == 0 || + strcmp(request->peer_login, "PROXYPASS") == 0 || + strcmp(request->peer_login, "PASSTHRU") == 0)) { hdr_out->addEntry(e->clone()); } break; /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */ case HDR_CONNECTION: /** \par Connection: */ case HDR_TE: /** \par TE: */ case HDR_KEEP_ALIVE: /** \par Keep-Alive: */ case HDR_PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */ case HDR_TRAILER: /** \par Trailer: */ case HDR_UPGRADE: /** \par Upgrade: */ case HDR_TRANSFER_ENCODING: /** \par Transfer-Encoding: */ break; /** \par OTHER headers I haven't bothered to track down yet. */ case HDR_AUTHORIZATION: /** \par WWW-Authorization: * Pass on WWW authentication */ if (!flags.originpeer) { hdr_out->addEntry(e->clone()); } else { /** \note In accelerators, only forward authentication if enabled * (see also httpFixupAuthentication for special cases) */ - if (orig_request->peer_login && - (strcmp(orig_request->peer_login, "PASS") == 0 || - strcmp(orig_request->peer_login, "PASSTHRU") == 0 || - strcmp(orig_request->peer_login, "PROXYPASS") == 0)) { + if (request->peer_login && + (strcmp(request->peer_login, "PASS") == 0 || + strcmp(request->peer_login, "PASSTHRU") == 0 || + strcmp(request->peer_login, "PROXYPASS") == 0)) { hdr_out->addEntry(e->clone()); } } break; case HDR_HOST: /** \par Host: * Normally Squid rewrites the Host: header. * However, there is one case when we don't: If the URL * went through our redirector and the admin configured * 'redir_rewrites_host' to be off. */ - if (orig_request->peer_domain) - hdr_out->putStr(HDR_HOST, orig_request->peer_domain); + if (request->peer_domain) + hdr_out->putStr(HDR_HOST, request->peer_domain); else if (request->flags.redirected && !Config.onoff.redir_rewrites_host) hdr_out->addEntry(e->clone()); else { /* use port# only if not default */ - if (orig_request->port == urlDefaultPort(orig_request->protocol)) { - hdr_out->putStr(HDR_HOST, orig_request->GetHost()); + if (request->port == urlDefaultPort(request->protocol)) { + hdr_out->putStr(HDR_HOST, request->GetHost()); } else { httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d", - orig_request->GetHost(), - (int) orig_request->port); + request->GetHost(), + (int) request->port); } } break; case HDR_IF_MODIFIED_SINCE: /** \par If-Modified-Since: * append unless we added our own; * \note at most one client's ims header can pass through */ if (!hdr_out->has(HDR_IF_MODIFIED_SINCE)) hdr_out->addEntry(e->clone()); break; case HDR_MAX_FORWARDS: /** \par Max-Forwards: * pass only on TRACE or OPTIONS requests */ - if (orig_request->method == METHOD_TRACE || orig_request->method == METHOD_OPTIONS) { + if (request->method == METHOD_TRACE || request->method == METHOD_OPTIONS) { const int64_t hops = e->getInt64(); if (hops > 0) hdr_out->putInt64(HDR_MAX_FORWARDS, hops - 1); } break; case HDR_VIA: /** \par Via: * If Via is disabled then forward any received header as-is. * Otherwise leave for explicit updated addition later. 
*/ if (!Config.onoff.via) hdr_out->addEntry(e->clone()); break; case HDR_RANGE: @@ -1982,180 +1954,193 @@ if (!flags.front_end_https) hdr_out->addEntry(e->clone()); break; default: /** \par default. * pass on all other header fields * which are NOT listed by the special Connection: header. */ if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) { debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition"); return; } hdr_out->addEntry(e->clone()); } } bool -HttpStateData::decideIfWeDoRanges (HttpRequest * orig_request) +HttpStateData::decideIfWeDoRanges (HttpRequest * request) { bool result = true; /* decide if we want to do Ranges ourselves * and fetch the whole object now) * We want to handle Ranges ourselves iff * - we can actually parse client Range specs * - the specs are expected to be simple enough (e.g. no out-of-order ranges) * - reply will be cachable * (If the reply will be uncachable we have to throw it away after * serving this request, so it is better to forward ranges to * the server and fetch only the requested content) */ - int64_t roffLimit = orig_request->getRangeOffsetLimit(); + int64_t roffLimit = request->getRangeOffsetLimit(); - if (NULL == orig_request->range || !orig_request->flags.cachable - || orig_request->range->offsetLimitExceeded(roffLimit) || orig_request->flags.connection_auth) + if (NULL == request->range || !request->flags.cachable + || request->range->offsetLimitExceeded(roffLimit) || request->flags.connection_auth) result = false; debugs(11, 8, "decideIfWeDoRanges: range specs: " << - orig_request->range << ", cachable: " << - orig_request->flags.cachable << "; we_do_ranges: " << result); + request->range << ", cachable: " << + request->flags.cachable << "; we_do_ranges: " << result); return result; } /* build request prefix and append it to a given MemBuf; * return the length of the prefix */ mb_size_t -HttpStateData::buildRequestPrefix(HttpRequest * aRequest, - HttpRequest * original_request, - StoreEntry * sentry, - MemBuf * mb) +HttpStateData::buildRequestPrefix(MemBuf * mb) { const int offset = mb->size; HttpVersion httpver(1,1); + const char * url; + if (_peer && !_peer->options.originserver) + url = entry->url(); + else + url = request->urlpath.termedBuf(); mb->Printf("%s %s HTTP/%d.%d\r\n", - RequestMethodStr(aRequest->method), - aRequest->urlpath.size() ? aRequest->urlpath.termedBuf() : "/", + RequestMethodStr(request->method), + url && *url ? url : "/", httpver.major,httpver.minor); /* build and pack headers */ { HttpHeader hdr(hoRequest); Packer p; - httpBuildRequestHeader(aRequest, original_request, sentry, &hdr, flags); + httpBuildRequestHeader(request, entry, &hdr, flags); - if (aRequest->flags.pinned && aRequest->flags.connection_auth) - aRequest->flags.auth_sent = 1; + if (request->flags.pinned && request->flags.connection_auth) + request->flags.auth_sent = 1; else if (hdr.has(HDR_AUTHORIZATION)) - aRequest->flags.auth_sent = 1; + request->flags.auth_sent = 1; packerToMemInit(&p, mb); hdr.packInto(&p); hdr.clean(); packerClean(&p); } /* append header terminator */ mb->append(crlf, 2); return mb->size - offset; } /* This will be called when connect completes. Write request. 
*/ bool HttpStateData::sendRequest() { MemBuf mb; debugs(11, 5, HERE << serverConnection << ", request " << request << ", this " << this << "."); if (!Comm::IsConnOpen(serverConnection)) { debugs(11,3, HERE << "cannot send request to closing " << serverConnection); assert(closeHandler != NULL); return false; } typedef CommCbMemFunT TimeoutDialer; AsyncCall::Pointer timeoutCall = JobCallback(11, 5, TimeoutDialer, this, HttpStateData::httpTimeout); commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall); flags.do_next_read = 1; maybeReadVirginBody(); - if (orig_request->body_pipe != NULL) { + if (request->body_pipe != NULL) { if (!startRequestBodyFlow()) // register to receive body data return false; typedef CommCbMemFunT Dialer; requestSender = JobCallback(11,5, Dialer, this, HttpStateData::sentRequestBody); Must(!flags.chunked_request); // use chunked encoding if we do not know the length - if (orig_request->content_length < 0) + if (request->content_length < 0) flags.chunked_request = 1; } else { assert(!requestBodySource); typedef CommCbMemFunT Dialer; requestSender = JobCallback(11,5, Dialer, this, HttpStateData::wroteLast); } if (_peer != NULL) { if (_peer->options.originserver) { flags.proxying = 0; flags.originpeer = 1; } else { flags.proxying = 1; flags.originpeer = 0; } } else { flags.proxying = 0; flags.originpeer = 0; } /* * Is keep-alive okay for all request methods? */ - if (orig_request->flags.must_keepalive) + if (request->flags.must_keepalive) flags.keepalive = 1; else if (!Config.onoff.server_pconns) flags.keepalive = 0; else if (_peer == NULL) flags.keepalive = 1; else if (_peer->stats.n_keepalives_sent < 10) flags.keepalive = 1; else if ((double) _peer->stats.n_keepalives_recv / (double) _peer->stats.n_keepalives_sent > 0.50) flags.keepalive = 1; if (_peer) { + /*The old code here was + if (neighborType(_peer, request) == PEER_SIBLING && ... + which is equivalent to: + if (neighborType(_peer, NULL) == PEER_SIBLING && ... + or better: + if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) || + _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss) + flags.only_if_cached = 1; + + But I suppose it was a bug + */ if (neighborType(_peer, request) == PEER_SIBLING && !_peer->options.allow_miss) flags.only_if_cached = 1; flags.front_end_https = _peer->front_end_https; } mb.init(); request->peer_host=_peer?_peer->host:NULL; - buildRequestPrefix(request, orig_request, entry, &mb); + buildRequestPrefix(&mb); debugs(11, 6, HERE << serverConnection << ":\n" << mb.buf); Comm::Write(serverConnection, &mb, requestSender); return true; } bool HttpStateData::getMoreRequestBody(MemBuf &buf) { // parent's implementation can handle the no-encoding case if (!flags.chunked_request) return ServerStateData::getMoreRequestBody(buf); MemBuf raw; Must(requestBodySource != NULL); if (!requestBodySource->getMoreData(raw)) return false; // no request body bytes to chunk yet // optimization: pre-allocate buffer size that should be enough @@ -2269,41 +2254,41 @@ // more origin request body data is available void HttpStateData::handleMoreRequestBodyAvailable() { if (eof || !Comm::IsConnOpen(serverConnection)) { // XXX: we should check this condition in other callbacks then! // TODO: Check whether this can actually happen: We should unsubscribe // as a body consumer when the above condition(s) are detected. 
debugs(11, 1, HERE << "Transaction aborted while reading HTTP body"); return; } assert(requestBodySource != NULL); if (requestBodySource->buf().hasContent()) { // XXX: why does not this trigger a debug message on every request? if (flags.headers_parsed && !flags.abuse_detected) { flags.abuse_detected = 1; - debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << orig_request->client_addr << "' -> '" << entry->url() << "'" ); + debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request->client_addr << "' -> '" << entry->url() << "'" ); if (virginReply()->sline.status == HTTP_INVALID_HEADER) { serverConnection->close(); return; } } } HttpStateData::handleMoreRequestBodyAvailable(); } // premature end of the request body void HttpStateData::handleRequestBodyProducerAborted() { ServerStateData::handleRequestBodyProducerAborted(); if (entry->isEmpty()) { debugs(11, 3, "request body aborted: " << serverConnection); ErrorState *err; // We usually get here when ICAP REQMOD aborts during body processing. @@ -2328,26 +2313,20 @@ ServerStateData::sentRequestBody(io); } // Quickly abort the transaction // TODO: destruction should be sufficient as the destructor should cleanup, // including canceling close handlers void HttpStateData::abortTransaction(const char *reason) { debugs(11,5, HERE << "aborting transaction for " << reason << "; " << serverConnection << ", this " << this); if (Comm::IsConnOpen(serverConnection)) { serverConnection->close(); return; } fwd->handleUnregisteredServerEnd(); deleteThis("HttpStateData::abortTransaction"); } - -HttpRequest * -HttpStateData::originalRequest() -{ - return orig_request; -} === modified file 'src/http.h' --- src/http.h 2011-05-24 10:44:39 +0000 +++ src/http.h 2011-06-24 15:17:07 +0000 @@ -32,72 +32,68 @@ */ #ifndef SQUID_HTTP_H #define SQUID_HTTP_H #include "StoreIOBuffer.h" #include "comm.h" #include "comm/forward.h" #include "forward.h" #include "Server.h" #include "ChunkedCodingParser.h" class HttpStateData : public ServerStateData { public: HttpStateData(FwdState *); ~HttpStateData(); static void httpBuildRequestHeader(HttpRequest * request, - HttpRequest * orig_request, StoreEntry * entry, HttpHeader * hdr_out, const http_state_flags flags); virtual const Comm::ConnectionPointer & dataConnection() const; /* should be private */ bool sendRequest(); void processReplyHeader(); void processReplyBody(); void readReply(const CommIoCbParams &io); virtual void maybeReadVirginBody(); // read response data from the network int cacheableReply(); peer *_peer; /* peer request made to */ int eof; /* reached end-of-object? */ int lastChunk; /* reached last chunk of a chunk-encoded reply */ - HttpRequest *orig_request; http_state_flags flags; size_t read_sz; int header_bytes_read; // to find end of response, int64_t reply_bytes_read; // without relying on StoreEntry int body_bytes_truncated; // positive when we read more than we wanted MemBuf *readBuf; bool ignoreCacheControl; bool surrogateNoStore; void processSurrogateControl(HttpReply *); protected: - virtual HttpRequest *originalRequest(); - void processReply(); void proceedAfter1xx(); void handle1xx(HttpReply *msg); private: /** * The current server connection. * Maybe open, closed, or NULL. * Use doneWithServer() to check if the server is available for use. 
*/ Comm::ConnectionPointer serverConnection; AsyncCall::Pointer closeHandler; enum ConnectionStatus { INCOMPLETE_MSG, COMPLETE_PERSISTENT_MSG, COMPLETE_NONPERSISTENT_MSG }; ConnectionStatus statusIfComplete() const; ConnectionStatus persistentConnStatus() const; void keepaliveAccounting(HttpReply *); @@ -111,33 +107,30 @@ virtual void closeServer(); // end communication with the server virtual bool doneWithServer() const; // did we end communication? virtual void abortTransaction(const char *reason); // abnormal termination // consuming request body virtual void handleMoreRequestBodyAvailable(); virtual void handleRequestBodyProducerAborted(); void writeReplyBody(); bool decodeAndWriteReplyBody(); bool finishingBrokenPost(); bool finishingChunkedRequest(); void doneSendingRequestBody(); void requestBodyHandler(MemBuf &); virtual void sentRequestBody(const CommIoCbParams &io); void wroteLast(const CommIoCbParams &io); void sendComplete(); void httpStateConnClosed(const CommCloseCbParams ¶ms); void httpTimeout(const CommTimeoutCbParams ¶ms); - mb_size_t buildRequestPrefix(HttpRequest * request, - HttpRequest * orig_request, - StoreEntry * entry, - MemBuf * mb); + mb_size_t buildRequestPrefix(MemBuf * mb); static bool decideIfWeDoRanges (HttpRequest * orig_request); bool peerSupportsConnectionPinning() const; ChunkedCodingParser *httpChunkDecoder; private: CBDATA_CLASS2(HttpStateData); }; #endif /* SQUID_HTTP_H */ === modified file 'src/tunnel.cc' --- src/tunnel.cc 2011-06-24 05:07:06 +0000 +++ src/tunnel.cc 2011-06-24 15:14:44 +0000 @@ -648,41 +648,40 @@ peerSelect(&(tunnelState->serverDestinations), request, NULL, tunnelPeerSelectComplete, tunnelState); } static void tunnelRelayConnectRequest(const Comm::ConnectionPointer &srv, void *data) { TunnelStateData *tunnelState = (TunnelStateData *)data; HttpHeader hdr_out(hoRequest); Packer p; http_state_flags flags; debugs(26, 3, HERE << srv << ", tunnelState=" << tunnelState); memset(&flags, '\0', sizeof(flags)); flags.proxying = tunnelState->request->flags.proxying; MemBuf mb; mb.init(); mb.Printf("CONNECT %s HTTP/1.1\r\n", tunnelState->url); HttpStateData::httpBuildRequestHeader(tunnelState->request, - tunnelState->request, NULL, /* StoreEntry */ &hdr_out, flags); /* flags */ packerToMemInit(&p, &mb); hdr_out.packInto(&p); hdr_out.clean(); packerClean(&p); mb.append("\r\n", 2); AsyncCall::Pointer writeCall = commCbCall(5,5, "tunnelConnectedWriteDone", CommIoCbPtrFun(tunnelConnectedWriteDone, tunnelState)); Comm::Write(srv, &mb, writeCall); AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "tunnelTimeout", CommTimeoutCbPtrFun(tunnelTimeout, tunnelState)); commSetConnTimeout(srv, Config.Timeout.read, timeoutCall); } static void tunnelPeerSelectComplete(Comm::ConnectionList *peer_paths, void *data)
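
For readers less familiar with the old two-request setup, here is the minimal, self-contained sketch of bug 3 referenced in the description above. It is plain C++, not Squid code: every type and name below (ToyRequest, ToyCacheControl, makeProxyRequest, replyIsCacheable) is a hypothetical stand-in that merely mimics the shape of the old logic, where the constructor cloned only a few fields of the client's request into a fresh proxy request and dropped the parsed Cache-Control, leaving the no-store check in cacheableReply() with nothing to test.

// toy_no_store.cc -- illustrative only; every type and function here is a
// hypothetical stand-in, not part of Squid.
#include <iostream>
#include <memory>
#include <string>

struct ToyCacheControl {
    bool noStore = false;                            // parsed "Cache-Control: no-store"
};

struct ToyRequest {
    std::string urlpath;
    std::string host;
    std::shared_ptr<ToyCacheControl> cache_control;  // may be null
};

// Mimics the old HttpStateData constructor: build a fresh request for the
// peer, copying only urlpath/host-style data and dropping cache_control.
static ToyRequest makeProxyRequest(const ToyRequest &orig, const std::string &peerHost)
{
    ToyRequest proxy;
    proxy.urlpath = orig.urlpath;
    proxy.host = peerHost;
    // orig.cache_control is NOT copied -- the source of the bug
    return proxy;
}

// Mimics the cacheableReply() check quoted in item 3 of the description.
static bool replyIsCacheable(const ToyRequest &request)
{
    if (request.cache_control && request.cache_control->noStore)
        return false;                                // honour the client's no-store
    return true;
}

int main()
{
    ToyRequest client;
    client.urlpath = "/index.html";
    client.host = "origin.example.com";
    client.cache_control = std::make_shared<ToyCacheControl>();
    client.cache_control->noStore = true;            // client sent "no-store"

    // Going direct: the check sees cache_control and refuses to cache.
    std::cout << "direct:     cacheable = " << replyIsCacheable(client) << "\n";

    // Through a parent: the old code consulted the rebuilt proxy request,
    // whose cache_control is null, so the reply looked cacheable.
    const ToyRequest proxy = makeProxyRequest(client, "parent.example.com");
    std::cout << "via parent: cacheable = " << replyIsCacheable(proxy) << "\n";
    return 0;
}

Running it prints cacheable = 0 for the direct case and cacheable = 1 for the parent case, which matches the behaviour described in item 3. With this patch there is only one request object, so the same cache_control is always consulted.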