Close idle client connections associated with closed idle pinned connections.

Squid was not monitoring idle persistent connections pinned to servers. Squid would discover that the pinned server connection was closed only after receiving a new request on the idle client connection and trying to write that request to the server. In such cases, Squid propagates the pinned connection closure to the client (as it should). Chrome and, to a lesser extent, Firefox handle such races by opening a new connection and resending the failed [idempotent] request transparently to the user. However, IE usually displays an error page to the user.

While some pconn races cannot be avoided, without monitoring idle pconns, Squid virtually guaranteed such a race in environments where the origin server's idle connection timeout is smaller than the client/Squid timeouts and users revisit pages in the window between those two timeouts. Squid now monitors idle pinned connections similarly to idle connections in the pconn pool and closes the corresponding idle client connection to keep the two sides in sync (to the extent possible).

=== modified file 'src/client_side.cc'
--- src/client_side.cc	2013-06-03 22:39:37 +0000
+++ src/client_side.cc	2013-08-16 23:04:51 +0000
@@ -4410,90 +4410,141 @@
     ClientSocketContext::Pointer context = getCurrentContext();
     if (context != NULL) {
         context->writeControlMsg(msg); // will call msg.cbSuccess
         return;
     }
 
     debugs(33, 3, HERE << " closing due to missing context for 1xx");
     clientConnection->close();
 }
 
 /// Our close handler called by Comm when the pinned connection is closed
 void
 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
 {
     // FwdState might repin a failed connection sooner than this close
     // callback is called for the failed connection.
     assert(pinning.serverConnection == io.conn);
     pinning.closeHandler = NULL; // Comm unregisters handlers before calling
     const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
     unpinConnection();
-    if (sawZeroReply) {
+    if (sawZeroReply && clientConnection != NULL) {
         debugs(33, 3, "Closing client connection on pinned zero reply.");
         clientConnection->close();
     }
 }
 
 void
 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, HttpRequest *request, CachePeer *aPeer, bool auth)
 {
     char desc[FD_DESC_SZ];
 
     if (Comm::IsConnOpen(pinning.serverConnection)) {
-        if (pinning.serverConnection->fd == pinServer->fd)
+        if (pinning.serverConnection->fd == pinServer->fd) {
+            startPinnedConnectionMonitoring();
             return;
+        }
     }
 
     unpinConnection(); // closes pinned connection, if any, and resets fields
 
     pinning.serverConnection = pinServer;
     debugs(33, 3, HERE << pinning.serverConnection);
 
     // when pinning an SSL bumped connection, the request may be NULL
     const char *pinnedHost = "[unknown]";
     if (request) {
         pinning.host = xstrdup(request->GetHost());
         pinning.port = request->port;
         pinnedHost = pinning.host;
     } else {
         pinning.port = pinServer->remote.GetPort();
     }
     pinning.pinned = true;
     if (aPeer)
         pinning.peer = cbdataReference(aPeer);
     pinning.auth = auth;
     char stmp[MAX_IPSTRLEN];
     snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
              (auth || !aPeer) ?
              pinnedHost : aPeer->name,
              clientConnection->remote.ToURL(stmp,MAX_IPSTRLEN),
              clientConnection->fd);
     fd_note(pinning.serverConnection->fd, desc);
 
     typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
     pinning.closeHandler = JobCallback(33, 5,
                                        Dialer, this, ConnStateData::clientPinnedConnectionClosed);
     // remember the pinned connection so that cb does not unpin a fresher one
     typedef CommCloseCbParams Params;
     Params &params = GetCommParams<Params>(pinning.closeHandler);
     params.conn = pinning.serverConnection;
     comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
+
+    startPinnedConnectionMonitoring();
+}
+
+/// Assign a read handler to an idle pinned connection so that we can detect connection closures.
+void
+ConnStateData::startPinnedConnectionMonitoring()
+{
+    if (pinning.readHandler != NULL)
+        return; // already monitoring
+
+    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
+    pinning.readHandler = JobCallback(33, 3,
+                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
+    static char unusedBuf[8];
+    comm_read(pinning.serverConnection, unusedBuf, sizeof(unusedBuf), pinning.readHandler);
+}
+
+void
+ConnStateData::stopPinnedConnectionMonitoring()
+{
+    if (pinning.readHandler != NULL) {
+        comm_read_cancel(pinning.serverConnection->fd, pinning.readHandler);
+        pinning.readHandler = NULL;
+    }
+}
+
+/// Our read handler called by Comm when the server either closes an idle pinned connection or
+/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
+void
+ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
+{
+    pinning.readHandler = NULL; // Comm unregisters handlers before calling
+
+    if (io.flag == COMM_ERR_CLOSING)
+        return; // close handler will clean up
+
+    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " << io.size <<
+           (!currentobject ? " with idle client" : ""));
+
+    assert(pinning.serverConnection == io.conn);
+    pinning.serverConnection->close();
+
+    // If we are still sending data to the client, do not close now. When we are done sending,
+    // ClientSocketContext::keepaliveNextRequest() checks pinning.serverConnection and will close.
+    // However, if we are idle, then we must close to inform the idle client and minimize races.
+    // We could use getConcurrentRequestCount() instead of currentobject, but the latter is faster.
+    if (!currentobject && clientConnection != NULL)
+        clientConnection->close();
 }
 
 const Comm::ConnectionPointer
 ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *aPeer)
 {
     debugs(33, 7, HERE << pinning.serverConnection);
 
     bool valid = true;
     if (!Comm::IsConnOpen(pinning.serverConnection))
         valid = false;
     if (pinning.auth && request && strcasecmp(pinning.host, request->GetHost()) != 0) {
         valid = false;
     }
     if (request && pinning.port != request->port) {
         valid = false;
     }
     if (pinning.peer && !cbdataReferenceValid(pinning.peer)) {
         valid = false;
     }
     if (aPeer != pinning.peer) {

=== modified file 'src/client_side.h'
--- src/client_side.h	2013-02-19 20:46:31 +0000
+++ src/client_side.h	2013-08-16 23:00:45 +0000
@@ -248,40 +248,41 @@
      * used by the owner of the connection, opaque otherwise
      * TODO: generalise the connection owner concept.
      */
     ClientSocketContext::Pointer currentobject;
 
     Ip::Address log_addr;
     int nrequests;
 
     struct {
         bool readMore; ///< needs comm_read (for this request or new requests)
         bool swanSang; // XXX: temporary flag to check proper cleanup
     } flags;
     struct {
         Comm::ConnectionPointer serverConnection; /* pinned server side connection */
         char *host;             /* host name of pinned connection */
         int port;               /* port of pinned connection */
         bool pinned;            /* this connection was pinned */
         bool auth;              /* pinned for www authentication */
         bool zeroReply; ///< server closed w/o response (ERR_ZERO_SIZE_OBJECT)
         CachePeer *peer;        /* CachePeer the connection goes via */
+        AsyncCall::Pointer readHandler; ///< detects serverConnection closure
         AsyncCall::Pointer closeHandler; /*The close handler for pinned server side connection*/
     } pinning;
 
     AnyP::PortCfg *port;
 
     bool transparent() const;
     bool reading() const;
     void stopReading(); ///< cancels comm_read if it is scheduled
 
     /// true if we stopped receiving the request
     const char *stoppedReceiving() const { return stoppedReceiving_; }
     /// true if we stopped sending the response
     const char *stoppedSending() const { return stoppedSending_; }
     /// note request receiving error and close as soon as we write the response
     void stopReceiving(const char *error);
     /// note response sending error and close as soon as we read the request
     void stopSending(const char *error);
 
     void expectNoForwarding(); ///< cleans up virgin request [body] forwarding state
 
@@ -313,40 +314,43 @@
      */
     CachePeer *pinnedPeer() const {return pinning.peer;}
     bool pinnedAuth() const {return pinning.auth;}
 
     // pining related comm callbacks
     void clientPinnedConnectionClosed(const CommCloseCbParams &io);
 
     // comm callbacks
     void clientReadRequest(const CommIoCbParams &io);
     void connStateClosed(const CommCloseCbParams &io);
     void requestTimeout(const CommTimeoutCbParams &params);
 
     // AsyncJob API
     virtual bool doneAll() const { return BodyProducer::doneAll() && false;}
     virtual void swanSong();
 
     /// Changes state so that we close the connection and quit after serving
     /// the client-side-detected error response instead of getting stuck.
     void quitAfterError(HttpRequest *request); // meant to be private
 
+    /// The caller assumes responsibility for connection closure detection.
+    void stopPinnedConnectionMonitoring();
+
 #if USE_SSL
     /// called by FwdState when it is done bumping the server
     void httpsPeeked(Comm::ConnectionPointer serverConnection);
 
     /// Start to create dynamic SSL_CTX for host or uses static port SSL context.
     void getSslContextStart();
     /**
      * Done create dynamic ssl certificate.
      *
      * \param[in] isNew if generated certificate is new, so we need to add this certificate to storage.
      */
     void getSslContextDone(SSL_CTX * sslContext, bool isNew = false);
     /// Callback function. It is called when squid receive message from ssl_crtd.
     static void sslCrtdHandleReplyWrapper(void *data, char *reply);
     /// Proccess response from ssl_crtd.
     void sslCrtdHandleReply(const char * reply);
     void switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode);
     bool switchedToHttps() const { return switchedToHttps_; }
     Ssl::ServerBump *serverBump() {return sslServerBump;}
 
@@ -360,40 +364,43 @@
     /// and create the key for storing/retrieve the certificate to/from the cache
     void buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties);
     /// Called when the client sends the first request on a bumped connection.
     /// Returns false if no [delayed] error should be written to the client.
     /// Otherwise, writes the error to the client and returns true. Also checks
     /// for SQUID_X509_V_ERR_DOMAIN_MISMATCH on bumped requests.
     bool serveDelayedError(ClientSocketContext *context);
 
     Ssl::BumpMode sslBumpMode; ///< ssl_bump decision (Ssl::bumpEnd if n/a).
 #else
     bool switchedToHttps() const { return false; }
 #endif
 
 protected:
     void startDechunkingRequest();
     void finishDechunkingRequest(bool withSuccess);
     void abortChunkedRequestBody(const err_type error);
     err_type handleChunkedRequestBody(size_t &putSize);
 
+    void startPinnedConnectionMonitoring();
+    void clientPinnedConnectionRead(const CommIoCbParams &io);
+
 private:
     int connReadWasError(comm_err_t flag, int size, int xerrno);
     int connFinishedWithConn(int size);
     void clientAfterReadingRequests();
 
 private:
     HttpParser parser_;
 
     // XXX: CBDATA plays with public/private and leaves the following 'private' fields all public... :(
 
 #if USE_SSL
     bool switchedToHttps_;
     /// The SSL server host name appears in CONNECT request or the server ip address for the intercepted requests
     String sslConnectHostOrIp; ///< The SSL server host name as passed in the CONNECT request
     String sslCommonName; ///< CN name for SSL certificate generation
     String sslBumpCertKey; ///< Key to use to store/retrieve generated certificate
 
     /// HTTPS server cert. fetching state for bump-ssl-server-first
     Ssl::ServerBump *sslServerBump;
     Ssl::CertSignAlgorithm signAlgorithm; ///< The signing algorithm to use

=== modified file 'src/forward.cc'
--- src/forward.cc	2013-06-18 22:32:52 +0000
+++ src/forward.cc	2013-08-16 20:19:28 +0000
@@ -1124,40 +1124,41 @@
     if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
         debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parrent proxy are not allowed");
         ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, HTTP_SERVICE_UNAVAILABLE, request);
         fail(anErr);
         self = NULL; // refcounted
         return;
     }
 
     request->flags.pinned = 0; // XXX: what if the ConnStateData set this to flag existing credentials?
     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
     if (serverDestinations[0]->peerType == PINNED) {
         ConnStateData *pinned_connection = request->pinnedConnection();
         debugs(17,7, "pinned peer connection: " << pinned_connection);
         // pinned_connection may become nil after a pconn race
         if (pinned_connection)
             serverConn = pinned_connection->validatePinnedConnection(request, serverDestinations[0]->getPeer());
         else
             serverConn = NULL;
         if (Comm::IsConnOpen(serverConn)) {
+            pinned_connection->stopPinnedConnectionMonitoring();
             flags.connected_okay = true;
 #if 0
             if (!serverConn->getPeer())
                 serverConn->peerType = HIER_DIRECT;
 #endif
             ++n_tries;
             request->flags.pinned = 1;
             if (pinned_connection->pinnedAuth())
                 request->flags.auth = 1;
             comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
             // the server may close the pinned connection before this request
             pconnRace = racePossible;
             dispatch();
             return;
         }
         // Pinned connection failure.
         debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
         ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, HTTP_SERVICE_UNAVAILABLE, request);
         fail(anErr);
         self = NULL; // refcounted
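
For readers unfamiliar with Squid's Comm layer, here is a minimal standalone sketch of the detection idea that startPinnedConnectionMonitoring() relies on: keep a read pending on an idle connection so that an EOF (zero-byte read) reveals that the peer closed its end. This is an illustrative POSIX program, not Squid code; the helper name idlePeerClosed() is invented for this example.

// Sketch: detect closure of an idle TCP-like connection by polling for
// readability and treating a zero-byte recv() as peer close.
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>

// Returns true if the idle peer has closed its end of the connection.
static bool idlePeerClosed(int fd)
{
    struct pollfd pfd = { fd, POLLIN, 0 };
    if (poll(&pfd, 1, 0) <= 0)
        return false; // nothing to read: still idle, presumably still open
    char buf[8]; // like the patch, read a few bytes just to learn the state
    const ssize_t n = recv(fd, buf, sizeof(buf), 0);
    return n == 0; // EOF: the peer closed the idle connection
}

int main()
{
    int sv[2];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
        return 1;
    printf("before close: peer closed? %d\n", (int)idlePeerClosed(sv[0])); // 0
    close(sv[1]); // simulate the origin server closing its idle side
    printf("after close:  peer closed? %d\n", (int)idlePeerClosed(sv[0])); // 1
    close(sv[0]);
    return 0;
}

Squid's version differs in that comm_read() delivers the result asynchronously through a JobCallback dialer rather than polling synchronously, but the state it distinguishes is the same: a genuinely idle connection versus one whose server side has already gone away.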