=== added file 'errors/templates/ERR_CONFLICT_HOST'
--- errors/templates/ERR_CONFLICT_HOST	1970-01-01 00:00:00 +0000
+++ errors/templates/ERR_CONFLICT_HOST	2012-01-16 12:08:08 +0000
@@ -0,0 +1,43 @@
+ERROR: The requested URL could not be retrieved
+
+ERROR
+The requested URL could not be retrieved
+
+The following error was encountered while trying to retrieve the URL: %U
+
+URI Host Conflict
+
+This means the domain name you are trying to access apparently no longer exists on the machine you are requesting it from.
+
+Some possible problems are:
+
+Your cache administrator is %w.
=== modified file 'src/HttpRequest.cc'
--- src/HttpRequest.cc	2011-09-14 18:21:46 +0000
+++ src/HttpRequest.cc	2012-01-21 05:15:00 +0000
@@ -605,6 +605,13 @@
 bool
 HttpRequest::cacheable() const
 {
+    // Intercepted request with a Host: header which cannot be trusted,
+    // because it failed verification or someone bypassed the security tests.
+    // We cannot cache the response for sharing between clients.
+    // TODO: update cache to store for particular clients only (going to same Host: and destination IP)
+    if (!flags.hostVerified && (flags.intercepted || flags.spoof_client_ip))
+        return false;
+
     if (protocol == AnyP::PROTO_HTTP)
         return httpCachable(method);
=== modified file 'src/cf.data.pre'
--- src/cf.data.pre	2012-01-06 20:41:21 +0000
+++ src/cf.data.pre	2012-01-21 04:35:21 +0000
@@ -1754,6 +1754,35 @@
 	authority of the origin server or gateway given by the original URL".
 DOC_END
 
+NAME: host_verify_loose
+IFDEF: USE_HTTP_VIOLATIONS
+TYPE: onoff
+DEFAULT: off
+LOC: Config.onoff.hostVerifyLoose
+DOC_START
+	Regardless of this option setting, when dealing with intercepted
+	traffic, Squid always verifies that the destination IP address matches
+	the Host header domain or IP (called 'authority form URL').
+
+	When set to OFF (the default), Squid responds with an HTTP 409 (Conflict)
+	error page and logs a security warning if there is no match.
+
+	When set to ON, Squid allows the suspicious request to continue but
+	logs a security warning and blocks caching of the response by Squid.
+	For now it also forces these requests to go DIRECT to the original
+	destination, overriding client_dst_passthru for requests which fail
+	Host: verification.
+
+	This option violates a MUST-level requirement in RFC 2616 section 14.23:
+	"The Host field value MUST represent the naming authority
+	of the origin server or gateway given by the original URL".
+
+	SECURITY WARNING:
+		This option allows web page scripts to bypass client browser
+		same-origin security and insert arbitrary content into the
+		browser cache.
+DOC_END
+
 NAME: client_dst_passthru
 TYPE: onoff
 DEFAULT: on
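The cacheable() hunk and the host_verify_loose documentation above describe a single rule: a response to intercepted or IP-spoofed traffic may be shared through the cache only if the Host header was verified against the original destination IP. The standalone C++ sketch below restates that rule outside of Squid; the names RequestFlags and mayCacheResponse are illustrative only and not part of the patch.

// Illustrative sketch of the rule added to HttpRequest::cacheable();
// not Squid's actual implementation.
#include <iostream>

struct RequestFlags {
    bool intercepted = false;    // NAT/TPROXY intercepted request
    bool spoofClientIp = false;  // TPROXY client-IP spoofing in use
    bool hostVerified = false;   // Host: header matched the original destination IP
};

// A response may be stored for sharing between clients only if the request was
// forward-proxied, or if an intercepted request passed Host: verification.
bool mayCacheResponse(const RequestFlags &flags)
{
    if (!flags.hostVerified && (flags.intercepted || flags.spoofClientIp))
        return false; // unverified intercepted traffic must not populate the shared cache
    return true;      // remaining protocol-specific checks would follow here
}

int main()
{
    RequestFlags forwardProxy;          // ordinary forward-proxy request
    RequestFlags interceptedUnverified; // intercepted, Host: check failed or skipped
    interceptedUnverified.intercepted = true;
    RequestFlags interceptedVerified;   // intercepted, Host: matched the destination IP
    interceptedVerified.intercepted = true;
    interceptedVerified.hostVerified = true;

    std::cout << std::boolalpha
              << mayCacheResponse(forwardProxy) << '\n'          // true
              << mayCacheResponse(interceptedUnverified) << '\n' // false
              << mayCacheResponse(interceptedVerified) << '\n';  // true
}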
=== modified file 'src/client_side_request.cc'
--- src/client_side_request.cc	2011-12-31 04:54:06 +0000
+++ src/client_side_request.cc	2012-01-17 12:56:56 +0000
@@ -553,6 +553,20 @@
 void
 ClientRequestContext::hostHeaderVerifyFailed(const char *A, const char *B)
 {
+    // IP address validation for Host: failed. Admin wants to ignore them.
+    if (Config.onoff.hostVerifyLoose) {
+        debugs(85, 3, "SECURITY ALERT: Host header forgery detected on " << http->getConn()->clientConnection <<
+               " (" << A << " does not match " << B << ") on URL: " << urlCanonical(http->request));
+
+        // NP: it is tempting to use 'flags.nocache' but that is all about READing cache data.
+        // The problems here are about WRITE for new cache content, which means flags.cachable.
+        http->request->flags.cachable = 0; // MUST NOT cache (for now)
+        // XXX: when we have updated the cache key to base on raw-IP + URI this cacheable limit can go.
+        http->request->flags.hierarchical = 0; // MUST NOT pass to peers (for now)
+        // XXX: when we have sorted out the best way to relay requests properly to peers this hierarchical limit can go.
+        return;
+    }
+
     debugs(85, DBG_IMPORTANT, "SECURITY ALERT: Host header forgery detected on " << http->getConn()->clientConnection <<
            " (" << A << " does not match " << B << ")");
     debugs(85, DBG_IMPORTANT, "SECURITY ALERT: By user agent: " << http->request->header.getStr(HDR_USER_AGENT));
@@ -562,7 +576,7 @@
     clientStreamNode *node = (clientStreamNode *)http->client_stream.tail->prev->data;
     clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
     assert (repContext);
-    repContext->setReplyToError(ERR_INVALID_REQ, HTTP_CONFLICT,
+    repContext->setReplyToError(ERR_CONFLICT_HOST, HTTP_CONFLICT,
                                 http->request->method, NULL,
                                 http->getConn()->clientConnection->remote,
                                 http->request,
@@ -657,6 +671,7 @@
     } else {
         // Okay no problem.
         debugs(85, 3, HERE << "validate passed.");
+        http->request->flags.hostVerified = 1;
         http->doCallouts();
     }
     safe_free(hostB);
=== modified file 'src/err_type.h'
--- src/err_type.h	2011-12-30 16:01:37 +0000
+++ src/err_type.h	2012-01-17 12:58:32 +0000
@@ -35,6 +35,7 @@
     ERR_INVALID_URL,
     ERR_ZERO_SIZE_OBJECT,
     ERR_PRECONDITION_FAILED,
+    ERR_CONFLICT_HOST,
 
     /* FTP Errors */
     ERR_FTP_DISABLED,
=== modified file 'src/forward.cc'
--- src/forward.cc	2011-11-28 01:39:47 +0000
+++ src/forward.cc	2012-01-15 12:01:24 +0000
@@ -120,8 +120,9 @@
     // Bug 3243: CVE 2009-0801
     // Bypass of browser same-origin access control in intercepted communication
     // To resolve this we must force DIRECT and only to the original client destination.
-    if (Config.onoff.client_dst_passthru && request && !request->flags.redirected &&
-        (request->flags.intercepted || request->flags.spoof_client_ip)) {
+    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.spoof_client_ip);
+    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
+    if (isIntercepted && useOriginalDst) {
         Comm::ConnectionPointer p = new Comm::Connection();
         p->remote = clientConn->local;
         p->peerType = ORIGINAL_DST;
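The forward.cc hunk splits the old single condition into two named booleans. The standalone sketch below restates the same decision under illustrative names (mustUseOriginalDst is not a Squid function): intercepted or spoofed traffic that has not been redirected is pinned to the client's original destination whenever client_dst_passthru is on or the Host header never passed verification.

// Illustrative sketch of the destination-pinning decision rewritten in forward.cc;
// not Squid's actual code.
#include <iostream>

struct Flags {
    bool intercepted = false;
    bool spoofClientIp = false;
    bool redirected = false;   // request URL rewritten by a redirector
    bool hostVerified = false; // Host: header matched the original destination IP
};

bool mustUseOriginalDst(const Flags &f, bool clientDstPassthru)
{
    // Bug 3243 / CVE 2009-0801: intercepted traffic may carry a forged Host: header,
    // so it may only be forwarded to the address the client actually connected to.
    const bool isIntercepted = !f.redirected && (f.intercepted || f.spoofClientIp);
    const bool useOriginalDst = clientDstPassthru || !f.hostVerified;
    return isIntercepted && useOriginalDst;
}

int main()
{
    Flags intercepted;
    intercepted.intercepted = true;

    Flags verified = intercepted;
    verified.hostVerified = true;

    std::cout << std::boolalpha
              << mustUseOriginalDst(intercepted, false) << '\n' // true: unverified, forced to original destination
              << mustUseOriginalDst(verified, false) << '\n'    // false: verified and passthru off
              << mustUseOriginalDst(verified, true) << '\n';    // true: passthru keeps the original destination
}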
=== modified file 'src/structs.h'
--- src/structs.h	2012-01-06 20:41:21 +0000
+++ src/structs.h	2012-01-21 04:35:21 +0000
@@ -459,6 +459,7 @@
         int memory_cache_first;
         int memory_cache_disk;
         int hostStrictVerify;
+        int hostVerifyLoose;
         int client_dst_passthru;
     } onoff;
@@ -963,7 +964,9 @@
 struct request_flags {
-    request_flags(): range(0),nocache(0),ims(0),auth(0),cachable(0),hierarchical(0),loopdetect(0),proxy_keepalive(0),proxying(0),refresh(0),redirected(0),need_validation(0),fail_on_validation_err(0),stale_if_hit(0),accelerated(0),ignore_cc(0),intercepted(0),spoof_client_ip(0),internal(0),internalclient(0),must_keepalive(0),chunked_reply(0),stream_error(0),sslBumped(0),destinationIPLookedUp_(0) {
+    request_flags(): range(0),nocache(0),ims(0),auth(0),cachable(0),hierarchical(0),loopdetect(0),proxy_keepalive(0),proxying(0),refresh(0),redirected(0),need_validation(0),fail_on_validation_err(0),stale_if_hit(0),accelerated(0),ignore_cc(0),intercepted(0),
+            hostVerified(0),
+            spoof_client_ip(0),internal(0),internalclient(0),must_keepalive(0),chunked_reply(0),stream_error(0),sslBumped(0),destinationIPLookedUp_(0) {
 #if USE_HTTP_VIOLATIONS
         nocache_hack = 0;
 #endif
@@ -973,10 +976,10 @@
     }
 
     unsigned int range:1;
-    unsigned int nocache:1;
+    unsigned int nocache:1; ///< whether the response to this request may be READ from cache
     unsigned int ims:1;
     unsigned int auth:1;
-    unsigned int cachable:1;
+    unsigned int cachable:1; ///< whether the response to this request may be stored in the cache
    unsigned int hierarchical:1;
     unsigned int loopdetect:1;
     unsigned int proxy_keepalive:1;
@@ -992,7 +995,8 @@
 #endif
     unsigned int accelerated:1;
     unsigned int ignore_cc:1;
-    unsigned int intercepted:1; /**< transparently intercepted request */
+    unsigned int intercepted:1; ///< intercepted request
+    unsigned int hostVerified:1; ///< whether the Host: header passed verification
     unsigned int spoof_client_ip:1; /**< spoof client ip if possible */
     unsigned int internal:1;
     unsigned int internalclient:1;
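The new doc-comments on request_flags rely on the read/write split also noted in the client_side_request.cc hunk: nocache gates whether a reply may be read from the cache, while cachable gates whether it may be stored into it. A minimal illustration under that assumption follows; the helper names are hypothetical and not part of Squid's cache interface.

// Minimal illustration of the read/write split between the two flags;
// hypothetical helpers, not Squid code.
#include <iostream>

struct RequestFlags {
    bool nocache = false; // when set, do not satisfy this request FROM the cache
    bool cachable = true; // when cleared, do not store the reply INTO the cache
};

bool mayReadFromCache(const RequestFlags &f) { return !f.nocache; }
bool mayStoreInCache(const RequestFlags &f)  { return f.cachable; }

int main()
{
    // Loose host verification failed: the reply may still be served to this client,
    // but it must not be stored where other clients could later be handed it.
    RequestFlags looseVerifyFailed;
    looseVerifyFailed.cachable = false;

    std::cout << std::boolalpha
              << mayReadFromCache(looseVerifyFailed) << '\n' // true: cache reads unaffected
              << mayStoreInCache(looseVerifyFailed) << '\n'; // false: cache writes blocked
}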