---------------------
PatchSet 2197
Date: 2001/05/04 13:43:39
Author: rbcollins
Branch: newhttp
Tag: (none)
Log:
first block from server processed

Members:
	src/HttpRequest.c:1.1.1.3.8.2.4.2.2.1->1.1.1.3.8.2.4.2.2.2
	src/broker.c:1.1.2.5->1.1.2.6
	src/client_side.c:1.1.1.3.4.1.4.15.2.34.2.4->1.1.1.3.4.1.4.15.2.34.2.5
	src/http.c:1.1.1.3.4.1.4.12.2.16.2.4->1.1.1.3.4.1.4.12.2.16.2.5
	src/structs.h:1.1.1.3.4.1.4.12.2.26.2.4->1.1.1.3.4.1.4.12.2.26.2.5

Index: squid/src/HttpRequest.c
===================================================================
RCS file: /cvsroot/squid-sf//squid/src/HttpRequest.c,v
retrieving revision 1.1.1.3.8.2.4.2.2.1
retrieving revision 1.1.1.3.8.2.4.2.2.2
diff -u -r1.1.1.3.8.2.4.2.2.1 -r1.1.1.3.8.2.4.2.2.2
--- squid/src/HttpRequest.c	1 May 2001 09:02:51 -0000	1.1.1.3.8.2.4.2.2.1
+++ squid/src/HttpRequest.c	4 May 2001 13:43:39 -0000	1.1.1.3.8.2.4.2.2.2
@@ -1,6 +1,6 @@
 /*
- * $Id: HttpRequest.c,v 1.1.1.3.8.2.4.2.2.1 2001/05/01 09:02:51 rbcollins Exp $
+ * $Id: HttpRequest.c,v 1.1.1.3.8.2.4.2.2.2 2001/05/04 13:43:39 rbcollins Exp $
 *
 * DEBUG: section 73    HTTP Request
 * AUTHOR: Duane Wessels
@@ -125,13 +125,14 @@
 }
 
 /* packs request-line and headers, appends terminator */
+/* FIXME: HTTP version should be a parameter */
 void
 httpRequestPack(const request_t * req, Packer * p)
 {
     assert(req && p);
     /* pack request-line */
     packerPrintf(p, "%s %s HTTP/1.0\r\n",
-	RequestMethodStr[req->method], strBuf(req->urlpath));
+	RequestMethodStr[req->method], strBuf(req->urlpath) ? strBuf(req->urlpath) : "/" );
     /* headers */
     httpHeaderPackInto(&req->header, p);
     /* trailer */
Index: squid/src/broker.c
===================================================================
RCS file: /cvsroot/squid-sf//squid/src/Attic/broker.c,v
retrieving revision 1.1.2.5
retrieving revision 1.1.2.6
diff -u -r1.1.2.5 -r1.1.2.6
--- squid/src/broker.c	2 May 2001 08:22:05 -0000	1.1.2.5
+++ squid/src/broker.c	4 May 2001 13:43:39 -0000	1.1.2.6
@@ -1,6 +1,6 @@
 /*
- * $Id: broker.c,v 1.1.2.5 2001/05/02 08:22:05 rbcollins Exp $
+ * $Id: broker.c,v 1.1.2.6 2001/05/04 13:43:39 rbcollins Exp $
 *
 * DEBUG: section 33    Client-side Routines
 * AUTHOR: Robert Collins
@@ -404,7 +404,7 @@
 	    filterChainAdd(&http->repfilters, identity_body, broker_callback_hdr ,NULL,http);
 	    temp_filter=BrokerState->filters->node.next->data;
 	    rvflags = temp_filter->filter_hdr(BrokerState->rep,BrokerState->request,BrokerState->filter_list,temp_filter, BrokerState->flags, temp_filter->data);
-	    debug (3,3)("hroker got rvflags of %0x when forwarding the request\n",rvflags);
+	    debug (3,3)("hroker got rvflags of %0x when forwarding the request; state data http is %0p\n",rvflags, http);
 	    cbdataUnlock(BrokerState);
 	    return;
 	    break;
Index: squid/src/client_side.c
===================================================================
RCS file: /cvsroot/squid-sf//squid/src/client_side.c,v
retrieving revision 1.1.1.3.4.1.4.15.2.34.2.4
retrieving revision 1.1.1.3.4.1.4.15.2.34.2.5
diff -u -r1.1.1.3.4.1.4.15.2.34.2.4 -r1.1.1.3.4.1.4.15.2.34.2.5
--- squid/src/client_side.c	1 May 2001 23:32:18 -0000	1.1.1.3.4.1.4.15.2.34.2.4
+++ squid/src/client_side.c	4 May 2001 13:43:39 -0000	1.1.1.3.4.1.4.15.2.34.2.5
@@ -1,6 +1,6 @@
 /*
- * $Id: client_side.c,v 1.1.1.3.4.1.4.15.2.34.2.4 2001/05/01 23:32:18 rbcollins Exp $
+ * $Id: client_side.c,v 1.1.1.3.4.1.4.15.2.34.2.5 2001/05/04 13:43:39 rbcollins Exp $
 *
 * DEBUG: section 33    Client-side Routines
 * AUTHOR: Duane Wessels
@@ -3155,7 +3155,7 @@
     clientHttpRequest *http=data;
     request_t *r = http->request;
     /* Trace with a hop count of 0 is a local 
       method */
-    if (r->max_forwards < 1) {
+    if (r->method == METHOD_TRACE && r->max_forwards < 1) {
 	/* set the source and start the flow */
 	filterChainAdd(&http->repfilters, http_trace_body,http_trace_hdr,NULL,http);
Index: squid/src/http.c
===================================================================
RCS file: /cvsroot/squid-sf//squid/src/http.c,v
retrieving revision 1.1.1.3.4.1.4.12.2.16.2.4
retrieving revision 1.1.1.3.4.1.4.12.2.16.2.5
diff -u -r1.1.1.3.4.1.4.12.2.16.2.4 -r1.1.1.3.4.1.4.12.2.16.2.5
--- squid/src/http.c	3 May 2001 14:09:30 -0000	1.1.1.3.4.1.4.12.2.16.2.4
+++ squid/src/http.c	4 May 2001 13:43:39 -0000	1.1.1.3.4.1.4.12.2.16.2.5
@@ -1,6 +1,6 @@
 /*
- * $Id: http.c,v 1.1.1.3.4.1.4.12.2.16.2.4 2001/05/03 14:09:30 rbcollins Exp $
+ * $Id: http.c,v 1.1.1.3.4.1.4.12.2.16.2.5 2001/05/04 13:43:39 rbcollins Exp $
 *
 * DEBUG: section 11    Hypertext Transfer Protocol (HTTP)
 * AUTHOR: Harvest Derived
@@ -56,29 +56,58 @@
 static void httpMaybeRemovePublic(StoreEntry *, http_status);
 
 /* this will go in a header when this file becomes a module */
+
+typedef struct _iobuf iobuf;
+typedef struct _buf4k buf4k;
+
 struct _HttpStateData {
     StoreEntry *entry;
     request_t *request;
     char *reply_hdr;
     size_t reply_hdr_size;
     int reply_hdr_state;
-    size_t written;
-    size_t bodysize;
     peer *peer;			/* peer request made to */
     int eof;			/* reached end-of-object? */
     request_t *orig_request;
     int fd;
     http_state_flags flags;
     FwdState *fwd;
-    dlink_list filters;
+    /* filter_hdr variables */
+    HttpReply *rep;
+    clientHttpRequest *hcrequest;
+    /* filter data variables */
+    /* buf is not needed - immediately used */
+    /* offset is needed. */
+    /* size isn't - its returned by the CWCB */
+    /* filter variables */
+    dlink_list * filter_list;
+    FILTER_list * filters;
+    unsigned int filterflags;
+    dlink_list oldfilters;
     /* How far into the network object are we? */
     size_t read_offset;
     /* this should be for the ReplyHeaders filter */
     char *headerstore;
     size_t headerlength;
+    iobuf *readbuf;
 };
+
+struct _iobuf {
+    /* used when passing an already alloced buffer to an io routine */
+    off_t offset;
+    /* sizeof(*buffer) */
+    size_t size;
+    /* well duh! */
+    void * buffer;
+};
+
+struct _buf4k {
+    char data[4096];
+};
 
 CBDATA_TYPE(HttpStateData);
+CBDATA_TYPE(iobuf);
+CBDATA_TYPE(buf4k);
 
 /* temporary function */
 void
@@ -110,7 +139,7 @@
 #endif
     if (httpState == NULL)
 	return;
-    filterCleanChain(&httpState->filters);
+    filterCleanChain(&httpState->oldfilters);
     storeUnlockObject(httpState->entry);
     if (httpState->reply_hdr) {
 	memFree(httpState->reply_hdr, MEM_8K_BUF);
@@ -795,16 +824,16 @@
 	httpProcessReplyHeader(httpState, buf, len);
 	if (httpState->reply_hdr_state == 2) {
 	    http_status s = entry->mem_obj->reply->sline.status;
-	    te_build_decode_xlate_list(&entry->mem_obj->reply->header, &httpState->filters);
+	    te_build_decode_xlate_list(&entry->mem_obj->reply->header, &httpState->oldfilters);
 	    /* test for EOF condition (has to happen after content decoding! 
	     */
-	    filterChainAddTail(&httpState->filters,httpPconnTransferDone,identity_header, NULL,httpState);
+	    filterChainAddTail(&httpState->oldfilters,httpPconnTransferDone,identity_header, NULL,httpState);
 	    /* apply user configured filters */
-	    filterBuildChain(Config.response_filters, &httpState->filters, NULL, entry->mem_obj->reply , request);
+	    filterBuildChain(Config.response_filters, &httpState->oldfilters, NULL, entry->mem_obj->reply , request);
 	    /* Send the data to the store manager */
-	    filterChainAddTail(&httpState->filters,httpDoAppend,identity_header, NULL, httpState);
+	    filterChainAddTail(&httpState->oldfilters,httpDoAppend,identity_header, NULL, httpState);
 	    /* process the headers */
 //	    filterChainAddTail(&httpState->filters,httpDoAppendHeaders, identity_header, NULL,httpState);
@@ -860,9 +889,9 @@
 	}
 	debug(1,1)("sending combined headers\n");
 	/* todo: split this into two calls like it is below */
-	assert(httpState->filters.head);
-	temp_filter=httpState->filters.head->data;
-	rvflags |= temp_filter->filter(httpState->headerstore, httpState->reply_hdr_size, -1, &httpState->filters, temp_filter, flags | FILTER_HTTP_HEADER, temp_filter->data);
+	assert(httpState->oldfilters.head);
+	temp_filter=httpState->oldfilters.head->data;
+	rvflags |= temp_filter->filter(httpState->headerstore, httpState->reply_hdr_size, -1, &httpState->oldfilters, temp_filter, flags | FILTER_HTTP_HEADER, temp_filter->data);
 	httpState->read_offset=0;
 
 	if (httpState->headerlength-httpState->reply_hdr_size) {
@@ -874,7 +903,7 @@
 	    debug(1,1)("headers (%d bytes) written, sending body (%d of %d)\n",
 		httpState->reply_hdr_size, httpState->headerlength-httpState->reply_hdr_size,httpState->headerlength);
-	    rvflags |= temp_filter->filter(httpState->headerstore+httpState->reply_hdr_size, httpState->headerlength-httpState->reply_hdr_size, -1, &httpState->filters, temp_filter, flags , temp_filter->data);
+	    rvflags |= temp_filter->filter(httpState->headerstore+httpState->reply_hdr_size, httpState->headerlength-httpState->reply_hdr_size, -1, &httpState->oldfilters, temp_filter, flags , temp_filter->data);
 	    httpState->read_offset+=len-hdr_len;
 	} else
 	    debug(1,1)("headers (%d bytes) written, skipping body (%d of %d) due to filter return flags %d\n",httpState->reply_hdr_size,0,httpState->headerlength, rvflags);
@@ -884,10 +913,10 @@
 	    /* no partial headers, got them in one chunk */
 	    debug(1,1)("headers in one packet... sending %d bytes\n",
 		hdr_len);
-	    assert(httpState->filters.head);
-	    temp_filter=httpState->filters.head->data;
+	    assert(httpState->oldfilters.head);
+	    temp_filter=httpState->oldfilters.head->data;
 	    rvflags |= temp_filter->filter(buf, hdr_len, -1,
-		&httpState->filters, temp_filter, flags | FILTER_HTTP_HEADER,
+		&httpState->oldfilters, temp_filter, flags | FILTER_HTTP_HEADER,
 		temp_filter->data);
 	    httpState->read_offset=0;
 	    if (!(rvflags & (FILTER_EOF | FILTER_ABORT))) {
@@ -895,7 +924,7 @@
 		    hdr_len, len-hdr_len,len);
 		if (len-hdr_len) {
-		    rvflags |= temp_filter->filter(buf+hdr_len,len-hdr_len, httpState->read_offset, &httpState->filters, temp_filter, flags , temp_filter->data);
+		    rvflags |= temp_filter->filter(buf+hdr_len,len-hdr_len, httpState->read_offset, &httpState->oldfilters, temp_filter, flags , temp_filter->data);
 		    httpState->read_offset+=len-hdr_len;
 		}
 	    } else
@@ -907,9 +936,9 @@
 	    /* headers have been seen. 
	     */
 	    debug(1,1)("headers previously written \n");
-	    assert(httpState->filters.head);
-	    temp_filter=httpState->filters.head->data;
-	    rvflags |= temp_filter->filter(buf, len, httpState->read_offset, &httpState->filters, temp_filter, flags, temp_filter->data);
+	    assert(httpState->oldfilters.head);
+	    temp_filter=httpState->oldfilters.head->data;
+	    rvflags |= temp_filter->filter(buf, len, httpState->read_offset, &httpState->oldfilters, temp_filter, flags, temp_filter->data);
 	    httpState->read_offset+=len;
 	}
 	debug(1,1)("*** HTTP RAW READ OFFSET NOW %d\n",httpState->read_offset);
@@ -1330,22 +1359,440 @@
     comm_write_mbuf(httpState->fd, mb, sendHeaderDone, httpState);
 }
 
+/* we've got data from the server. It might be a new request,or part of an existing
+ * request. Later on we can look at different read callbacks for in progress requests
+ */
+static void
+httpCommReadComplete(int fd, char *buf, off_t offset, size_t size, int howmuch, int flag,
+    void *data)
+{
+    HttpStateData *http=data;
+    size_t headerlen;
+    /* we can't have a reply yet! - take me out when the first packet gets through */
+    assert(!http->rep);
+    assert(http->readbuf);
+    assert(http->readbuf->buffer);
+    assert(offset==http->readbuf->offset);
+    assert(size=http->readbuf->size);
+    /* we may have fun parsing pipelined responses without memcpy - we really need
+       length = how much is in the buffer from offset on
+       offset = our current start point in the buffer (incremented as we process responses
+       size = the buffer size
+       buf = the buffer data
+       or possibly, move size into buf...
+     */
+    if (http->rep)
+	/* possibly just a case of call the filter chain... */
+	fatal("reply present - don't know how to handle this\n");
+
+    headerlen = headersEnd(buf+offset, size);
+    if (headerlen) {
+	http->rep=httpReplyCreate();
+	if (httpReplyParse(http->rep, buf+offset, headerlen)) {
+	    /* process the request */
+	    /* TODO: filter the server-side only things (ie connection header...) */
+	    /* TODO 2: add the server-side filters (ie TE) */
+	    FILTER_list *temp_filter;
+	    unsigned int rvflags, eofflag;
+	    temp_filter=http->hcrequest->repfilters.head->data;
+	    http->readbuf->offset+=howmuch;
+	    /* FIXME: this is broken if the server writes the headers in a different
+	     *packet to the first body data: we need to check the status line */
+	    eofflag = http->readbuf->offset-headerlen ? FILTER_EOF : 0;
+	    rvflags = temp_filter->filter_hdr(http->rep, http->hcrequest ,&http->hcrequest->repfilters,temp_filter, eofflag, temp_filter->data);
+	    debug (33,3)("Process request got flags %0x from rep_header\n", rvflags);
+	    if (http->readbuf->offset-headerlen
+		&& !(rvflags & (FILTER_ABORT | FILTER_EOF))) {
+		temp_filter=http->hcrequest->repfilters.head->data;
+		/* offset total length in buffer */
+		rvflags=temp_filter->filter(http->readbuf->buffer+http->readbuf->offset+headerlen-howmuch,http->readbuf->offset-headerlen,0,&http->hcrequest->repfilters,
+		    temp_filter,0,temp_filter->data);
+		debug (33,3)("Process request got flags %0x from rep_filter\n", rvflags);
+	    }
+	    /* all data written */
+	    cbdataUnlock(http->readbuf);	/* FIXME: make iobuf unlock the buffer */
+	    http->readbuf=NULL;
+
+
+	}
+    } else {
+	/* couldn't parse the reply */
+	http->readbuf->offset+=howmuch;
+	if (http->readbuf->offset >= http->readbuf->size)
+	    fatalf("more than buffer size (%d) without a parsable response header\n",http->readbuf->size);
+    }
+    /* it's up to the filter chain to schedule another read */
+}
+
+/* push the body up to the server */
+DATAFILTER_FILTER(http_server_req_body)
+{
+    /* check for EOF or content length overrun */
+    /* check offset is valid. (=httpstate offset)
+       update httpstate offset - it is not updated by the commcomplete routine */
+    return 0;
+}
+
+/* we could check for request->content_length... but what if that gets... corrupted?
+ */
+DATAFILTER_FILTER(http_server_unexpected_body)
+{
+    fatal("unexpected body.. abort request/reply cleanly here\n");
+    return 0;
+}
+
+/* the header has been called.. should never be again
+ * remove me once stable code
+ */
+DATAFILTER_FILTERHEADER(http_trap)
+{
+    fatal("We should never have been called\n");
+    return 0;
+}
+
+static CWCB httpCommWriteComplete;
+static CRCB httpCommReadComplete;
+
+/* we are the last in the filter chain. Always. */
+/* Unknowns: how do we detect early server writes: for example if the server rejects our
+ * header (ie content_length too long)
+ */
+static void
+httpCommWriteComplete(int fd, char *bufnotused, size_t size, int errflag, void *data)
+{
+    HttpStateData *http=data;
+    iobuf *readbuf;
+    if (http->hcrequest->request->content_length && ~(http->filterflags & FILTER_EOF )) {
+	/* there is a body.. and we haven't finished */
+//	hcrequest->read(http->hcrequest);
+    }
+    /* there is no more body */
+    /* allocate a buffer. This is annoying: could comm_read allocate it for us?
+     * (when we pass a NULL buf) */
+    CBDATA_INIT_TYPE(iobuf);
+    CBDATA_INIT_TYPE(buf4k);
+    readbuf = cbdataAlloc(iobuf);
+    assert(readbuf);
+    memset(readbuf,'\0', sizeof(iobuf));
+    readbuf->size = sizeof(buf4k);
+    readbuf->buffer = cbdataAlloc(buf4k);
+    assert(readbuf->buffer);
+    cbdataLock(readbuf->buffer);
+    /* this has to be empty ! */
+    assert(!http->readbuf);
+    http->readbuf=readbuf;
+    cbdataLock(http->readbuf);
+    comm_read(http->fd, readbuf->buffer, readbuf->offset, readbuf->size, httpCommReadComplete, http);
+}
+
+/* helper function for http_server_req. 
+ * We modify the original request. Why? speed.
+ * This raises a general issue:
+ *
+ * the common-to-all-upstream headers should be processed once.
+ * then we process the per-upstream headers on each forward.
+ * this either requires a filter for the one-time which we remove, or
+ * a flag in the httpstate/request variable. 
+ * TODO: DO THIS BEFORE THE FATALS RECOMMENDED BELOW
+ */
+void
+httpPrepareRequestHeader(request_t * request,
+    http_state_flags flags)
+{
+    /* building buffer for complex strings */
+#define BBUF_SZ (MAX_URL+32)
+    LOCAL_ARRAY(char, bbuf, BBUF_SZ);
+    String strConnection = StringNull;
+    int we_do_ranges;
+    const HttpHeaderEntry *e;
+    const int hops;
+    String strVia;
+    String strFwd;
+    HttpHeaderPos pos = HttpHeaderInitPos;
+
+#if 0
+    httpHeaderInit(hdr_out, hoRequest);
+    /* append our IMS header */
+    if (request->lastmod > -1 && request->method == METHOD_GET)
+	httpHeaderPutTime(hdr_out, HDR_IF_MODIFIED_SINCE, request->lastmod);
+#endif
+
+    /* decide if we want to do Ranges ourselves
+     * (and fetch the whole object now)
+     * We want to handle Ranges ourselves iff
+     *    - we can actually parse client Range specs
+     *    - the specs are expected to be simple enough (e.g. no out-of-order ranges)
+     *    - reply will be cachable
+     * (If the reply will be uncachable we have to throw it away after
+     *  serving this request, so it is better to forward ranges to
+     *  the server and fetch only the requested content)
+     */
+
+/* Dump the headers - debugging purposes only*/
+{
+	HttpHeaderPos pos = HttpHeaderInitPos;
+	const HttpHeaderEntry *e;
+	debug(55, 7) ("packing hdr: (%p)\n", request->header);
+	/* pack all entries one by one */
+	while ((e = httpHeaderGetEntry(&request->header, &pos)))
+printf("%s: %s\r\n",strBuf(e->name),strBuf(e->value));
+}
+
+    if (NULL == request->range)
+	we_do_ranges = 0;
+    else if (!request->flags.cachable)
+	we_do_ranges = 0;
+    else if (httpHdrRangeOffsetLimit(request->range))
+	we_do_ranges = 0;
+    else
+	we_do_ranges = 1;
+    debug(11, 8) ("httpPrepareRequestHeader: range specs: %p, cachable: %d; we_do_ranges: %d
+\n",
+	request->range, request->flags.cachable, we_do_ranges);
+
+/* remove any and all headers we don't want here - so that the loop isn't broken */
+if (httpHeaderHas(&request->header, HDR_MAX_FORWARDS)) {
+    hops = httpHeaderGetInt(&request->header, HDR_MAX_FORWARDS);
+    httpHeaderDelById(&request->header, HDR_MAX_FORWARDS);
+    /* sacrificing efficiency over clarity, etc. */
+    /* why don't we check for 0? because we are never reached for 0 */
+    httpHeaderPutInt(&request->header, HDR_MAX_FORWARDS, hops - 1);
+}
+if (we_do_ranges) {
+    httpHeaderDelById(&request->header, HDR_RANGE);
+    httpHeaderDelById(&request->header, HDR_IF_RANGE);
+    httpHeaderDelById(&request->header, HDR_REQUEST_RANGE);
+}
+httpHeaderDelById(&request->header, HDR_PROXY_CONNECTION);
+httpHeaderDelById(&request->header, HDR_CONNECTION);
+
+    strConnection = httpHeaderGetList(&request->header, HDR_CONNECTION);
+    while ((e = httpHeaderGetEntry(&request->header, &pos))) {
+	debug(11, 5) ("httpPrepareRequestHeader: %s: %s\n",
+	    strBuf(e->name), strBuf(e->value));
+	if (!httpRequestHdrAllowed(e, &strConnection)) {
+	    debug(11, 2) ("'%s' header denied by anonymize_headers configuration\n",
+		strBuf(e->name));
+	    continue;
+	}
+	switch (e->id) {
+	case HDR_PROXY_AUTHORIZATION:
+	    /* FIXME: client side todo: remove the auth if it's not needed.
+	     * Then we simply test: is it present? no->add peer credentials */
+	    /* Only pass on proxy authentication to peers for which
+	     * authentication forwarding is explicitly enabled
+	     */
+#if 0
+	    if (request->flags.proxying && request->peer_login &&
+		strcmp(request->peer_login, "PASS") == 0) {
+		httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e));
+	    }
+
+#endif
+	    break;
+	case HDR_AUTHORIZATION:
+	    /* FIXME: is there _ever_ anything to do here? */
+	    /* Pass on WWW authentication even if used locally. If this is
+	     * not wanted in an accelerator then the header can be removed
+	     * using the anonymization functions
+	     */
+#if 0
+	    httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e));
+#endif
+	    /* XXX Some accelerators might want to strip the header
+	     * and regard the reply as cacheable, but authentication
+	     * is not normally enabled for accelerators without reading
+	     * the code, so there is not much use in adding logics here
+	     * without first defining the concept of having authentication
+	     * in the accelerator...
+	     */
+	    break;
+	case HDR_HOST:
+	    /*
+	     * Normally Squid does not copy the Host: header from
+	     * a client request into the forwarded request headers.
+	     * However, there is one case when we do: If the URL
+	     * went through our redirector and the admin configured
+	     * 'redir_rewrites_host' to be off.
+	     */
+	    /* FIXME: this seems broken: host is a required header for
+	     * HTTP/1.1
+	     */
+#if 0
+	    if (request->flags.redirected)
+		if (!Config.onoff.redir_rewrites_host)
+		    httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e));
+#endif
+	    break;
+	case HDR_IF_MODIFIED_SINCE:
+	    /* append unless we added our own;
+	     * note: at most one client's ims header can pass through */
+	    /* FIXME: nothing to see, move along. (The cache adds this header itself
+	     */
+#if 0
+	    if (!httpHeaderHas(&request->header, HDR_IF_MODIFIED_SINCE))
+		httpHeaderAddEntry(&request->header, httpHeaderEntryClone(e));
+#endif
+	    break;
+	case HDR_MAX_FORWARDS:
+	    break;
+	case HDR_RANGE:
+	case HDR_IF_RANGE:
+	case HDR_REQUEST_RANGE:
+	    /* we inherit the client range request */
+	    /* if we do the header logic, we have to delete the existing header */
+#if 0
+	    if (!we_do_ranges)
+		httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e));
+#endif
+	    break;
+	case HDR_PROXY_CONNECTION:
+	    /* FIXME: update client side to clean this, then fatal here to indicate a
+	     * broken client module
+	     */
+	    httpHeaderDelById(&request->header, HDR_PROXY_CONNECTION);
+	    break;
+	case HDR_CONNECTION:
+	    /* DITTO */
+	    httpHeaderDelById(&request->header, HDR_CONNECTION);
+	    break;
+	case HDR_VIA:
+	case HDR_X_FORWARDED_FOR:
+	case HDR_CACHE_CONTROL:
+	    /* append these after the loop if needed */
+	    /* FIXME: check what of these we are willing to pass on */
+	    break;
+	default:
+	    /* pass on all other header fields */
+	}
+    }
+
+    /* append TE */
+    {
+	char strTE[128];
+
+	/* FIXME: update client_side, then fatal here if the header is present */
+	httpHeaderDelById(&request->header, HDR_TE);	/* hop by hop.. that's what last
+							   hop could do*/
+	/* TODO: this should be built from a list of known types & a acl allowing type
+	 * to be used in requests vs responses
+	 */
+	strcpy (strTE,"chunked;q=1.0");
+
+	httpHeaderPutStr (&request->header,HDR_TE,strTE);
+	httpHeaderPutStr (&request->header,HDR_CONNECTION, "TE");	/* its hop by hop */
+
+    }
+
+    /* append Via */
+    /* DOES THIS WORK?! or do we need to delete and add the header ? */
+    strVia = httpHeaderGetList(&request->header, HDR_VIA);
+    /* this is broken: we want out http version don't we? */
+    snprintf(bbuf, BBUF_SZ, "%d.%d %s",
+	request->http_ver.major,
+	request->http_ver.minor, ThisCache);
+    strListAdd(&strVia, bbuf, ',');
+    stringClean(&strVia);
+#if 0
+    strVia = httpHeaderGetList(&request->header, HDR_VIA);
+    snprintf(bbuf, BBUF_SZ, "%d.%d %s",
+	orig_request->http_ver.major,
+	orig_request->http_ver.minor, ThisCache);
+    strListAdd(&strVia, bbuf, ',');
+    httpHeaderPutStr(hdr_out, HDR_VIA, strBuf(strVia));
+    stringClean(&strVia);
+#endif
+
+#if 0
+    /* the client side should choose to add this. We cannot depend on having a
+     * client. (think timed cache refreshes for instance)
+     */
+
+    /* append X-Forwarded-For */
+    strFwd = httpHeaderGetList(hdr_in, HDR_X_FORWARDED_FOR);
+    strListAdd(&strFwd, (cfd < 0 ? "unknown" : fd_table[cfd].ipaddr), ',');
+    httpHeaderPutStr(hdr_out, HDR_X_FORWARDED_FOR, strBuf(strFwd));
+    stringClean(&strFwd);
+#endif
+
+    /* append Host if not there already */
+    if (!httpHeaderHas(&request->header, HDR_HOST)) {
+	/* use port# only if not default */
+	if (request->port == urlDefaultPort(request->protocol)) {
+	    httpHeaderPutStr(&request->header, HDR_HOST, request->host);
+	} else {
+	    httpHeaderPutStrf(&request->header, HDR_HOST, "%s:%d",
+		request->host, (int) request->port);
+	}
+    }
+    /* append Authorization if known in URL, not in header and going direct */
+    if (!httpHeaderHas(&request->header, HDR_AUTHORIZATION)) {
+	if (!request->flags.proxying && *request->login) {
+	    httpHeaderPutStrf(&request->header, HDR_AUTHORIZATION, "Basic %s",
+		base64_encode(request->login));
+	}
+    }
+    /* Remember: the client side strips this if not needed */
+    /* append Proxy-Authorization if configured for peer, and proxying */
+    if (request->flags.proxying && request->peer_login &&
+	!httpHeaderHas(&request->header, HDR_PROXY_AUTHORIZATION) &&
+	strcmp(request->peer_login, "PASS") != 0) {
+	if (*request->peer_login == '*') {
+	    /* Special mode, to pass the username to the upstream cache */
+	    char loginbuf[256];
+	    char *username = "-";
+	    if (request->auth_user_request)
+		username = authenticateUserRequestUsername(request->auth_user_request);
+	    snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, request->peer_login + 1);
+	    httpHeaderPutStrf(&request->header, HDR_PROXY_AUTHORIZATION, "Basic %s",
+		base64_encode(loginbuf));
+	} else {
+	    httpHeaderPutStrf(&request->header, HDR_PROXY_AUTHORIZATION, "Basic %s",
+		base64_encode(request->peer_login));
+	}
+    }
+
+    /* maybe append Connection: keep-alive */
+    /* what about connection: TE ? ? -rob*/
+    if (flags.keepalive) {
+	if (flags.proxying) {
+	    httpHeaderPutStr(&request->header, HDR_PROXY_CONNECTION, "keep-alive");
+	} else {
+	    httpHeaderPutStr(&request->header, HDR_CONNECTION, "keep-alive");
+	}
+    }
+    /* Now mangle the headers. */
+    httpHdrMangleList(&request->header, request);
+    stringClean(&strConnection);
+}
+
+
 /* we're attached to an fd, and have a request to make */
 unsigned int
-http_server_req(HttpReply *rep, httpClientRequest *request,
+http_server_req(HttpReply *rep, clientHttpRequest *request,
     dlink_list * filter_list,FILTER_list * filters, unsigned int flags, void *data)
 {
-    cfd=-1;
-    /* todo:
-       build a request
-       if we expect a body, attach the body filter, otherwise attach a dummy spitting
-       filter.
-       write the request
-       remove us from the list (we've done our bit).
-       -->the header function only get called once.. duh!
-     */
-    fatal("end of code! (about to send request \n");
+    HttpStateData *http=data;
+    int cfd=-1;			/* we don't care about the client fd */
+    MemBuf mb;
+    /* FIXME: check we haven't been called to cleanup _before_ the write completed */
+    httpPrepareRequestHeader(request->request, http->flags);
+    mb = HttpRequestPackMemBuf(request->request);
+
+    if (request->request->content_length)
+	filterChainAddTail(filter_list, http_server_req_body, http_trap, NULL, data);
+    else
+	filterChainAddTail(filter_list, http_server_unexpected_body, http_trap, NULL, data);
+    comm_write_mbuf(http->fd, mb, httpCommWriteComplete, data);
+    printf("\n===\n%s===\n",mb.buf);
+    http->rep=rep;
+    http->hcrequest=request;
+    http->filter_list=filter_list;
+    http->filters=filters->node.next->data;
+    http->filterflags=flags;
+    dlinkDelete(&filters->node, filter_list);
+    xfree(filters);
 }
@@ -1377,7 +1824,7 @@
 	else if ((double) httpState->peer->stats.n_keepalives_recv / (double) httpState->peer->stats.n_keepalives_sent > 0.50)
 	    httpState->flags.keepalive = 1;
     if (httpState->peer &&
-	(neighborType(httpState->peer, httpState->request) == PEER_SIBLING &&
+	neighborType(httpState->peer, httpState->request) == PEER_SIBLING &&
 	!httpState->peer->options.allow_miss)
 	httpState->flags.only_if_cached = 1;
@@ -1394,7 +1841,7 @@
     * This may not be correct anymore: thanks to filters, one cancel should percolate 
     * through anyway. FIXME
     */
-    comm_add_close_handler(fd, httpStateFree, httpState);
+    comm_add_close_handler(httpState->fd, httpStateFree, httpState);
     statCounter.server.all.requests++;
     statCounter.server.http.requests++;
     return httpState;
Index: squid/src/structs.h
===================================================================
RCS file: /cvsroot/squid-sf//squid/src/structs.h,v
retrieving revision 1.1.1.3.4.1.4.12.2.26.2.4
retrieving revision 1.1.1.3.4.1.4.12.2.26.2.5
diff -u -r1.1.1.3.4.1.4.12.2.26.2.4 -r1.1.1.3.4.1.4.12.2.26.2.5
--- squid/src/structs.h	3 May 2001 14:09:30 -0000	1.1.1.3.4.1.4.12.2.26.2.4
+++ squid/src/structs.h	4 May 2001 13:43:39 -0000	1.1.1.3.4.1.4.12.2.26.2.5
@@ -1,6 +1,6 @@
 /*
- * $Id: structs.h,v 1.1.1.3.4.1.4.12.2.26.2.4 2001/05/03 14:09:30 rbcollins Exp $
+ * $Id: structs.h,v 1.1.1.3.4.1.4.12.2.26.2.5 2001/05/04 13:43:39 rbcollins Exp $
 *
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
@@ -1599,6 +1599,7 @@
     char *canonical;
     int link_count;		/* free when zero */
     request_flags flags;
+    ssize_t content_length;	/* -1 means post no size known, 0 means no body/zero length body, >0 means known content_length */
     HttpHdrCc *cache_control;
     HttpHdrRange *range;
     http_version_t http_ver;
@@ -1611,7 +1612,6 @@
     unsigned short my_port;
     HttpHeader header;
     ConnStateData *body_connection;	/* used by clientReadBody() */
-    int content_length;
     HierarchyLogEntry hier;
     err_type err_type;
     char *peer_login;		/* Configured peer login:password */
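
A quick note on what the new httpPrepareRequestHeader() in http.c does: instead of copying headers into a separate hdr_out list the way the older client-side code did, it edits the request's own header list in place before the request is packed -- Max-Forwards is decremented, hop-by-hop headers (Proxy-Connection, Connection, and anything the Connection header names) are stripped, and a "TE: chunked;q=1.0" plus a matching "Connection: TE" are added for the next hop. The standalone sketch below is not part of the patchset; it only illustrates that per-hop rewrite on a hypothetical header_t array, whereas the real code operates on Squid's HttpHeader list through the HDR_* ids.

/*
 * Standalone illustration only -- not code from this patchset.  It mimics the
 * per-hop edits httpPrepareRequestHeader() makes in place on the request:
 * drop hop-by-hop headers, decrement Max-Forwards, and offer "TE: chunked"
 * with a matching "Connection: TE".  The header_t table is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

typedef struct {
    char name[32];
    char value[64];
} header_t;

int
main(void)
{
    header_t hdrs[8] = {
	{"Host", "www.example.com"},
	{"Max-Forwards", "5"},
	{"Proxy-Connection", "keep-alive"}
    };
    int n = 3, i, out = 0;

    for (i = 0; i < n; i++) {
	/* hop-by-hop headers are never forwarded as-is */
	if (!strcasecmp(hdrs[i].name, "Proxy-Connection") ||
	    !strcasecmp(hdrs[i].name, "Connection") ||
	    !strcasecmp(hdrs[i].name, "TE"))
	    continue;
	if (!strcasecmp(hdrs[i].name, "Max-Forwards"))
	    /* the patch only reaches this point for values > 0 */
	    snprintf(hdrs[i].value, sizeof(hdrs[i].value), "%d",
		atoi(hdrs[i].value) - 1);
	hdrs[out++] = hdrs[i];
    }
    /* advertise chunked TE to the next hop and mark it hop-by-hop */
    strcpy(hdrs[out].name, "TE");
    strcpy(hdrs[out].value, "chunked;q=1.0");
    out++;
    strcpy(hdrs[out].name, "Connection");
    strcpy(hdrs[out].value, "TE");
    out++;

    for (i = 0; i < out; i++)
	printf("%s: %s\r\n", hdrs[i].name, hdrs[i].value);
    return 0;
}

Rewriting the request in place, rather than building a second header list, is exactly the speed trade-off the comment above httpPrepareRequestHeader() calls out.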
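The other half of the patch is the first cut of the server-side read path named in the log ("first block from server processed"): http_server_req() packs and writes the request, httpCommWriteComplete() allocates a cbdata-managed iobuf backed by a 4 KB buf4k and schedules comm_read(), and httpCommReadComplete() splits the first block returned by the server into reply headers (handed to httpReplyParse() and the filter_hdr chain) and whatever body bytes arrived in the same read (handed to the data filters). The sketch below is again standalone and not from the patch: find_headers_end() is a simplified stand-in for Squid's headersEnd(), and the buffer size merely mirrors buf4k.

/*
 * Standalone illustration only -- not code from this patchset.  It shows the
 * header/body split that httpCommReadComplete() performs on the first block
 * read from the server.  find_headers_end() is a simplified stand-in for
 * Squid's headersEnd(); READBUF_SZ mirrors struct _buf4k from the patch.
 */
#include <stdio.h>
#include <string.h>

#define READBUF_SZ 4096		/* same size as struct _buf4k in the patch */

/* Return the length of the header block including the blank line, or 0 if
 * the headers are not complete yet. */
static size_t
find_headers_end(const char *buf, size_t len)
{
    size_t i;
    for (i = 3; i < len; i++) {
	if (buf[i - 3] == '\r' && buf[i - 2] == '\n' &&
	    buf[i - 1] == '\r' && buf[i] == '\n')
	    return i + 1;
    }
    return 0;
}

int
main(void)
{
    /* pretend this arrived from a single comm_read() into the 4 KB buffer */
    const char *block =
	"HTTP/1.0 200 OK\r\n"
	"Content-Type: text/plain\r\n"
	"Content-Length: 5\r\n"
	"\r\n"
	"hello";
    char readbuf[READBUF_SZ];
    size_t used = strlen(block);	/* "howmuch" in the read callback */
    size_t hdr_len;

    memcpy(readbuf, block, used);
    hdr_len = find_headers_end(readbuf, used);
    if (hdr_len == 0) {
	/* headers incomplete: keep the data, bump the offset, read again */
	printf("need another read\n");
	return 0;
    }
    /* hdr_len bytes go to the reply parser (httpReplyParse in the patch); */
    /* anything after them is the first chunk of body for the data filters. */
    printf("header block: %zu bytes\n", hdr_len);
    printf("body bytes already in buffer: %zu\n", used - hdr_len);
    printf("body: %.*s\n", (int) (used - hdr_len), readbuf + hdr_len);
    return 0;
}

As the FIXME in httpCommReadComplete() notes, the real callback still assumes the status line and headers arrive in the first read; the "couldn't parse the reply" branch sketches the fuller behaviour of accumulating into the iobuf (bumping readbuf->offset) until headersEnd() succeeds.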