/*
 * Curl_urldecode() URL decodes the given string.
 *
 * Optionally detects control characters (byte codes lower than 32) in the
 * data and rejects such data.
 *
 * Returns a pointer to a malloced string in *ostring with length given in
 * *olen. If length == 0, the length is assumed to be strlen(string).
 */
CURLcode Curl_urldecode(struct SessionHandle *data,
                        const char *string, size_t length,
                        char **ostring, size_t *olen,
                        bool reject_ctrl)
{
  size_t alloc = (length?length:strlen(string))+1;
  char *ns = malloc(alloc);
  unsigned char in;
  size_t strindex=0;
  unsigned long hex;
  CURLcode res;

  if(!ns)
    return CURLE_OUT_OF_MEMORY;

  while(--alloc > 0) {
    in = *string;
    if(('%' == in) && (alloc > 2) &&
       ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
      /* this is two hexadecimal digits following a '%' */
      char hexstr[3];
      char *ptr;
      hexstr[0] = string[1];
      hexstr[1] = string[2];
      hexstr[2] = 0;

      hex = strtoul(hexstr, &ptr, 16);

      in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */

      res = Curl_convert_from_network(data, &in, 1);
      if(res) {
        /* Curl_convert_from_network calls failf if unsuccessful */
        free(ns);
        return res;
      }

      string+=2;
      alloc-=2;
    }

    if(reject_ctrl && (in < 0x20)) {
      free(ns);
      return CURLE_URL_MALFORMAT;
    }

    ns[strindex++] = in;
    string++;
  }
  ns[strindex]=0; /* terminate it */

  if(olen)
    /* store output size */
    *olen = strindex;

  if(ostring)
    /* store output string */
    *ostring = ns;

  return CURLE_OK;
}
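For orientation, here is a minimal caller sketch for the internal function above. It is hypothetical (not taken from the curl sources): the input literal and the demo_decode() wrapper are made up, and error handling is kept to the essentials.

/* Hypothetical illustration: how an internal caller might use
   Curl_urldecode() with control-byte rejection enabled. */
static CURLcode demo_decode(struct SessionHandle *data)
{
  char *decoded = NULL;
  size_t decoded_len = 0;
  CURLcode res = Curl_urldecode(data, "path%2Fwith%20space", 0,
                                &decoded, &decoded_len, TRUE);
  if(res)
    return res; /* e.g. CURLE_URL_MALFORMAT if a byte below 0x20 decodes */

  infof(data, "decoded %zu bytes: %s\n", decoded_len, decoded);
  free(decoded); /* caller owns the malloced result */
  return CURLE_OK;
}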
/* Curl_client_write() sends data to the write callback(s)

   The bit pattern defines to what "streams" to write to. Body and/or header.
   The defines are in sendf.h of course.

   If CURL_DO_LINEEND_CONV is enabled, data is converted IN PLACE to the
   local character encoding. This is a problem and should be changed in
   the future to leave the original data alone.
 */
CURLcode Curl_client_write(struct connectdata *conn,
                           int type,
                           char *ptr,
                           size_t len)
{
  struct Curl_easy *data = conn->data;

  if(0 == len)
    len = strlen(ptr);

  DEBUGASSERT(type <= 3);

  /* FTP data may need conversion. */
  if((type & CLIENTWRITE_BODY) &&
     (conn->handler->protocol & PROTO_FAMILY_FTP) &&
     conn->proto.ftpc.transfertype == 'A') {
    /* convert from the network encoding */
    CURLcode result = Curl_convert_from_network(data, ptr, len);
    /* Curl_convert_from_network calls failf if unsuccessful */
    if(result)
      return result;

#ifdef CURL_DO_LINEEND_CONV
    /* convert end-of-line markers */
    len = convert_lineends(data, ptr, len);
#endif /* CURL_DO_LINEEND_CONV */
  }

  return chop_write(conn, type, ptr, len);
}
/*
 * Unescapes the given URL escaped string of given length. Returns a
 * pointer to a malloced string with length given in *olen.
 * If length == 0, the length is assumed to be strlen(string).
 * If olen == NULL, no output length is stored.
 */
char *curl_easy_unescape(CURL *handle, const char *string, int length,
                         int *olen)
{
  int alloc = (length?length:(int)strlen(string))+1;
  char *ns = malloc(alloc);
  unsigned char in;
  int strindex=0;
  long hex;

#ifndef CURL_DOES_CONVERSIONS
  /* avoid compiler warnings */
  (void)handle;
#endif
  if(!ns)
    return NULL;

  while(--alloc > 0) {
    in = *string;
    if(('%' == in) && ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
      /* this is two hexadecimal digits following a '%' */
      char hexstr[3];
      char *ptr;
      hexstr[0] = string[1];
      hexstr[1] = string[2];
      hexstr[2] = 0;

      hex = strtol(hexstr, &ptr, 16);

      in = (unsigned char)hex; /* this long is never bigger than 255 anyway */

#ifdef CURL_DOES_CONVERSIONS
      /* escape sequences are always in ASCII so convert them on non-ASCII
         hosts */
      if(!handle ||
         (Curl_convert_from_network(handle, &in, 1) != CURLE_OK)) {
        /* Curl_convert_from_network calls failf if unsuccessful */
        free(ns);
        return NULL;
      }
#endif /* CURL_DOES_CONVERSIONS */

      string+=2;
      alloc-=2;
    }

    ns[strindex++] = in;
    string++;
  }
  ns[strindex]=0; /* terminate it */

  if(olen)
    /* store output size */
    *olen = strindex;

  return ns;
}
/*
 * Unescapes the given URL escaped string of given length. Returns a
 * pointer to a malloced string with length given in *olen.
 * If length == 0, the length is assumed to be strlen(string).
 * If olen == NULL, no output length is stored.
 */
char *curl_easy_unescape(CURL *handle, const char *string, int length,
                         int *olen)
{
  int alloc = (length?length:(int)strlen(string))+1;
  char *ns = malloc(alloc);
  unsigned char in;
  int strindex=0;
  unsigned long hex;
  CURLcode res;

  if(!ns)
    return NULL;

  while(--alloc > 0) {
    in = *string;
    if(('%' == in) && ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
      /* this is two hexadecimal digits following a '%' */
      char hexstr[3];
      char *ptr;
      hexstr[0] = string[1];
      hexstr[1] = string[2];
      hexstr[2] = 0;

      hex = strtoul(hexstr, &ptr, 16);

      in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */

      res = Curl_convert_from_network(handle, &in, 1);
      if(res) {
        /* Curl_convert_from_network calls failf if unsuccessful */
        free(ns);
        return NULL;
      }

      string+=2;
      alloc-=2;
    }

    ns[strindex++] = in;
    string++;
  }
  ns[strindex]=0; /* terminate it */

  if(olen)
    /* store output size */
    *olen = strindex;

  return ns;
}
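As a usage reference for the public entry point above, this is the standard libcurl calling pattern; curl_easy_unescape(), curl_free() and curl_easy_cleanup() are documented API, while the input literal and the demo_unescape() wrapper are only illustrative.

#include <curl/curl.h>

/* Example use of curl_easy_unescape(); the result must be released with
   curl_free(), not plain free(). */
static void demo_unescape(void)
{
  int outlen = 0;
  CURL *easy = curl_easy_init();
  if(!easy)
    return;

  {
    char *plain = curl_easy_unescape(easy, "100%25%20free", 0, &outlen);
    if(plain) {
      /* plain now holds "100% free" and outlen is 9 */
      curl_free(plain);
    }
  }
  curl_easy_cleanup(easy);
}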
CURLcode Curl_proxyCONNECT(struct connectdata *conn, int sockindex, const char *hostname, unsigned short remote_port) { int subversion=0; struct SessionHandle *data=conn->data; struct SingleRequest *k = &data->req; CURLcode result; long timeout = data->set.timeout?data->set.timeout:PROXY_TIMEOUT; /* in milliseconds */ curl_socket_t tunnelsocket = conn->sock[sockindex]; curl_off_t cl=0; bool closeConnection = FALSE; bool chunked_encoding = FALSE; long check; #define SELECT_OK 0 #define SELECT_ERROR 1 #define SELECT_TIMEOUT 2 int error = SELECT_OK; if(conn->tunnel_state[sockindex] == TUNNEL_COMPLETE) return CURLE_OK; /* CONNECT is already completed */ conn->bits.proxy_connect_closed = FALSE; do { if(TUNNEL_INIT == conn->tunnel_state[sockindex]) { /* BEGIN CONNECT PHASE */ char *host_port; Curl_send_buffer *req_buffer; infof(data, "Establish HTTP proxy tunnel to %s:%hu\n", hostname, remote_port); if(data->req.newurl) { /* This only happens if we've looped here due to authentication reasons, and we don't really use the newly cloned URL here then. Just free() it. */ free(data->req.newurl); data->req.newurl = NULL; } /* initialize a dynamic send-buffer */ req_buffer = Curl_add_buffer_init(); if(!req_buffer) return CURLE_OUT_OF_MEMORY; host_port = aprintf("%s:%hu", hostname, remote_port); if(!host_port) { free(req_buffer); return CURLE_OUT_OF_MEMORY; } /* Setup the proxy-authorization header, if any */ result = Curl_http_output_auth(conn, "CONNECT", host_port, TRUE); free(host_port); if(CURLE_OK == result) { char *host=(char *)""; const char *proxyconn=""; const char *useragent=""; const char *http = (conn->proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1"; char *hostheader= /* host:port with IPv6 support */ aprintf("%s%s%s:%hu", conn->bits.ipv6_ip?"[":"", hostname, conn->bits.ipv6_ip?"]":"", remote_port); if(!hostheader) { free(req_buffer); return CURLE_OUT_OF_MEMORY; } if(!Curl_checkheaders(data, "Host:")) { host = aprintf("Host: %s\r\n", hostheader); if(!host) { free(hostheader); free(req_buffer); return CURLE_OUT_OF_MEMORY; } } if(!Curl_checkheaders(data, "Proxy-Connection:")) proxyconn = "Proxy-Connection: Keep-Alive\r\n"; if(!Curl_checkheaders(data, "User-Agent:") && data->set.str[STRING_USERAGENT]) useragent = conn->allocptr.uagent; result = Curl_add_bufferf(req_buffer, "CONNECT %s HTTP/%s\r\n" "%s" /* Host: */ "%s" /* Proxy-Authorization */ "%s" /* User-Agent */ "%s", /* Proxy-Connection */ hostheader, http, host, conn->allocptr.proxyuserpwd? 
conn->allocptr.proxyuserpwd:"", useragent, proxyconn); if(host && *host) free(host); free(hostheader); if(CURLE_OK == result) result = Curl_add_custom_headers(conn, req_buffer); if(CURLE_OK == result) /* CRLF terminate the request */ result = Curl_add_bufferf(req_buffer, "\r\n"); if(CURLE_OK == result) { /* Send the connect request to the proxy */ /* BLOCKING */ result = Curl_add_buffer_send(req_buffer, conn, &data->info.request_size, 0, sockindex); } req_buffer = NULL; if(result) failf(data, "Failed sending CONNECT to proxy"); } Curl_safefree(req_buffer); if(result) return result; conn->tunnel_state[sockindex] = TUNNEL_CONNECT; /* now we've issued the CONNECT and we're waiting to hear back, return and get called again polling-style */ return CURLE_OK; } /* END CONNECT PHASE */ { /* BEGIN NEGOTIATION PHASE */ size_t nread; /* total size read */ int perline; /* count bytes per line */ int keepon=TRUE; ssize_t gotbytes; char *ptr; char *line_start; ptr=data->state.buffer; line_start = ptr; nread=0; perline=0; keepon=TRUE; while((nread<BUFSIZE) && (keepon && !error)) { /* if timeout is requested, find out how much remaining time we have */ check = timeout - /* timeout time */ Curl_tvdiff(Curl_tvnow(), conn->now); /* spent time */ if(check <= 0) { failf(data, "Proxy CONNECT aborted due to timeout"); error = SELECT_TIMEOUT; /* already too little time */ break; } /* loop every second at least, less if the timeout is near */ switch (Curl_socket_ready(tunnelsocket, CURL_SOCKET_BAD, check<1000L?check:1000)) { case -1: /* select() error, stop reading */ error = SELECT_ERROR; failf(data, "Proxy CONNECT aborted due to select/poll error"); break; case 0: /* timeout */ break; default: DEBUGASSERT(ptr+BUFSIZE-nread <= data->state.buffer+BUFSIZE+1); result = Curl_read(conn, tunnelsocket, ptr, BUFSIZE-nread, &gotbytes); if(result==CURLE_AGAIN) continue; /* go loop yourself */ else if(result) keepon = FALSE; else if(gotbytes <= 0) { keepon = FALSE; if(data->set.proxyauth && data->state.authproxy.avail) { /* proxy auth was requested and there was proxy auth available, then deem this as "mere" proxy disconnect */ conn->bits.proxy_connect_closed = TRUE; } else { error = SELECT_ERROR; failf(data, "Proxy CONNECT aborted"); } } else { /* * We got a whole chunk of data, which can be anything from one * byte to a set of lines and possibly just a piece of the last * line. */ int i; nread += gotbytes; if(keepon > TRUE) { /* This means we are currently ignoring a response-body */ nread = 0; /* make next read start over in the read buffer */ ptr=data->state.buffer; if(cl) { /* A Content-Length based body: simply count down the counter and make sure to break out of the loop when we're done! */ cl -= gotbytes; if(cl<=0) { keepon = FALSE; break; } } else { /* chunked-encoded body, so we need to do the chunked dance properly to know when the end of the body is reached */ CHUNKcode r; ssize_t tookcareof=0; /* now parse the chunked piece of data so that we can properly tell when the stream ends */ r = Curl_httpchunk_read(conn, ptr, gotbytes, &tookcareof); if(r == CHUNKE_STOP) { /* we're done reading chunks! 
*/ infof(data, "chunk reading DONE\n"); keepon = FALSE; /* we did the full CONNECT treatment, go COMPLETE */ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE; } else infof(data, "Read %zd bytes of chunk, continue\n", tookcareof); } } else for(i = 0; i < gotbytes; ptr++, i++) { perline++; /* amount of bytes in this line so far */ if(*ptr == 0x0a) { char letter; int writetype; /* convert from the network encoding */ result = Curl_convert_from_network(data, line_start, perline); /* Curl_convert_from_network calls failf if unsuccessful */ if(result) return result; /* output debug if that is requested */ if(data->set.verbose) Curl_debug(data, CURLINFO_HEADER_IN, line_start, (size_t)perline, conn); /* send the header to the callback */ writetype = CLIENTWRITE_HEADER; if(data->set.include_header) writetype |= CLIENTWRITE_BODY; result = Curl_client_write(conn, writetype, line_start, perline); data->info.header_size += (long)perline; data->req.headerbytecount += (long)perline; if(result) return result; /* Newlines are CRLF, so the CR is ignored as the line isn't really terminated until the LF comes. Treat a following CR as end-of-headers as well.*/ if(('\r' == line_start[0]) || ('\n' == line_start[0])) { /* end of response-headers from the proxy */ nread = 0; /* make next read start over in the read buffer */ ptr=data->state.buffer; if((407 == k->httpcode) && !data->state.authproblem) { /* If we get a 407 response code with content length when we have no auth problem, we must ignore the whole response-body */ keepon = 2; if(cl) { infof(data, "Ignore %" FORMAT_OFF_T " bytes of response-body\n", cl); /* remove the remaining chunk of what we already read */ cl -= (gotbytes - i); if(cl<=0) /* if the whole thing was already read, we are done! */ keepon=FALSE; } else if(chunked_encoding) { CHUNKcode r; /* We set ignorebody true here since the chunked decoder function will acknowledge that. Pay attention so that this is cleared again when this function returns! */ k->ignorebody = TRUE; infof(data, "%zd bytes of chunk left\n", gotbytes-i); if(line_start[1] == '\n') { /* this can only be a LF if the letter at index 0 was a CR */ line_start++; i++; } /* now parse the chunked piece of data so that we can properly tell when the stream ends */ r = Curl_httpchunk_read(conn, line_start+1, gotbytes -i, &gotbytes); if(r == CHUNKE_STOP) { /* we're done reading chunks! */ infof(data, "chunk reading DONE\n"); keepon = FALSE; /* we did the full CONNECT treatment, go to COMPLETE */ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE; } else infof(data, "Read %zd bytes of chunk, continue\n", gotbytes); } else { /* without content-length or chunked encoding, we can't keep the connection alive since the close is the end signal so we bail out at once instead */ keepon=FALSE; } } else { keepon = FALSE; if(200 == data->info.httpproxycode) { if(gotbytes - (i+1)) failf(data, "Proxy CONNECT followed by %zd bytes " "of opaque data. 
Data ignored (known bug #39)", gotbytes - (i+1)); } } /* we did the full CONNECT treatment, go to COMPLETE */ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE; break; /* breaks out of for-loop, not switch() */ } /* keep a backup of the position we are about to blank */ letter = line_start[perline]; line_start[perline]=0; /* zero terminate the buffer */ if((checkprefix("WWW-Authenticate:", line_start) && (401 == k->httpcode)) || (checkprefix("Proxy-authenticate:", line_start) && (407 == k->httpcode))) { result = Curl_http_input_auth(conn, k->httpcode, line_start); if(result) return result; } else if(checkprefix("Content-Length:", line_start)) { cl = curlx_strtoofft(line_start + strlen("Content-Length:"), NULL, 10); } else if(Curl_compareheader(line_start, "Connection:", "close")) closeConnection = TRUE; else if(Curl_compareheader(line_start, "Transfer-Encoding:", "chunked")) { infof(data, "CONNECT responded chunked\n"); chunked_encoding = TRUE; /* init our chunky engine */ Curl_httpchunk_init(conn); } else if(Curl_compareheader(line_start, "Proxy-Connection:", "close")) closeConnection = TRUE; else if(2 == sscanf(line_start, "HTTP/1.%d %d", &subversion, &k->httpcode)) { /* store the HTTP code from the proxy */ data->info.httpproxycode = k->httpcode; } /* put back the letter we blanked out before */ line_start[perline]= letter; perline=0; /* line starts over here */ line_start = ptr+1; /* this skips the zero byte we wrote */ } } } break; } /* switch */ if(Curl_pgrsUpdate(conn)) return CURLE_ABORTED_BY_CALLBACK; } /* while there's buffer left and loop is requested */ if(error) return CURLE_RECV_ERROR; if(data->info.httpproxycode != 200) { /* Deal with the possibly already received authenticate headers. 'newurl' is set to a new URL if we must loop. */ result = Curl_http_auth_act(conn); if(result) return result; if(conn->bits.close) /* the connection has been marked for closure, most likely in the Curl_http_auth_act() function and thus we can kill it at once below */ closeConnection = TRUE; } if(closeConnection && data->req.newurl) { /* Connection closed by server. Don't use it anymore */ Curl_closesocket(conn, conn->sock[sockindex]); conn->sock[sockindex] = CURL_SOCKET_BAD; break; } } /* END NEGOTIATION PHASE */ /* If we are supposed to continue and request a new URL, which basically * means the HTTP authentication is still going on so if the tunnel * is complete we start over in INIT state */ if(data->req.newurl && (TUNNEL_COMPLETE == conn->tunnel_state[sockindex])) { conn->tunnel_state[sockindex] = TUNNEL_INIT; infof(data, "TUNNEL_STATE switched to: %d\n", conn->tunnel_state[sockindex]); } } while(data->req.newurl); if(200 != data->req.httpcode) { failf(data, "Received HTTP code %d from proxy after CONNECT", data->req.httpcode); if(closeConnection && data->req.newurl) conn->bits.proxy_connect_closed = TRUE; if(data->req.newurl) { /* this won't be used anymore for the CONNECT so free it now */ free(data->req.newurl); data->req.newurl = NULL; } /* to back to init state */ conn->tunnel_state[sockindex] = TUNNEL_INIT; return CURLE_RECV_ERROR; } conn->tunnel_state[sockindex] = TUNNEL_COMPLETE; /* If a proxy-authorization header was used for the proxy, then we should make sure that it isn't accidentally used for the document request after we've connected. So let's free and clear it here. 
*/ Curl_safefree(conn->allocptr.proxyuserpwd); conn->allocptr.proxyuserpwd = NULL; data->state.authproxy.done = TRUE; infof (data, "Proxy replied OK to CONNECT request\n"); data->req.ignorebody = FALSE; /* put it (back) to non-ignore state */ conn->bits.rewindaftersend = FALSE; /* make sure this isn't set for the document request */ return CURLE_OK; }
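To make the tunnel handshake above concrete, this is roughly the request Curl_proxyCONNECT() assembles and the kind of response its negotiation loop parses; the host name, port and User-Agent value are made up for illustration.

/* Illustrative CONNECT exchange (no Proxy-Authorization header shown):

     CONNECT example.com:443 HTTP/1.1
     Host: example.com:443
     User-Agent: curl/7.x
     Proxy-Connection: Keep-Alive

     HTTP/1.1 200 Connection established

   A 407 response instead carries Proxy-Authenticate: headers and possibly a
   body (Content-Length based or chunked) that the loop must drain before the
   CONNECT can be retried with credentials. */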
/* * Curl_pp_readresp() * * Reads a piece of a server response. */ CURLcode Curl_pp_readresp(curl_socket_t sockfd, struct pingpong *pp, int *code, /* return the server code if done */ size_t *size) /* size of the response */ { ssize_t perline; /* count bytes per line */ bool keepon=TRUE; ssize_t gotbytes; char *ptr; struct connectdata *conn = pp->conn; struct SessionHandle *data = conn->data; char * const buf = data->state.buffer; CURLcode result = CURLE_OK; *code = 0; /* 0 for errors or not done */ *size = 0; ptr=buf + pp->nread_resp; /* number of bytes in the current line, so far */ perline = (ssize_t)(ptr-pp->linestart_resp); keepon=TRUE; while((pp->nread_resp<BUFSIZE) && (keepon && !result)) { if(pp->cache) { /* we had data in the "cache", copy that instead of doing an actual * read * * ftp->cache_size is cast to int here. This should be safe, * because it would have been populated with something of size * int to begin with, even though its datatype may be larger * than an int. */ DEBUGASSERT((ptr+pp->cache_size) <= (buf+BUFSIZE+1)); memcpy(ptr, pp->cache, pp->cache_size); gotbytes = pp->cache_size; free(pp->cache); /* free the cache */ pp->cache = NULL; /* clear the pointer */ pp->cache_size = 0; /* zero the size just in case */ } else { int res; #if defined(HAVE_KRB4) || defined(HAVE_GSSAPI) enum protection_level prot = conn->data_prot; conn->data_prot = 0; #endif DEBUGASSERT((ptr+BUFSIZE-pp->nread_resp) <= (buf+BUFSIZE+1)); res = Curl_read(conn, sockfd, ptr, BUFSIZE-pp->nread_resp, &gotbytes); #if defined(HAVE_KRB4) || defined(HAVE_GSSAPI) conn->data_prot = prot; #endif if(res < 0) /* EWOULDBLOCK */ return CURLE_OK; /* return */ #ifdef CURL_DOES_CONVERSIONS if((res == CURLE_OK) && (gotbytes > 0)) { /* convert from the network encoding */ res = Curl_convert_from_network(data, ptr, gotbytes); /* Curl_convert_from_network calls failf if unsuccessful */ } #endif /* CURL_DOES_CONVERSIONS */ if(CURLE_OK != res) { result = (CURLcode)res; /* Set outer result variable to this error. */ keepon = FALSE; } } if(!keepon) ; else if(gotbytes <= 0) { keepon = FALSE; result = CURLE_RECV_ERROR; failf(data, "FTP response reading failed"); } else { /* we got a whole chunk of data, which can be anything from one * byte to a set of lines and possible just a piece of the last * line */ ssize_t i; ssize_t clipamount = 0; bool restart = FALSE; data->req.headerbytecount += (long)gotbytes; pp->nread_resp += gotbytes; for(i = 0; i < gotbytes; ptr++, i++) { perline++; if(*ptr=='\n') { /* a newline is CRLF in ftp-talk, so the CR is ignored as the line isn't really terminated until the LF comes */ /* output debug output if that is requested */ #if defined(HAVE_KRB4) || defined(HAVE_GSSAPI) if(!conn->sec_complete) #endif if(data->set.verbose) Curl_debug(data, CURLINFO_HEADER_IN, pp->linestart_resp, (size_t)perline, conn); /* * We pass all response-lines to the callback function registered * for "headers". The response lines can be seen as a kind of * headers. */ result = Curl_client_write(conn, CLIENTWRITE_HEADER, pp->linestart_resp, perline); if(result) return result; if(pp->endofresp(pp, code)) { /* This is the end of the last line, copy the last line to the start of the buffer and zero terminate, for old times sake (and krb4)! 
*/ char *meow; int n; for(meow=pp->linestart_resp, n=0; meow<ptr; meow++, n++) buf[n] = *meow; *meow=0; /* zero terminate */ keepon=FALSE; pp->linestart_resp = ptr+1; /* advance pointer */ i++; /* skip this before getting out */ *size = pp->nread_resp; /* size of the response */ pp->nread_resp = 0; /* restart */ break; } perline=0; /* line starts over here */ pp->linestart_resp = ptr+1; } } if(!keepon && (i != gotbytes)) { /* We found the end of the response lines, but we didn't parse the full chunk of data we have read from the server. We therefore need to store the rest of the data to be checked on the next invoke as it may actually contain another end of response already! */ clipamount = gotbytes - i; restart = TRUE; } else if(keepon) { if((perline == gotbytes) && (gotbytes > BUFSIZE/2)) { /* We got an excessive line without newlines and we need to deal with it. We keep the first bytes of the line then we throw away the rest. */ infof(data, "Excessive server response line length received, %zd bytes." " Stripping\n", gotbytes); restart = TRUE; /* we keep 40 bytes since all our pingpong protocols are only interested in the first piece */ clipamount = 40; } else if(pp->nread_resp > BUFSIZE/2) { /* We got a large chunk of data and there's potentially still trailing data to take care of, so we put any such part in the "cache", clear the buffer to make space and restart. */ clipamount = perline; restart = TRUE; } } else if(i == gotbytes) restart = TRUE; if(clipamount) { pp->cache_size = clipamount; pp->cache = malloc(pp->cache_size); if(pp->cache) memcpy(pp->cache, pp->linestart_resp, pp->cache_size); else return CURLE_OUT_OF_MEMORY; } if(restart) { /* now reset a few variables to start over nicely from the start of the big buffer */ pp->nread_resp = 0; /* start over from scratch in the buffer */ ptr = pp->linestart_resp = buf; perline = 0; } } /* there was data */ } /* while there's buffer left and loop is requested */ pp->pending_resp = FALSE; return result; }
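For orientation, this is the kind of multi-line "pingpong" response the reader above splits at each LF; the FTP wording is illustrative. Each CRLF-terminated line is passed to Curl_client_write() as a header, and pp->endofresp() fires on the final "220 " line.

/* Illustrative FTP greeting as it may arrive in one or several reads:

     220-Welcome to the example FTP service.
     220-Anonymous access is read-only.
     220 Ready.
*/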
/* * chunk_read() returns a OK for normal operations, or a positive return code * for errors. STOP means this sequence of chunks is complete. The 'wrote' * argument is set to tell the caller how many bytes we actually passed to the * client (for byte-counting and whatever). * * The states and the state-machine is further explained in the header file. * * This function always uses ASCII hex values to accommodate non-ASCII hosts. * For example, 0x0d and 0x0a are used instead of '\r' and '\n'. */ CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap, ssize_t datalen, ssize_t *wrotep) { CURLcode result=CURLE_OK; struct SessionHandle *data = conn->data; struct Curl_chunker *ch = &conn->chunk; struct SingleRequest *k = &data->req; size_t piece; size_t length = (size_t)datalen; size_t *wrote = (size_t *)wrotep; *wrote = 0; /* nothing's written yet */ /* the original data is written to the client, but we go on with the chunk read process, to properly calculate the content length*/ if(data->set.http_te_skip && !k->ignorebody) { result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, datalen); if(result) return CHUNKE_WRITE_ERROR; } while(length) { switch(ch->state) { case CHUNK_HEX: if(Curl_isxdigit(*datap)) { if(ch->hexindex < MAXNUM_SIZE) { ch->hexbuffer[ch->hexindex] = *datap; datap++; length--; ch->hexindex++; } else { return CHUNKE_TOO_LONG_HEX; /* longer hex than we support */ } } else { if(0 == ch->hexindex) { /* This is illegal data, we received junk where we expected a hexadecimal digit. */ return CHUNKE_ILLEGAL_HEX; } /* length and datap are unmodified */ ch->hexbuffer[ch->hexindex]=0; #ifdef CURL_DOES_CONVERSIONS /* convert to host encoding before calling strtoul */ result = Curl_convert_from_network(conn->data, ch->hexbuffer, ch->hexindex); if(result != CURLE_OK) { /* Curl_convert_from_network calls failf if unsuccessful */ /* Treat it as a bad hex character */ return(CHUNKE_ILLEGAL_HEX); } #endif /* CURL_DOES_CONVERSIONS */ ch->datasize=strtoul(ch->hexbuffer, NULL, 16); ch->state = CHUNK_POSTHEX; } break; case CHUNK_POSTHEX: /* In this state, we're waiting for CRLF to arrive. We support this to allow so called chunk-extensions to show up here before the CRLF comes. */ if(*datap == 0x0d) ch->state = CHUNK_CR; length--; datap++; break; case CHUNK_CR: /* waiting for the LF */ if(*datap == 0x0a) { /* we're now expecting data to come, unless size was zero! */ if(0 == ch->datasize) { ch->state = CHUNK_TRAILER; /* now check for trailers */ conn->trlPos=0; } else { ch->state = CHUNK_DATA; } } else /* previously we got a fake CR, go back to CR waiting! */ ch->state = CHUNK_CR; datap++; length--; break; case CHUNK_DATA: /* we get pure and fine data We expect another 'datasize' of data. We have 'length' right now, it can be more or less than 'datasize'. Get the smallest piece. */ piece = (ch->datasize >= length)?length:ch->datasize; /* Write the data portion available */ #ifdef HAVE_LIBZ switch (conn->data->set.http_ce_skip? IDENTITY : data->req.content_encoding) { case IDENTITY: #endif if(!k->ignorebody) { if( !data->set.http_te_skip ) result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, piece); else result = CURLE_OK; } #ifdef HAVE_LIBZ break; case DEFLATE: /* update data->req.keep.str to point to the chunk data. */ data->req.str = datap; result = Curl_unencode_deflate_write(conn, &data->req, (ssize_t)piece); break; case GZIP: /* update data->req.keep.str to point to the chunk data. 
*/ data->req.str = datap; result = Curl_unencode_gzip_write(conn, &data->req, (ssize_t)piece); break; case COMPRESS: default: failf (conn->data, "Unrecognized content encoding type. " "libcurl understands `identity', `deflate' and `gzip' " "content encodings."); return CHUNKE_BAD_ENCODING; } #endif if(result) return CHUNKE_WRITE_ERROR; *wrote += piece; ch->datasize -= piece; /* decrease amount left to expect */ datap += piece; /* move read pointer forward */ length -= piece; /* decrease space left in this round */ if(0 == ch->datasize) /* end of data this round, we now expect a trailing CRLF */ ch->state = CHUNK_POSTCR; break; case CHUNK_POSTCR: if(*datap == 0x0d) { ch->state = CHUNK_POSTLF; datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_POSTLF: if(*datap == 0x0a) { /* * The last one before we go back to hex state and start all * over. */ Curl_httpchunk_init(conn); datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_TRAILER: if(*datap == 0x0d) { /* this is the end of a trailer, but if the trailer was zero bytes there was no trailer and we move on */ if(conn->trlPos) { /* we allocate trailer with 3 bytes extra room to fit this */ conn->trailer[conn->trlPos++]=0x0d; conn->trailer[conn->trlPos++]=0x0a; conn->trailer[conn->trlPos]=0; #ifdef CURL_DOES_CONVERSIONS /* Convert to host encoding before calling Curl_client_write */ result = Curl_convert_from_network(conn->data, conn->trailer, conn->trlPos); if(result != CURLE_OK) /* Curl_convert_from_network calls failf if unsuccessful */ /* Treat it as a bad chunk */ return CHUNKE_BAD_CHUNK; #endif /* CURL_DOES_CONVERSIONS */ if(!data->set.http_te_skip) { result = Curl_client_write(conn, CLIENTWRITE_HEADER, conn->trailer, conn->trlPos); if(result) return CHUNKE_WRITE_ERROR; } conn->trlPos=0; ch->state = CHUNK_TRAILER_CR; } else { /* no trailer, we're on the final CRLF pair */ ch->state = CHUNK_TRAILER_POSTCR; break; /* don't advance the pointer */ } } else { /* conn->trailer is assumed to be freed in url.c on a connection basis */ if(conn->trlPos >= conn->trlMax) { /* we always allocate three extra bytes, just because when the full header has been received we append CRLF\0 */ char *ptr; if(conn->trlMax) { conn->trlMax *= 2; ptr = realloc(conn->trailer, conn->trlMax + 3); } else { conn->trlMax=128; ptr = malloc(conn->trlMax + 3); } if(!ptr) return CHUNKE_OUT_OF_MEMORY; conn->trailer = ptr; } fprintf(stderr, "MOO: %c\n", *datap); conn->trailer[conn->trlPos++]=*datap; } datap++; length--; break; case CHUNK_TRAILER_CR: if(*datap == 0x0a) { ch->state = CHUNK_TRAILER_POSTCR; datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_TRAILER_POSTCR: /* We enter this state when a CR should arrive so we expect to have to first pass a CR before we wait for LF */ if(*datap != 0x0d) { /* not a CR then it must be another header in the trailer */ ch->state = CHUNK_TRAILER; break; } datap++; length--; /* now wait for the final LF */ ch->state = CHUNK_STOP; break; case CHUNK_STOPCR: /* Read the final CRLF that ends all chunk bodies */ if(*datap == 0x0d) { ch->state = CHUNK_STOP; datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_STOP: if(*datap == 0x0a) { length--; /* Record the length of any data left in the end of the buffer even if there's no more chunks to read */ ch->dataleft = length; return CHUNKE_STOP; /* return stop */ } else return CHUNKE_BAD_CHUNK; default: return CHUNKE_STATE_ERROR; } } return CHUNKE_OK; }
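The wire format that the state machine above walks through looks like the sketch below (the payload and trailer name are illustrative). Each chunk size is hexadecimal, and a zero-sized chunk introduces optional trailers followed by the final CRLF.

/* Illustrative chunked body as seen by Curl_httpchunk_read():

     4\r\n             hex size: CHUNK_HEX -> CHUNK_POSTHEX -> CHUNK_CR
     Wiki\r\n          4 bytes of data: CHUNK_DATA, then POSTCR/POSTLF
     5\r\n
     pedia\r\n
     0\r\n             zero size: enter CHUNK_TRAILER
     X-Note: done\r\n  optional trailer header
     \r\n              final CRLF: CHUNKE_STOP is returned
*/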
/* return 0 on success */
static int showit(struct SessionHandle *data, curl_infotype type,
                  char *ptr, size_t size)
{
  static const char s_infotype[CURLINFO_END][3] = {
    "* ", "< ", "> ", "{ ", "} ", "{ ", "} " };

#ifdef CURL_DOES_CONVERSIONS
  char buf[BUFSIZE+1];
  size_t conv_size = 0;

  switch(type) {
  case CURLINFO_HEADER_OUT:
    /* assume output headers are ASCII */
    /* copy the data into my buffer so the original is unchanged */
    if(size > BUFSIZE) {
      size = BUFSIZE; /* truncate if necessary */
      buf[BUFSIZE] = '\0';
    }
    conv_size = size;
    memcpy(buf, ptr, size);
    /* Special processing is needed for this block if it
     * contains both headers and data (separated by CRLFCRLF).
     * We want to convert just the headers, leaving the data as-is.
     */
    if(size > 4) {
      size_t i;
      for(i = 0; i < size-4; i++) {
        if(memcmp(&buf[i], "\x0d\x0a\x0d\x0a", 4) == 0) {
          /* convert everything through this CRLFCRLF but no further */
          conv_size = i + 4;
          break;
        }
      }
    }

    Curl_convert_from_network(data, buf, conv_size);
    /* Curl_convert_from_network calls failf if unsuccessful */
    /* we might as well continue even if it fails... */
    ptr = buf; /* switch pointer to use my buffer instead */
    break;
  default:
    /* leave everything else as-is */
    break;
  }
#endif /* CURL_DOES_CONVERSIONS */

  if(data->set.fdebug)
    return (*data->set.fdebug)(data, type, ptr, size, data->set.debugdata);

  switch(type) {
  case CURLINFO_TEXT:
  case CURLINFO_HEADER_OUT:
  case CURLINFO_HEADER_IN:
    fwrite(s_infotype[type], 2, 1, data->set.err);
    fwrite(ptr, size, 1, data->set.err);
#ifdef CURL_DOES_CONVERSIONS
    if(size != conv_size) {
      /* we had untranslated data so we need an explicit newline */
      fwrite("\n", 1, 1, data->set.err);
    }
#endif
    break;
  default: /* nada */
    break;
  }
  return 0;
}
/* Curl_client_write() sends data to the write callback(s)

   The bit pattern defines to what "streams" to write to. Body and/or header.
   The defines are in sendf.h of course.

   If CURL_DO_LINEEND_CONV is enabled, data is converted IN PLACE to the
   local character encoding. This is a problem and should be changed in
   the future to leave the original data alone.
 */
CURLcode Curl_client_write(struct connectdata *conn,
                           int type,
                           char *ptr,
                           size_t len)
{
  struct SessionHandle *data = conn->data;
  size_t wrote;

  if(0 == len)
    len = strlen(ptr);

  /* If reading is actually paused, we're forced to append this chunk of data
     to the already held data, but only if it is the same type as otherwise it
     can't work and it'll return error instead. */
  if(data->req.keepon & KEEP_RECV_PAUSE) {
    size_t newlen;
    char *newptr;
    if(type != data->state.tempwritetype)
      /* major internal confusion */
      return CURLE_RECV_ERROR;

    DEBUGASSERT(data->state.tempwrite);

    /* figure out the new size of the data to save */
    newlen = len + data->state.tempwritesize;
    /* allocate the new memory area */
    newptr = realloc(data->state.tempwrite, newlen);
    if(!newptr)
      return CURLE_OUT_OF_MEMORY;
    /* copy the new data to the end of the new area */
    memcpy(newptr + data->state.tempwritesize, ptr, len);
    /* update the pointer and the size */
    data->state.tempwrite = newptr;
    data->state.tempwritesize = newlen;
    return CURLE_OK;
  }

  if(type & CLIENTWRITE_BODY) {
    if((conn->handler->protocol&CURLPROTO_FTP) &&
       conn->proto.ftpc.transfertype == 'A') {
      /* convert from the network encoding */
      size_t rc = Curl_convert_from_network(data, ptr, len);
      /* Curl_convert_from_network calls failf if unsuccessful */
      if(rc)
        return rc;

#ifdef CURL_DO_LINEEND_CONV
      /* convert end-of-line markers */
      len = convert_lineends(data, ptr, len);
#endif /* CURL_DO_LINEEND_CONV */
    }
    /* If the previous block of data ended with CR and this block of data is
       just a NL, then the length might be zero */
    if(len) {
      wrote = data->set.fwrite_func(ptr, 1, len, data->set.out);
    }
    else {
      wrote = len;
    }

    if(CURL_WRITEFUNC_PAUSE == wrote)
      return pausewrite(data, type, ptr, len);

    if(wrote != len) {
      failf(data, "Failed writing body (%zu != %zu)", wrote, len);
      return CURLE_WRITE_ERROR;
    }
  }

  if((type & CLIENTWRITE_HEADER) &&
     (data->set.fwrite_header || data->set.writeheader) ) {
    /*
     * Write headers to the same callback or to the especially setup
     * header callback function (added after version 7.7.1).
     */
    curl_write_callback writeit=
      data->set.fwrite_header?data->set.fwrite_header:data->set.fwrite_func;

    /* Note: The header is in the host encoding regardless of the ftp transfer
       mode (ASCII/Image) */

    wrote = writeit(ptr, 1, len, data->set.writeheader);
    if(CURL_WRITEFUNC_PAUSE == wrote)
      /* here we pass in the HEADER bit only since if this was body as well
         then it was passed already and clearly that didn't trigger the pause,
         so this is saved for later with the HEADER bit only */
      return pausewrite(data, CLIENTWRITE_HEADER, ptr, len);

    if(wrote != len) {
      failf(data, "Failed writing header");
      return CURLE_WRITE_ERROR;
    }
  }

  return CURLE_OK;
}
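The CURL_WRITEFUNC_PAUSE branch above corresponds to documented application-side behavior: a write callback may return CURL_WRITEFUNC_PAUSE to pause the transfer (libcurl then parks the data via pausewrite()) and the application later resumes with curl_easy_pause(handle, CURLPAUSE_CONT). A minimal sketch follows; the app_wants_pause flag is a made-up application condition.

#include <stdio.h>
#include <curl/curl.h>

/* Illustrative CURLOPT_WRITEFUNCTION callback. libcurl always passes
   size == 1, so the fwrite() item count equals the byte count here. */
static size_t write_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
{
  int *app_wants_pause = (int *)userdata;
  if(*app_wants_pause)
    return CURL_WRITEFUNC_PAUSE; /* triggers the pausewrite() path */
  return fwrite(ptr, size, nmemb, stdout);
}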
static CURLcode CONNECT(struct connectdata *conn, int sockindex, const char *hostname, int remote_port) { int subversion = 0; struct Curl_easy *data = conn->data; struct SingleRequest *k = &data->req; CURLcode result; curl_socket_t tunnelsocket = conn->sock[sockindex]; struct http_connect_state *s = conn->connect_state; #define SELECT_OK 0 #define SELECT_ERROR 1 if(Curl_connect_complete(conn)) return CURLE_OK; /* CONNECT is already completed */ conn->bits.proxy_connect_closed = FALSE; do { timediff_t check; if(TUNNEL_INIT == s->tunnel_state) { /* BEGIN CONNECT PHASE */ char *host_port; Curl_send_buffer *req_buffer; infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port); /* This only happens if we've looped here due to authentication reasons, and we don't really use the newly cloned URL here then. Just free() it. */ free(data->req.newurl); data->req.newurl = NULL; /* initialize a dynamic send-buffer */ req_buffer = Curl_add_buffer_init(); if(!req_buffer) return CURLE_OUT_OF_MEMORY; host_port = aprintf("%s:%d", hostname, remote_port); if(!host_port) { Curl_add_buffer_free(req_buffer); return CURLE_OUT_OF_MEMORY; } /* Setup the proxy-authorization header, if any */ result = Curl_http_output_auth(conn, "CONNECT", host_port, TRUE); free(host_port); if(!result) { char *host = NULL; const char *proxyconn = ""; const char *useragent = ""; const char *http = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1"; bool ipv6_ip = conn->bits.ipv6_ip; char *hostheader; /* the hostname may be different */ if(hostname != conn->host.name) ipv6_ip = (strchr(hostname, ':') != NULL); hostheader = /* host:port with IPv6 support */ aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", remote_port); if(!hostheader) { Curl_add_buffer_free(req_buffer); return CURLE_OUT_OF_MEMORY; } if(!Curl_checkProxyheaders(conn, "Host")) { host = aprintf("Host: %s\r\n", hostheader); if(!host) { free(hostheader); Curl_add_buffer_free(req_buffer); return CURLE_OUT_OF_MEMORY; } } if(!Curl_checkProxyheaders(conn, "Proxy-Connection")) proxyconn = "Proxy-Connection: Keep-Alive\r\n"; if(!Curl_checkProxyheaders(conn, "User-Agent") && data->set.str[STRING_USERAGENT]) useragent = conn->allocptr.uagent; result = Curl_add_bufferf(req_buffer, "CONNECT %s HTTP/%s\r\n" "%s" /* Host: */ "%s" /* Proxy-Authorization */ "%s" /* User-Agent */ "%s", /* Proxy-Connection */ hostheader, http, host?host:"", conn->allocptr.proxyuserpwd? conn->allocptr.proxyuserpwd:"", useragent, proxyconn); if(host) free(host); free(hostheader); if(!result) result = Curl_add_custom_headers(conn, TRUE, req_buffer); if(!result) /* CRLF terminate the request */ result = Curl_add_bufferf(req_buffer, "\r\n"); if(!result) { /* Send the connect request to the proxy */ /* BLOCKING */ result = Curl_add_buffer_send(req_buffer, conn, &data->info.request_size, 0, sockindex); } req_buffer = NULL; if(result) failf(data, "Failed sending CONNECT to proxy"); } Curl_add_buffer_free(req_buffer); if(result) return result; s->tunnel_state = TUNNEL_CONNECT; s->perline = 0; } /* END CONNECT PHASE */ check = Curl_timeleft(data, NULL, TRUE); if(check <= 0) { failf(data, "Proxy CONNECT aborted due to timeout"); return CURLE_OPERATION_TIMEDOUT; } if(!Curl_conn_data_pending(conn, sockindex)) /* return so we'll be called again polling-style */ return CURLE_OK; /* at this point, the tunnel_connecting phase is over. 
*/ { /* READING RESPONSE PHASE */ int error = SELECT_OK; while(s->keepon && !error) { ssize_t gotbytes; /* make sure we have space to read more data */ if(s->ptr >= &s->connect_buffer[CONNECT_BUFFER_SIZE]) { failf(data, "CONNECT response too large!"); return CURLE_RECV_ERROR; } /* Read one byte at a time to avoid a race condition. Wait at most one second before looping to ensure continuous pgrsUpdates. */ result = Curl_read(conn, tunnelsocket, s->ptr, 1, &gotbytes); if(result == CURLE_AGAIN) /* socket buffer drained, return */ return CURLE_OK; if(Curl_pgrsUpdate(conn)) return CURLE_ABORTED_BY_CALLBACK; if(result) { s->keepon = FALSE; break; } else if(gotbytes <= 0) { if(data->set.proxyauth && data->state.authproxy.avail) { /* proxy auth was requested and there was proxy auth available, then deem this as "mere" proxy disconnect */ conn->bits.proxy_connect_closed = TRUE; infof(data, "Proxy CONNECT connection closed\n"); } else { error = SELECT_ERROR; failf(data, "Proxy CONNECT aborted"); } s->keepon = FALSE; break; } if(s->keepon > TRUE) { /* This means we are currently ignoring a response-body */ s->ptr = s->connect_buffer; if(s->cl) { /* A Content-Length based body: simply count down the counter and make sure to break out of the loop when we're done! */ s->cl--; if(s->cl <= 0) { s->keepon = FALSE; s->tunnel_state = TUNNEL_COMPLETE; break; } } else { /* chunked-encoded body, so we need to do the chunked dance properly to know when the end of the body is reached */ CHUNKcode r; ssize_t tookcareof = 0; /* now parse the chunked piece of data so that we can properly tell when the stream ends */ r = Curl_httpchunk_read(conn, s->ptr, 1, &tookcareof); if(r == CHUNKE_STOP) { /* we're done reading chunks! */ infof(data, "chunk reading DONE\n"); s->keepon = FALSE; /* we did the full CONNECT treatment, go COMPLETE */ s->tunnel_state = TUNNEL_COMPLETE; } } continue; } s->perline++; /* amount of bytes in this line so far */ /* if this is not the end of a header line then continue */ if(*s->ptr != 0x0a) { s->ptr++; continue; } /* convert from the network encoding */ result = Curl_convert_from_network(data, s->line_start, (size_t)s->perline); /* Curl_convert_from_network calls failf if unsuccessful */ if(result) return result; /* output debug if that is requested */ if(data->set.verbose) Curl_debug(data, CURLINFO_HEADER_IN, s->line_start, (size_t)s->perline); if(!data->set.suppress_connect_headers) { /* send the header to the callback */ int writetype = CLIENTWRITE_HEADER; if(data->set.include_header) writetype |= CLIENTWRITE_BODY; result = Curl_client_write(conn, writetype, s->line_start, s->perline); if(result) return result; } data->info.header_size += (long)s->perline; data->req.headerbytecount += (long)s->perline; /* Newlines are CRLF, so the CR is ignored as the line isn't really terminated until the LF comes. Treat a following CR as end-of-headers as well.*/ if(('\r' == s->line_start[0]) || ('\n' == s->line_start[0])) { /* end of response-headers from the proxy */ s->ptr = s->connect_buffer; if((407 == k->httpcode) && !data->state.authproblem) { /* If we get a 407 response code with content length when we have no auth problem, we must ignore the whole response-body */ s->keepon = 2; if(s->cl) { infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T " bytes of response-body\n", s->cl); } else if(s->chunked_encoding) { CHUNKcode r; infof(data, "Ignore chunked response-body\n"); /* We set ignorebody true here since the chunked decoder function will acknowledge that. 
Pay attention so that this is cleared again when this function returns! */ k->ignorebody = TRUE; if(s->line_start[1] == '\n') { /* this can only be a LF if the letter at index 0 was a CR */ s->line_start++; } /* now parse the chunked piece of data so that we can properly tell when the stream ends */ r = Curl_httpchunk_read(conn, s->line_start + 1, 1, &gotbytes); if(r == CHUNKE_STOP) { /* we're done reading chunks! */ infof(data, "chunk reading DONE\n"); s->keepon = FALSE; /* we did the full CONNECT treatment, go to COMPLETE */ s->tunnel_state = TUNNEL_COMPLETE; } } else { /* without content-length or chunked encoding, we can't keep the connection alive since the close is the end signal so we bail out at once instead */ s->keepon = FALSE; } } else s->keepon = FALSE; if(!s->cl) /* we did the full CONNECT treatment, go to COMPLETE */ s->tunnel_state = TUNNEL_COMPLETE; continue; } s->line_start[s->perline] = 0; /* zero terminate the buffer */ if((checkprefix("WWW-Authenticate:", s->line_start) && (401 == k->httpcode)) || (checkprefix("Proxy-authenticate:", s->line_start) && (407 == k->httpcode))) { bool proxy = (k->httpcode == 407) ? TRUE : FALSE; char *auth = Curl_copy_header_value(s->line_start); if(!auth) return CURLE_OUT_OF_MEMORY; result = Curl_http_input_auth(conn, proxy, auth); free(auth); if(result) return result; } else if(checkprefix("Content-Length:", s->line_start)) { if(k->httpcode/100 == 2) { /* A client MUST ignore any Content-Length or Transfer-Encoding header fields received in a successful response to CONNECT. "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */ infof(data, "Ignoring Content-Length in CONNECT %03d response\n", k->httpcode); } else { (void)curlx_strtoofft(s->line_start + strlen("Content-Length:"), NULL, 10, &s->cl); } } else if(Curl_compareheader(s->line_start, "Connection:", "close")) s->close_connection = TRUE; else if(checkprefix("Transfer-Encoding:", s->line_start)) { if(k->httpcode/100 == 2) { /* A client MUST ignore any Content-Length or Transfer-Encoding header fields received in a successful response to CONNECT. "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */ infof(data, "Ignoring Transfer-Encoding in " "CONNECT %03d response\n", k->httpcode); } else if(Curl_compareheader(s->line_start, "Transfer-Encoding:", "chunked")) { infof(data, "CONNECT responded chunked\n"); s->chunked_encoding = TRUE; /* init our chunky engine */ Curl_httpchunk_init(conn); } } else if(Curl_compareheader(s->line_start, "Proxy-Connection:", "close")) s->close_connection = TRUE; else if(2 == sscanf(s->line_start, "HTTP/1.%d %d", &subversion, &k->httpcode)) { /* store the HTTP code from the proxy */ data->info.httpproxycode = k->httpcode; } s->perline = 0; /* line starts over here */ s->ptr = s->connect_buffer; s->line_start = s->ptr; } /* while there's buffer left and loop is requested */ if(Curl_pgrsUpdate(conn)) return CURLE_ABORTED_BY_CALLBACK; if(error) return CURLE_RECV_ERROR; if(data->info.httpproxycode/100 != 2) { /* Deal with the possibly already received authenticate headers. 'newurl' is set to a new URL if we must loop. */ result = Curl_http_auth_act(conn); if(result) return result; if(conn->bits.close) /* the connection has been marked for closure, most likely in the Curl_http_auth_act() function and thus we can kill it at once below */ s->close_connection = TRUE; } if(s->close_connection && data->req.newurl) { /* Connection closed by server. 
Don't use it anymore */ Curl_closesocket(conn, conn->sock[sockindex]); conn->sock[sockindex] = CURL_SOCKET_BAD; break; } } /* END READING RESPONSE PHASE */ /* If we are supposed to continue and request a new URL, which basically * means the HTTP authentication is still going on so if the tunnel * is complete we start over in INIT state */ if(data->req.newurl && (TUNNEL_COMPLETE == s->tunnel_state)) { connect_init(conn, TRUE); /* reinit */ } } while(data->req.newurl); if(data->info.httpproxycode/100 != 2) { if(s->close_connection && data->req.newurl) { conn->bits.proxy_connect_closed = TRUE; infof(data, "Connect me again please\n"); connect_done(conn); } else { free(data->req.newurl); data->req.newurl = NULL; /* failure, close this connection to avoid re-use */ streamclose(conn, "proxy CONNECT failure"); Curl_closesocket(conn, conn->sock[sockindex]); conn->sock[sockindex] = CURL_SOCKET_BAD; } /* to back to init state */ s->tunnel_state = TUNNEL_INIT; if(conn->bits.proxy_connect_closed) /* this is not an error, just part of the connection negotiation */ return CURLE_OK; failf(data, "Received HTTP code %d from proxy after CONNECT", data->req.httpcode); return CURLE_RECV_ERROR; } s->tunnel_state = TUNNEL_COMPLETE; /* If a proxy-authorization header was used for the proxy, then we should make sure that it isn't accidentally used for the document request after we've connected. So let's free and clear it here. */ Curl_safefree(conn->allocptr.proxyuserpwd); conn->allocptr.proxyuserpwd = NULL; data->state.authproxy.done = TRUE; infof(data, "Proxy replied %d to CONNECT request\n", data->info.httpproxycode); data->req.ignorebody = FALSE; /* put it (back) to non-ignore state */ conn->bits.rewindaftersend = FALSE; /* make sure this isn't set for the document request */ return CURLE_OK; }
/* client_write() sends data to the write callback(s)

   The bit pattern defines to what "streams" to write to. Body and/or header.
   The defines are in sendf.h of course.
 */
CURLcode Curl_client_write(struct connectdata *conn,
                           int type,
                           char *ptr,
                           size_t len)
{
  struct SessionHandle *data = conn->data;
  size_t wrote;

  if(data->state.cancelled) {
    /* We just suck everything into a black hole */
    return CURLE_OK;
  }

  if(0 == len)
    len = strlen(ptr);

  if(type & CLIENTWRITE_BODY) {
    if((conn->protocol&PROT_FTP) && conn->proto.ftpc.transfertype == 'A') {
#ifdef CURL_DOES_CONVERSIONS
      /* convert from the network encoding */
      size_t rc;
      rc = Curl_convert_from_network(data, ptr, len);
      /* Curl_convert_from_network calls failf if unsuccessful */
      if(rc != CURLE_OK)
        return rc;
#endif /* CURL_DOES_CONVERSIONS */

#ifdef CURL_DO_LINEEND_CONV
      /* convert end-of-line markers */
      len = convert_lineends(data, ptr, len);
#endif /* CURL_DO_LINEEND_CONV */
    }
    /* If the previous block of data ended with CR and this block of data is
       just a NL, then the length might be zero */
    if(len) {
      wrote = data->set.fwrite(ptr, 1, len, data->set.out);
    }
    else {
      wrote = len;
    }

    if(wrote != len) {
      failf(data, "Failed writing body");
      return CURLE_WRITE_ERROR;
    }
  }

  if((type & CLIENTWRITE_HEADER) &&
     (data->set.fwrite_header || data->set.writeheader) ) {
    /*
     * Write headers to the same callback or to the especially setup
     * header callback function (added after version 7.7.1).
     */
    curl_write_callback writeit=
      data->set.fwrite_header?data->set.fwrite_header:data->set.fwrite;

    /* Note: The header is in the host encoding regardless of the ftp transfer
       mode (ASCII/Image) */

    wrote = writeit(ptr, 1, len, data->set.writeheader);
    if(wrote != len) {
      failf(data, "Failed writing header");
      return CURLE_WRITE_ERROR;
    }
  }

  return CURLE_OK;
}
/* * chunk_read() returns a OK for normal operations, or a positive return code * for errors. STOP means this sequence of chunks is complete. The 'wrote' * argument is set to tell the caller how many bytes we actually passed to the * client (for byte-counting and whatever). * * The states and the state-machine is further explained in the header file. * * This function always uses ASCII hex values to accommodate non-ASCII hosts. * For example, 0x0d and 0x0a are used instead of '\r' and '\n'. */ CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap, ssize_t datalen, ssize_t *wrotep) { CURLcode result=CURLE_OK; struct SessionHandle *data = conn->data; struct Curl_chunker *ch = &data->reqdata.proto.http->chunk; struct Curl_transfer_keeper *k = &data->reqdata.keep; size_t piece; size_t length = (size_t)datalen; size_t *wrote = (size_t *)wrotep; *wrote = 0; /* nothing's written yet */ /* the original data is written to the client, but we go on with the chunk read process, to properly calculate the content length*/ if (data->set.http_te_skip && !k->ignorebody) Curl_client_write(conn, CLIENTWRITE_BODY, datap,datalen); while(length) { switch(ch->state) { case CHUNK_HEX: /* Check for an ASCII hex digit. We avoid the use of isxdigit to accommodate non-ASCII hosts. */ if((*datap >= 0x30 && *datap <= 0x39) /* 0-9 */ || (*datap >= 0x41 && *datap <= 0x46) /* A-F */ || (*datap >= 0x61 && *datap <= 0x66)) { /* a-f */ if(ch->hexindex < MAXNUM_SIZE) { ch->hexbuffer[ch->hexindex] = *datap; datap++; length--; ch->hexindex++; } else { return CHUNKE_TOO_LONG_HEX; /* longer hex than we support */ } } else { if(0 == ch->hexindex) { /* This is illegal data, we received junk where we expected a hexadecimal digit. */ return CHUNKE_ILLEGAL_HEX; } /* length and datap are unmodified */ ch->hexbuffer[ch->hexindex]=0; #ifdef CURL_DOES_CONVERSIONS /* convert to host encoding before calling strtoul */ result = Curl_convert_from_network(conn->data, ch->hexbuffer, ch->hexindex); if(result != CURLE_OK) { /* Curl_convert_from_network calls failf if unsuccessful */ /* Treat it as a bad hex character */ return(CHUNKE_ILLEGAL_HEX); } #endif /* CURL_DOES_CONVERSIONS */ ch->datasize=strtoul(ch->hexbuffer, NULL, 16); ch->state = CHUNK_POSTHEX; } break; case CHUNK_POSTHEX: /* In this state, we're waiting for CRLF to arrive. We support this to allow so called chunk-extensions to show up here before the CRLF comes. */ if(*datap == 0x0d) ch->state = CHUNK_CR; length--; datap++; break; case CHUNK_CR: /* waiting for the LF */ if(*datap == 0x0a) { /* we're now expecting data to come, unless size was zero! */ if(0 == ch->datasize) { if (conn->bits.trailerHdrPresent!=TRUE) { /* No Trailer: header found - revert to original Curl processing */ ch->state = CHUNK_STOPCR; /* We need to increment the datap here since we bypass the increment below with the immediate break */ length--; datap++; /* This is the final byte, continue to read the final CRLF */ break; } else { ch->state = CHUNK_TRAILER; /* attempt to read trailers */ conn->trlPos=0; } } else { ch->state = CHUNK_DATA; } } else /* previously we got a fake CR, go back to CR waiting! */ ch->state = CHUNK_CR; datap++; length--; break; case CHUNK_DATA: /* we get pure and fine data We expect another 'datasize' of data. We have 'length' right now, it can be more or less than 'datasize'. Get the smallest piece. */ piece = (ch->datasize >= length)?length:ch->datasize; /* Write the data portion available */ #ifdef HAVE_LIBZ switch (conn->data->set.http_ce_skip? 
IDENTITY : data->reqdata.keep.content_encoding) { case IDENTITY: #endif if(!k->ignorebody) { if ( !data->set.http_te_skip ) result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, piece); else result = CURLE_OK; } #ifdef HAVE_LIBZ break; case DEFLATE: /* update data->reqdata.keep.str to point to the chunk data. */ data->reqdata.keep.str = datap; result = Curl_unencode_deflate_write(conn, &data->reqdata.keep, (ssize_t)piece); break; case GZIP: /* update data->reqdata.keep.str to point to the chunk data. */ data->reqdata.keep.str = datap; result = Curl_unencode_gzip_write(conn, &data->reqdata.keep, (ssize_t)piece); break; case COMPRESS: default: failf (conn->data, "Unrecognized content encoding type. " "libcurl understands `identity', `deflate' and `gzip' " "content encodings."); return CHUNKE_BAD_ENCODING; } #endif if(result) return CHUNKE_WRITE_ERROR; *wrote += piece; ch->datasize -= piece; /* decrease amount left to expect */ datap += piece; /* move read pointer forward */ length -= piece; /* decrease space left in this round */ if(0 == ch->datasize) /* end of data this round, we now expect a trailing CRLF */ ch->state = CHUNK_POSTCR; break; case CHUNK_POSTCR: if(*datap == 0x0d) { ch->state = CHUNK_POSTLF; datap++; length--; } else { return CHUNKE_BAD_CHUNK; } break; case CHUNK_POSTLF: if(*datap == 0x0a) { /* * The last one before we go back to hex state and start all * over. */ Curl_httpchunk_init(conn); datap++; length--; } else { return CHUNKE_BAD_CHUNK; } break; case CHUNK_TRAILER: /* conn->trailer is assumed to be freed in url.c on a connection basis */ if (conn->trlPos >= conn->trlMax) { char *ptr; if(conn->trlMax) { conn->trlMax *= 2; ptr = (char*)realloc(conn->trailer,conn->trlMax); } else { conn->trlMax=128; ptr = (char*)malloc(conn->trlMax); } if(!ptr) return CHUNKE_OUT_OF_MEMORY; conn->trailer = ptr; } conn->trailer[conn->trlPos++]=*datap; if(*datap == 0x0d) ch->state = CHUNK_TRAILER_CR; else { datap++; length--; } break; case CHUNK_TRAILER_CR: if(*datap == 0x0d) { ch->state = CHUNK_TRAILER_POSTCR; datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_TRAILER_POSTCR: if (*datap == 0x0a) { conn->trailer[conn->trlPos++]=0x0a; conn->trailer[conn->trlPos]=0; if (conn->trlPos==2) { ch->state = CHUNK_STOP; datap++; length--; /* * Note that this case skips over the final STOP states since we've * already read the final CRLF and need to return */ ch->dataleft = length; return CHUNKE_STOP; /* return stop */ } else { #ifdef CURL_DOES_CONVERSIONS /* Convert to host encoding before calling Curl_client_write */ result = Curl_convert_from_network(conn->data, conn->trailer, conn->trlPos); if(result != CURLE_OK) { /* Curl_convert_from_network calls failf if unsuccessful */ /* Treat it as a bad chunk */ return(CHUNKE_BAD_CHUNK); } #endif /* CURL_DOES_CONVERSIONS */ if ( !data->set.http_te_skip ) Curl_client_write(conn, CLIENTWRITE_HEADER, conn->trailer, conn->trlPos); } ch->state = CHUNK_TRAILER; conn->trlPos=0; datap++; length--; } else return CHUNKE_BAD_CHUNK; break; case CHUNK_STOPCR: /* Read the final CRLF that ends all chunk bodies */ if(*datap == 0x0d) { ch->state = CHUNK_STOP; datap++; length--; } else { return CHUNKE_BAD_CHUNK; } break; case CHUNK_STOP: if (*datap == 0x0a) { datap++; length--; /* Record the length of any data left in the end of the buffer even if there's no more chunks to read */ ch->dataleft = length; return CHUNKE_STOP; /* return stop */ } else { return CHUNKE_BAD_CHUNK; } default: return CHUNKE_STATE_ERROR; } } return CHUNKE_OK; }
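Note that this older decoder variant only enters the trailer states when a Trailer: header was announced earlier in the response (conn->bits.trailerHdrPresent); otherwise a zero-sized chunk goes straight to CHUNK_STOPCR. An announced-trailer exchange looks roughly like the sketch below; the header names and payload are illustrative.

/* Illustrative chunked response with an announced trailer:

     HTTP/1.1 200 OK
     Transfer-Encoding: chunked
     Trailer: X-Checksum

     7\r\n
     payload\r\n
     0\r\n
     X-Checksum: abc123\r\n
     \r\n
*/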
/* return 0 on success */
static int showit(struct Curl_easy *data, curl_infotype type,
                  char *ptr, size_t size)
{
  static const char s_infotype[CURLINFO_END][3] = {
    "* ", "< ", "> ", "{ ", "} ", "{ ", "} " };
  int rc = 0;

#ifdef CURL_DOES_CONVERSIONS
  char *buf = NULL;
  size_t conv_size = 0;

  switch(type) {
  case CURLINFO_HEADER_OUT:
    buf = Curl_memdup(ptr, size);
    if(!buf)
      return 1;
    conv_size = size;

    /* Special processing is needed for this block if it
     * contains both headers and data (separated by CRLFCRLF).
     * We want to convert just the headers, leaving the data as-is.
     */
    if(size > 4) {
      size_t i;
      for(i = 0; i < size-4; i++) {
        if(memcmp(&buf[i], "\x0d\x0a\x0d\x0a", 4) == 0) {
          /* convert everything through this CRLFCRLF but no further */
          conv_size = i + 4;
          break;
        }
      }
    }

    Curl_convert_from_network(data, buf, conv_size);
    /* Curl_convert_from_network calls failf if unsuccessful */
    /* we might as well continue even if it fails... */
    ptr = buf; /* switch pointer to use my buffer instead */
    break;
  default:
    /* leave everything else as-is */
    break;
  }
#endif /* CURL_DOES_CONVERSIONS */

  if(data->set.fdebug) {
    Curl_set_in_callback(data, true);
    rc = (*data->set.fdebug)(data, type, ptr, size, data->set.debugdata);
    Curl_set_in_callback(data, false);
  }
  else {
    switch(type) {
    case CURLINFO_TEXT:
    case CURLINFO_HEADER_OUT:
    case CURLINFO_HEADER_IN:
      fwrite(s_infotype[type], 2, 1, data->set.err);
      fwrite(ptr, size, 1, data->set.err);
#ifdef CURL_DOES_CONVERSIONS
      if(size != conv_size) {
        /* we had untranslated data so we need an explicit newline */
        fwrite("\n", 1, 1, data->set.err);
      }
#endif
      break;
    default: /* nada */
      break;
    }
  }
#ifdef CURL_DOES_CONVERSIONS
  free(buf);
#endif
  return rc;
}
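showit() is what ultimately services CURLOPT_DEBUGFUNCTION. Installing such a callback from an application uses the documented options below; the callback body itself is only a placeholder.

#include <stdio.h>
#include <curl/curl.h>

/* Illustrative debug callback; showit() invokes it for every CURLINFO_*
   event once CURLOPT_VERBOSE is enabled. */
static int debug_cb(CURL *handle, curl_infotype type, char *data,
                    size_t size, void *userptr)
{
  (void)handle;
  (void)userptr;
  if(type == CURLINFO_HEADER_IN || type == CURLINFO_HEADER_OUT)
    fwrite(data, 1, size, stderr); /* dump request/response headers */
  return 0; /* the debug callback must return 0 */
}

/* ... after curl_easy_init():
   curl_easy_setopt(easy, CURLOPT_DEBUGFUNCTION, debug_cb);
   curl_easy_setopt(easy, CURLOPT_VERBOSE, 1L);
*/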