1/***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2022, Daniel Stenberg, <[email protected]>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 * SPDX-License-Identifier: curl
22 *
23 ***************************************************************************/
24
25#include "curl_setup.h"
26#include "strtoofft.h"
27
28#ifdef HAVE_NETINET_IN_H
29#include <netinet/in.h>
30#endif
31#ifdef HAVE_NETDB_H
32#include <netdb.h>
33#endif
34#ifdef HAVE_ARPA_INET_H
35#include <arpa/inet.h>
36#endif
37#ifdef HAVE_NET_IF_H
38#include <net/if.h>
39#endif
40#ifdef HAVE_SYS_IOCTL_H
41#include <sys/ioctl.h>
42#endif
43#ifdef HAVE_SIGNAL_H
44#include <signal.h>
45#endif
46
47#ifdef HAVE_SYS_PARAM_H
48#include <sys/param.h>
49#endif
50
51#ifdef HAVE_SYS_SELECT_H
52#include <sys/select.h>
53#elif defined(HAVE_UNISTD_H)
54#include <unistd.h>
55#endif
56
57#ifndef HAVE_SOCKET
58#error "We can't compile without socket() support!"
59#endif
60
61#include "urldata.h"
62#include <curl/curl.h>
63#include "netrc.h"
64
65#include "content_encoding.h"
66#include "hostip.h"
67#include "transfer.h"
68#include "sendf.h"
69#include "speedcheck.h"
70#include "progress.h"
71#include "http.h"
72#include "url.h"
73#include "getinfo.h"
74#include "vtls/vtls.h"
75#include "select.h"
76#include "multiif.h"
77#include "connect.h"
78#include "http2.h"
79#include "mime.h"
80#include "strcase.h"
81#include "urlapi-int.h"
82#include "hsts.h"
83#include "setopt.h"
84#include "headers.h"
85
86/* The last 3 #include files should be in this order */
87#include "curl_printf.h"
88#include "curl_memory.h"
89#include "memdebug.h"
90
91#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
92 !defined(CURL_DISABLE_IMAP)
93/*
94 * Curl_checkheaders() checks the linked list of custom headers for a
95 * particular header (prefix). Provide the prefix without colon!
96 *
97 * Returns a pointer to the first matching header or NULL if none matched.
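 *
 * For example, Curl_checkheaders(data, STRCONST("Host")) (assuming the
 * STRCONST() string/length helper is available) tells whether the
 * application supplied its own Host: header.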
98 */
99char *Curl_checkheaders(const struct Curl_easy *data,
100 const char *thisheader,
101 const size_t thislen)
102{
103 struct curl_slist *head;
104 DEBUGASSERT(thislen);
105 DEBUGASSERT(thisheader[thislen-1] != ':');
106
107 for(head = data->set.headers; head; head = head->next) {
108 if(strncasecompare(head->data, thisheader, thislen) &&
109 Curl_headersep(head->data[thislen]))
110 return head->data;
111 }
112
113 return NULL;
114}
115#endif
116
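/*
 * Curl_get_upload_buffer() lazily allocates the upload buffer of
 * data->set.upload_buffer_size bytes on first use and keeps it in
 * data->state.ulbuf for the rest of the transfer.
 */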
117CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
118{
119 if(!data->state.ulbuf) {
120 data->state.ulbuf = malloc(data->set.upload_buffer_size);
121 if(!data->state.ulbuf)
122 return CURLE_OUT_OF_MEMORY;
123 }
124 return CURLE_OK;
125}
126
127#ifndef CURL_DISABLE_HTTP
128/*
129 * This function will be called to loop through the trailers buffer
130 * until no more data is available for sending.
131 */
132static size_t trailers_read(char *buffer, size_t size, size_t nitems,
133 void *raw)
134{
135 struct Curl_easy *data = (struct Curl_easy *)raw;
136 struct dynbuf *trailers_buf = &data->state.trailers_buf;
137 size_t bytes_left = Curl_dyn_len(trailers_buf) -
138 data->state.trailers_bytes_sent;
139 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
140 if(to_copy) {
141 memcpy(buffer,
142 Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
143 to_copy);
144 data->state.trailers_bytes_sent += to_copy;
145 }
146 return to_copy;
147}
148
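/*
 * trailers_left() reports how many bytes of the compiled trailers buffer
 * remain to be handed out by trailers_read().
 */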
149static size_t trailers_left(void *raw)
150{
151 struct Curl_easy *data = (struct Curl_easy *)raw;
152 struct dynbuf *trailers_buf = &data->state.trailers_buf;
153 return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
154}
155#endif
156
157/*
158 * This function will call the read callback to fill our buffer with data
159 * to upload.
160 */
161CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
162 size_t *nreadp)
163{
164 size_t buffersize = bytes;
165 size_t nread;
166
167 curl_read_callback readfunc = NULL;
168 void *extra_data = NULL;
169
170#ifndef CURL_DISABLE_HTTP
171 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
172 struct curl_slist *trailers = NULL;
173 CURLcode result;
174 int trailers_ret_code;
175
176 /* at this point we already verified that the callback exists
177 so we compile and store the trailers buffer, then proceed */
178 infof(data,
179 "Moving trailers state machine from initialized to sending.");
180 data->state.trailers_state = TRAILERS_SENDING;
181 Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);
182
183 data->state.trailers_bytes_sent = 0;
184 Curl_set_in_callback(data, true);
185 trailers_ret_code = data->set.trailer_callback(&trailers,
186 data->set.trailer_data);
187 Curl_set_in_callback(data, false);
188 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
189 result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
190 data);
191 }
192 else {
193 failf(data, "operation aborted by trailing headers callback");
194 *nreadp = 0;
195 result = CURLE_ABORTED_BY_CALLBACK;
196 }
197 if(result) {
198 Curl_dyn_free(&data->state.trailers_buf);
199 curl_slist_free_all(trailers);
200 return result;
201 }
202 infof(data, "Successfully compiled trailers.");
203 curl_slist_free_all(trailers);
204 }
205#endif
206
207#ifndef CURL_DISABLE_HTTP
208 /* if we are transmitting trailing data, we don't need to write
209 a chunk size so we skip this */
210 if(data->req.upload_chunky &&
211 data->state.trailers_state == TRAILERS_NONE) {
212 /* if chunked Transfer-Encoding */
213 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
214 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
215 }
216
217 if(data->state.trailers_state == TRAILERS_SENDING) {
218 /* if we're here, it means we already sent the last empty chunk but
219 didn't send a final CR LF, so we sent 0 CR LF. We then keep pulling
220 trailing data until there is no more, at which point we simply
221 return to the previous point in the state machine as if
222 nothing happened.
223 */
224 readfunc = trailers_read;
225 extra_data = (void *)data;
226 }
227 else
228#endif
229 {
230 readfunc = data->state.fread_func;
231 extra_data = data->state.in;
232 }
233
234 Curl_set_in_callback(data, true);
235 nread = readfunc(data->req.upload_fromhere, 1,
236 buffersize, extra_data);
237 Curl_set_in_callback(data, false);
238
239 if(nread == CURL_READFUNC_ABORT) {
240 failf(data, "operation aborted by callback");
241 *nreadp = 0;
242 return CURLE_ABORTED_BY_CALLBACK;
243 }
244 if(nread == CURL_READFUNC_PAUSE) {
245 struct SingleRequest *k = &data->req;
246
247 if(data->conn->handler->flags & PROTOPT_NONETWORK) {
248 /* protocols that work without network cannot be paused. This is
249 actually only FILE:// just now, and it can't pause since the transfer
250 isn't done using the "normal" procedure. */
251 failf(data, "Read callback asked for PAUSE when not supported");
252 return CURLE_READ_ERROR;
253 }
254
255 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
256 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
257 if(data->req.upload_chunky) {
258 /* Back out the preallocation done above */
259 data->req.upload_fromhere -= (8 + 2);
260 }
261 *nreadp = 0;
262
263 return CURLE_OK; /* nothing was read */
264 }
265 else if(nread > buffersize) {
266 /* the read function returned a too large value */
267 *nreadp = 0;
268 failf(data, "read function returned funny value");
269 return CURLE_READ_ERROR;
270 }
271
272#ifndef CURL_DISABLE_HTTP
273 if(!data->req.forbidchunk && data->req.upload_chunky) {
274 /* if chunked Transfer-Encoding
275 * build chunk:
276 *
277 * <HEX SIZE> CRLF
278 * <DATA> CRLF
279 */
280 /* On non-ASCII platforms the <DATA> may or may not be
281 translated based on state.prefer_ascii while the protocol
282 portion must always be translated to the network encoding.
283 To further complicate matters, line end conversion might be
284 done later on, so we need to prevent CRLFs from becoming
285 CRCRLFs if that's the case. To do this we use bare LFs
286 here, knowing they'll become CRLFs later on.
287 */
288
289 bool added_crlf = FALSE;
290 int hexlen = 0;
291 const char *endofline_native;
292 const char *endofline_network;
293
294 if(
295#ifdef CURL_DO_LINEEND_CONV
296 (data->state.prefer_ascii) ||
297#endif
298 (data->set.crlf)) {
299 /* \n will become \r\n later on */
300 endofline_native = "\n";
301 endofline_network = "\x0a";
302 }
303 else {
304 endofline_native = "\r\n";
305 endofline_network = "\x0d\x0a";
306 }
307
308 /* if we're not handling trailing data, proceed as usual */
309 if(data->state.trailers_state != TRAILERS_SENDING) {
310 char hexbuffer[11] = "";
311 hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
312 "%zx%s", nread, endofline_native);
313
314 /* move buffer pointer */
315 data->req.upload_fromhere -= hexlen;
316 nread += hexlen;
317
318 /* copy the prefix to the buffer, leaving out the NUL */
319 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
320
321 /* always append ASCII CRLF to the data unless
322 we have a valid trailer callback */
323 if((nread-hexlen) == 0 &&
324 data->set.trailer_callback != NULL &&
325 data->state.trailers_state == TRAILERS_NONE) {
326 data->state.trailers_state = TRAILERS_INITIALIZED;
327 }
328 else {
329 memcpy(data->req.upload_fromhere + nread,
330 endofline_network,
331 strlen(endofline_network));
332 added_crlf = TRUE;
333 }
334 }
335
336 if(data->state.trailers_state == TRAILERS_SENDING &&
337 !trailers_left(data)) {
338 Curl_dyn_free(&data->state.trailers_buf);
339 data->state.trailers_state = TRAILERS_DONE;
340 data->set.trailer_data = NULL;
341 data->set.trailer_callback = NULL;
342 /* mark the transfer as done */
343 data->req.upload_done = TRUE;
344 infof(data, "Signaling end of chunked upload after trailers.");
345 }
346 else
347 if((nread - hexlen) == 0 &&
348 data->state.trailers_state != TRAILERS_INITIALIZED) {
349 /* mark this as done once this chunk is transferred */
350 data->req.upload_done = TRUE;
351 infof(data,
352 "Signaling end of chunked upload via terminating chunk.");
353 }
354
355 if(added_crlf)
356 nread += strlen(endofline_network); /* for the added end of line */
357 }
358#endif
359
360 *nreadp = nread;
361
362 return CURLE_OK;
363}
364
365
366/*
367 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
368 * POST/PUT with multi-pass authentication when a send was denied and a
369 * resend is necessary.
370 */
371CURLcode Curl_readrewind(struct Curl_easy *data)
372{
373 struct connectdata *conn = data->conn;
374 curl_mimepart *mimepart = &data->set.mimepost;
375
376 conn->bits.rewindaftersend = FALSE; /* we rewind now */
377
378 /* explicitly switch off sending data on this connection now since we are
379 about to restart a new transfer and thus we want to avoid inadvertently
380 sending more data on the existing connection until the next transfer
381 starts */
382 data->req.keepon &= ~KEEP_SEND;
383
384 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
385 CURLOPT_HTTPPOST, call app to rewind
386 */
387 if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
388 struct HTTP *http = data->req.p.http;
389
390 if(http->sendit)
391 mimepart = http->sendit;
392 }
393 if(data->set.postfields)
394 ; /* do nothing */
395 else if(data->state.httpreq == HTTPREQ_POST_MIME ||
396 data->state.httpreq == HTTPREQ_POST_FORM) {
397 CURLcode result = Curl_mime_rewind(mimepart);
398 if(result) {
399 failf(data, "Cannot rewind mime/post data");
400 return result;
401 }
402 }
403 else {
404 if(data->set.seek_func) {
405 int err;
406
407 Curl_set_in_callback(data, true);
408 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
409 Curl_set_in_callback(data, false);
410 if(err) {
411 failf(data, "seek callback returned error %d", (int)err);
412 return CURLE_SEND_FAIL_REWIND;
413 }
414 }
415 else if(data->set.ioctl_func) {
416 curlioerr err;
417
418 Curl_set_in_callback(data, true);
419 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
420 data->set.ioctl_client);
421 Curl_set_in_callback(data, false);
422 infof(data, "the ioctl callback returned %d", (int)err);
423
424 if(err) {
425 failf(data, "ioctl callback returned error %d", (int)err);
426 return CURLE_SEND_FAIL_REWIND;
427 }
428 }
429 else {
430 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
431 given FILE * stream and we can actually attempt to rewind that
432 ourselves with fseek() */
433 if(data->state.fread_func == (curl_read_callback)fread) {
434 if(-1 != fseek(data->state.in, 0, SEEK_SET))
435 /* successful rewind */
436 return CURLE_OK;
437 }
438
439 /* no callback set or failure above, makes us fail at once */
440 failf(data, "necessary data rewind wasn't possible");
441 return CURLE_SEND_FAIL_REWIND;
442 }
443 }
444 return CURLE_OK;
445}
446
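/*
 * data_pending() returns nonzero when more received data is likely already
 * buffered in a lower layer (TLS, QUIC, SSH or HTTP/2), so the caller should
 * keep reading instead of waiting for the socket to become readable again.
 */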
447static int data_pending(const struct Curl_easy *data)
448{
449 struct connectdata *conn = data->conn;
450
451#ifdef ENABLE_QUIC
452 if(conn->transport == TRNSPRT_QUIC)
453 return Curl_quic_data_pending(data);
454#endif
455
456 if(conn->handler->protocol&PROTO_FAMILY_FTP)
457 return Curl_ssl_data_pending(conn, SECONDARYSOCKET);
458
459 /* in the case of libssh2, we can never be really sure that we have emptied
460 its internal buffers so we MUST always try until we get EAGAIN back */
461 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
462#ifdef USE_NGHTTP2
463 /* For HTTP/2, we may read up everything including response body
464 with header fields in Curl_http_readwrite_headers. If no
465 content-length is provided, curl waits for the connection
466 close, which we emulate using conn->proto.httpc.closed =
467 TRUE. The thing is, if we read everything, then http2_recv won't
468 be called and we cannot signal the HTTP/2 stream has closed. As
469 a workaround, we return nonzero here to call http2_recv. */
470 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20) ||
471#endif
472 Curl_ssl_data_pending(conn, FIRSTSOCKET);
473}
474
475/*
476 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
477 * remote document with the time provided by CURLOPT_TIMEVALUE
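 *
 * For example, with CURL_TIMECOND_IFMODSINCE and CURLOPT_TIMEVALUE set to
 * time T, a document last modified at or before T makes this return FALSE.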
478 */
479bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
480{
481 if((timeofdoc == 0) || (data->set.timevalue == 0))
482 return TRUE;
483
484 switch(data->set.timecondition) {
485 case CURL_TIMECOND_IFMODSINCE:
486 default:
487 if(timeofdoc <= data->set.timevalue) {
488 infof(data,
489 "The requested document is not new enough");
490 data->info.timecond = TRUE;
491 return FALSE;
492 }
493 break;
494 case CURL_TIMECOND_IFUNMODSINCE:
495 if(timeofdoc >= data->set.timevalue) {
496 infof(data,
497 "The requested document is not old enough");
498 data->info.timecond = TRUE;
499 return FALSE;
500 }
501 break;
502 }
503
504 return TRUE;
505}
506
507/*
508 * Go ahead and do a read if we have a readable socket or if
509 * the stream was rewound (in which case we have data in a
510 * buffer)
511 *
512 * return '*comeback' TRUE if we didn't properly drain the socket so this
513 * function should get called again without select() or similar in between!
514 */
515static CURLcode readwrite_data(struct Curl_easy *data,
516 struct connectdata *conn,
517 struct SingleRequest *k,
518 int *didwhat, bool *done,
519 bool *comeback)
520{
521 CURLcode result = CURLE_OK;
522 ssize_t nread; /* number of bytes read */
523 size_t excess = 0; /* excess bytes read */
524 bool readmore = FALSE; /* used by RTP to signal for more data */
525 int maxloops = 100;
526 char *buf = data->state.buffer;
527 DEBUGASSERT(buf);
528
529 *done = FALSE;
530 *comeback = FALSE;
531
532 /* This is where we loop until we have read everything there is to
533 read or we get a CURLE_AGAIN */
534 do {
535 bool is_empty_data = FALSE;
536 size_t buffersize = data->set.buffer_size;
537 size_t bytestoread = buffersize;
538#ifdef USE_NGHTTP2
539 bool is_http2 = ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
540 (conn->httpversion == 20));
541#endif
542 bool is_http3 =
543#ifdef ENABLE_QUIC
544 ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
545 (conn->httpversion == 30));
546#else
547 FALSE;
548#endif
549
550 if(
551#ifdef USE_NGHTTP2
552 /* For HTTP/2, read data without caring about the content length. This
553 is safe because body in HTTP/2 is always segmented thanks to its
554 framing layer. Meanwhile, we have to call Curl_read to ensure that
555 http2_handle_stream_close is called when we read all incoming bytes
556 for a particular stream. */
557 !is_http2 &&
558#endif
559 !is_http3 && /* Same reason mentioned above. */
560 k->size != -1 && !k->header) {
561 /* make sure we don't read too much */
562 curl_off_t totalleft = k->size - k->bytecount;
563 if(totalleft < (curl_off_t)bytestoread)
564 bytestoread = (size_t)totalleft;
565 }
566
567 if(bytestoread) {
568 /* receive data from the network! */
569 result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);
570
571 /* read would've blocked */
572 if(CURLE_AGAIN == result)
573 break; /* get out of loop */
574
575 if(result>0)
576 return result;
577 }
578 else {
579 /* read nothing but since we wanted nothing we consider this an OK
580 situation to proceed from */
581 DEBUGF(infof(data, "readwrite_data: we're done"));
582 nread = 0;
583 }
584
585 if(!k->bytecount) {
586 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
587 if(k->exp100 > EXP100_SEND_DATA)
588 /* set time stamp to compare with when waiting for the 100 */
589 k->start100 = Curl_now();
590 }
591
592 *didwhat |= KEEP_RECV;
593 /* indicates data of zero size, i.e. empty file */
594 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
595
596 if(0 < nread || is_empty_data) {
597 buf[nread] = 0;
598 }
599 else {
600 /* if we receive 0 or less here, either the http2 stream is closed or the
601 server closed the connection and we bail out from this! */
602#ifdef USE_NGHTTP2
603 if(is_http2 && !nread)
604 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
605 else
606#endif
607 if(is_http3 && !nread)
608 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
609 else
610 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
611 k->keepon &= ~KEEP_RECV;
612 break;
613 }
614
615 /* Default buffer to use when we write the buffer; it may be changed
616 in the flow below before the actual storing is done. */
617 k->str = buf;
618
619 if(conn->handler->readwrite) {
620 result = conn->handler->readwrite(data, conn, &nread, &readmore);
621 if(result)
622 return result;
623 if(readmore)
624 break;
625 }
626
627#ifndef CURL_DISABLE_HTTP
628 /* Since this is a two-state thing, we check if we are parsing
629 headers at the moment or not. */
630 if(k->header) {
631 /* we are in parse-the-header-mode */
632 bool stop_reading = FALSE;
633 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
634 if(result)
635 return result;
636
637 if(conn->handler->readwrite &&
638 (k->maxdownload <= 0 && nread > 0)) {
639 result = conn->handler->readwrite(data, conn, &nread, &readmore);
640 if(result)
641 return result;
642 if(readmore)
643 break;
644 }
645
646 if(stop_reading) {
647 /* We've stopped dealing with input, get out of the do-while loop */
648
649 if(nread > 0) {
650 infof(data,
651 "Excess found:"
652 " excess = %zd"
653 " url = %s (zero-length body)",
654 nread, data->state.up.path);
655 }
656
657 break;
658 }
659 }
660#endif /* CURL_DISABLE_HTTP */
661
662
663 /* This is not an 'else if' since it may be a remainder from the header
664 parsing, where the beginning of the buffer is headers and the end
665 is non-headers. */
666 if(!k->header && (nread > 0 || is_empty_data)) {
667
668 if(data->set.opt_no_body) {
669 /* data arrives although we want none, bail out */
670 streamclose(conn, "ignoring body");
671 *done = TRUE;
672 return CURLE_WEIRD_SERVER_REPLY;
673 }
674
675#ifndef CURL_DISABLE_HTTP
676 if(0 == k->bodywrites && !is_empty_data) {
677 /* These checks are only made the first time we are about to
678 write a piece of the body */
679 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
680 /* HTTP-only checks */
681 result = Curl_http_firstwrite(data, conn, done);
682 if(result || *done)
683 return result;
684 }
685 } /* this is the first time we write a body part */
686#endif /* CURL_DISABLE_HTTP */
687
688 k->bodywrites++;
689
690 /* pass data to the debug function before it gets "dechunked" */
691 if(data->set.verbose) {
692 if(k->badheader) {
693 Curl_debug(data, CURLINFO_DATA_IN,
694 Curl_dyn_ptr(&data->state.headerb),
695 Curl_dyn_len(&data->state.headerb));
696 if(k->badheader == HEADER_PARTHEADER)
697 Curl_debug(data, CURLINFO_DATA_IN,
698 k->str, (size_t)nread);
699 }
700 else
701 Curl_debug(data, CURLINFO_DATA_IN,
702 k->str, (size_t)nread);
703 }
704
705#ifndef CURL_DISABLE_HTTP
706 if(k->chunk) {
707 /*
708 * Here comes a chunked transfer flying and we need to decode this
709 * properly. While the name says read, this function both reads
710 * and writes away the data. The returned 'nread' holds the number
711 * of actual data it wrote to the client.
712 */
713 CURLcode extra;
714 CHUNKcode res =
715 Curl_httpchunk_read(data, k->str, nread, &nread, &extra);
716
717 if(CHUNKE_OK < res) {
718 if(CHUNKE_PASSTHRU_ERROR == res) {
719 failf(data, "Failed reading the chunked-encoded stream");
720 return extra;
721 }
722 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
723 return CURLE_RECV_ERROR;
724 }
725 if(CHUNKE_STOP == res) {
726 /* we're done reading chunks! */
727 k->keepon &= ~KEEP_RECV; /* read no more */
728
729 /* N number of bytes at the end of the str buffer that weren't
730 written to the client. */
731 if(conn->chunk.datasize) {
732 infof(data, "Leftovers after chunking: %"
733 CURL_FORMAT_CURL_OFF_T " bytes",
734 conn->chunk.datasize);
735 }
736 }
737 /* If it returned OK, we just keep going */
738 }
739#endif /* CURL_DISABLE_HTTP */
740
741 /* Account for body content stored in the header buffer */
742 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
743 size_t headlen = Curl_dyn_len(&data->state.headerb);
744 DEBUGF(infof(data, "Increasing bytecount by %zu", headlen));
745 k->bytecount += headlen;
746 }
747
748 if((-1 != k->maxdownload) &&
749 (k->bytecount + nread >= k->maxdownload)) {
750
751 excess = (size_t)(k->bytecount + nread - k->maxdownload);
752 if(excess > 0 && !k->ignorebody) {
753 infof(data,
754 "Excess found in a read:"
755 " excess = %zu"
756 ", size = %" CURL_FORMAT_CURL_OFF_T
757 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
758 ", bytecount = %" CURL_FORMAT_CURL_OFF_T,
759 excess, k->size, k->maxdownload, k->bytecount);
760 connclose(conn, "excess found in a read");
761 }
762
763 nread = (ssize_t) (k->maxdownload - k->bytecount);
764 if(nread < 0) /* this should be unusual */
765 nread = 0;
766
767 /* HTTP/3 over QUIC should keep reading until QUIC connection
768 is closed. In contrast to HTTP/2 which can stop reading
769 from TCP connection, HTTP/3 over QUIC needs ACK from server
770 to ensure stream closure. It should keep reading. */
771 if(!is_http3) {
772 k->keepon &= ~KEEP_RECV; /* we're done reading */
773 }
774 }
775
776 k->bytecount += nread;
777
778 Curl_pgrsSetDownloadCounter(data, k->bytecount);
779
780 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
781 /* If this is chunky transfer, it was already written */
782
783 if(k->badheader && !k->ignorebody) {
784 /* we parsed a piece of data wrongly assuming it was a header
785 and now we output it as body instead */
786 size_t headlen = Curl_dyn_len(&data->state.headerb);
787
788 /* Don't let excess data pollute body writes */
789 if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
790 result = Curl_client_write(data, CLIENTWRITE_BODY,
791 Curl_dyn_ptr(&data->state.headerb),
792 headlen);
793 else
794 result = Curl_client_write(data, CLIENTWRITE_BODY,
795 Curl_dyn_ptr(&data->state.headerb),
796 (size_t)k->maxdownload);
797
798 if(result)
799 return result;
800 }
801 if(k->badheader < HEADER_ALLBAD) {
802 /* This switch handles various content encodings. If there's an
803 error here, be sure to check over the almost identical code
804 in http_chunks.c.
805 Make sure that ALL_CONTENT_ENCODINGS contains all the
806 encodings handled here. */
807 if(data->set.http_ce_skip || !k->writer_stack) {
808 if(!k->ignorebody && nread) {
809#ifndef CURL_DISABLE_POP3
810 if(conn->handler->protocol & PROTO_FAMILY_POP3)
811 result = Curl_pop3_write(data, k->str, nread);
812 else
813#endif /* CURL_DISABLE_POP3 */
814 result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
815 nread);
816 }
817 }
818 else if(!k->ignorebody && nread)
819 result = Curl_unencode_write(data, k->writer_stack, k->str, nread);
820 }
821 k->badheader = HEADER_NORMAL; /* taken care of now */
822
823 if(result)
824 return result;
825 }
826
827 } /* if(!header and data to read) */
828
829 if(conn->handler->readwrite && excess) {
830 /* Parse the excess data */
831 k->str += nread;
832
833 if(&k->str[excess] > &buf[data->set.buffer_size]) {
834 /* the excess amount was too excessive(!), make sure
835 it doesn't read out of buffer */
836 excess = &buf[data->set.buffer_size] - k->str;
837 }
838 nread = (ssize_t)excess;
839
840 result = conn->handler->readwrite(data, conn, &nread, &readmore);
841 if(result)
842 return result;
843
844 if(readmore)
845 k->keepon |= KEEP_RECV; /* we're not done reading */
846 break;
847 }
848
849 if(is_empty_data) {
850 /* if we received nothing, the server closed the connection and we
851 are done */
852 k->keepon &= ~KEEP_RECV;
853 }
854
855 if(k->keepon & KEEP_RECV_PAUSE) {
856 /* this is a paused transfer */
857 break;
858 }
859
860 } while(data_pending(data) && maxloops--);
861
862 if(maxloops <= 0) {
863 /* we mark it as read-again-please */
864 conn->cselect_bits = CURL_CSELECT_IN;
865 *comeback = TRUE;
866 }
867
868 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
869 conn->bits.close) {
870 /* When we've read the entire thing and the close bit is set, the server
871 may now close the connection. If there's now any kind of sending going
872 on from our side, we need to stop that immediately. */
873 infof(data, "we are done reading and this is set to close, stop send");
874 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
875 }
876
877 return CURLE_OK;
878}
879
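/*
 * Curl_done_sending() is called when the request body has been sent in full:
 * it clears KEEP_SEND, notifies the HTTP/2 and QUIC layers and, if a resend
 * is pending (rewindaftersend), rewinds the read stream.
 */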
880CURLcode Curl_done_sending(struct Curl_easy *data,
881 struct SingleRequest *k)
882{
883 struct connectdata *conn = data->conn;
884 k->keepon &= ~KEEP_SEND; /* we're done writing */
885
886 /* These functions should be moved into the handler struct! */
887 Curl_http2_done_sending(data, conn);
888 Curl_quic_done_sending(data);
889
890 if(conn->bits.rewindaftersend) {
891 CURLcode result = Curl_readrewind(data);
892 if(result)
893 return result;
894 }
895 return CURLE_OK;
896}
897
898#if defined(WIN32) && defined(USE_WINSOCK)
899#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
900#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
901#endif
902
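/* Query winsock for the ideal send backlog for this socket and grow the
   SO_SNDBUF size to match, so that large uploads can keep the send pipeline
   filled. */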
903static void win_update_buffer_size(curl_socket_t sockfd)
904{
905 int result;
906 ULONG ideal;
907 DWORD ideallen;
908 result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
909 &ideal, sizeof(ideal), &ideallen, 0, 0);
910 if(result == 0) {
911 setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
912 (const char *)&ideal, sizeof(ideal));
913 }
914}
915#else
916#define win_update_buffer_size(x)
917#endif
918
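/* Refill threshold: when fewer than 1/32 of the upload buffer's bytes remain
   unsent, more data is read to top it up before the next send so that writes
   stay large. */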
919#define curl_upload_refill_watermark(data) \
920 ((ssize_t)((data)->set.upload_buffer_size >> 5))
921
922/*
923 * Send data to upload to the server, when the socket is writable.
924 */
925static CURLcode readwrite_upload(struct Curl_easy *data,
926 struct connectdata *conn,
927 int *didwhat)
928{
929 ssize_t i, si;
930 ssize_t bytes_written;
931 CURLcode result;
932 ssize_t nread; /* number of bytes read */
933 bool sending_http_headers = FALSE;
934 struct SingleRequest *k = &data->req;
935
936 if((k->bytecount == 0) && (k->writebytecount == 0))
937 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
938
939 *didwhat |= KEEP_SEND;
940
941 do {
942 curl_off_t nbody;
943 ssize_t offset = 0;
944
945 if(0 != k->upload_present &&
946 k->upload_present < curl_upload_refill_watermark(data) &&
947 !k->upload_chunky && /* (variable sized chunked header; append not safe) */
948 !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
949 !(k->writebytecount + k->upload_present - k->pendingheader ==
950 data->state.infilesize)) {
951 offset = k->upload_present;
952 }
953
954 /* only read more data if there's no upload data already
955 present in the upload buffer, or if appending to upload buffer */
956 if(0 == k->upload_present || offset) {
957 result = Curl_get_upload_buffer(data);
958 if(result)
959 return result;
960 if(offset && k->upload_fromhere != data->state.ulbuf)
961 memmove(data->state.ulbuf, k->upload_fromhere, offset);
962 /* init the "upload from here" pointer */
963 k->upload_fromhere = data->state.ulbuf;
964
965 if(!k->upload_done) {
966 /* HTTP pollution, this should be written nicer to become more
967 protocol agnostic. */
968 size_t fillcount;
969 struct HTTP *http = k->p.http;
970
971 if((k->exp100 == EXP100_SENDING_REQUEST) &&
972 (http->sending == HTTPSEND_BODY)) {
973 /* If this call is to send body data, we must take some action:
974 We have sent off the full HTTP 1.1 request, and we shall now
975 go into the Expect: 100 state and await such a header */
976 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
977 k->keepon &= ~KEEP_SEND; /* disable writing */
978 k->start100 = Curl_now(); /* timeout count starts now */
979 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
980 /* set a timeout for the multi interface */
981 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
982 break;
983 }
984
985 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
986 if(http->sending == HTTPSEND_REQUEST)
987 /* We're sending the HTTP request headers, not the data.
988 Remember that so we don't change the line endings. */
989 sending_http_headers = TRUE;
990 else
991 sending_http_headers = FALSE;
992 }
993
994 k->upload_fromhere += offset;
995 result = Curl_fillreadbuffer(data, data->set.upload_buffer_size-offset,
996 &fillcount);
997 k->upload_fromhere -= offset;
998 if(result)
999 return result;
1000
1001 nread = offset + fillcount;
1002 }
1003 else
1004 nread = 0; /* we're done uploading/reading */
1005
1006 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
1007 /* this is a paused transfer */
1008 break;
1009 }
1010 if(nread <= 0) {
1011 result = Curl_done_sending(data, k);
1012 if(result)
1013 return result;
1014 break;
1015 }
1016
1017 /* store number of bytes available for upload */
1018 k->upload_present = nread;
1019
1020 /* convert LF to CRLF if so asked */
1021 if((!sending_http_headers) && (
1022#ifdef CURL_DO_LINEEND_CONV
1023 /* always convert if we're FTPing in ASCII mode */
1024 (data->state.prefer_ascii) ||
1025#endif
1026 (data->set.crlf))) {
1027 /* Do we need to allocate a scratch buffer? */
1028 if(!data->state.scratch) {
1029 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
1030 if(!data->state.scratch) {
1031 failf(data, "Failed to alloc scratch buffer");
1032
1033 return CURLE_OUT_OF_MEMORY;
1034 }
1035 }
1036
1037 /*
1038 * ASCII/EBCDIC Note: This is presumably a text (not binary)
1039 * transfer so the data should already be in ASCII.
1040 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1041 * must be used instead of the escape sequences \r & \n.
1042 */
1043 if(offset)
1044 memcpy(data->state.scratch, k->upload_fromhere, offset);
1045 for(i = offset, si = offset; i < nread; i++, si++) {
1046 if(k->upload_fromhere[i] == 0x0a) {
1047 data->state.scratch[si++] = 0x0d;
1048 data->state.scratch[si] = 0x0a;
1049 if(!data->set.crlf) {
1050 /* we're here only because FTP is in ASCII mode...
1051 bump infilesize for the LF we just added */
1052 if(data->state.infilesize != -1)
1053 data->state.infilesize++;
1054 }
1055 }
1056 else
1057 data->state.scratch[si] = k->upload_fromhere[i];
1058 }
1059
1060 if(si != nread) {
1061 /* only perform the special operation if we really did replace
1062 anything */
1063 nread = si;
1064
1065 /* upload from the new (replaced) buffer instead */
1066 k->upload_fromhere = data->state.scratch;
1067
1068 /* set the new amount too */
1069 k->upload_present = nread;
1070 }
1071 }
1072
1073#ifndef CURL_DISABLE_SMTP
1074 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1075 result = Curl_smtp_escape_eob(data, nread, offset);
1076 if(result)
1077 return result;
1078 }
1079#endif /* CURL_DISABLE_SMTP */
1080 } /* if 0 == k->upload_present or appended to upload buffer */
1081 else {
1082 /* We have a partial buffer left from a previous "round". Use
1083 that instead of reading more data */
1084 }
1085
1086 /* write to socket (send away data) */
1087 result = Curl_write(data,
1088 conn->writesockfd, /* socket to send to */
1089 k->upload_fromhere, /* buffer pointer */
1090 k->upload_present, /* buffer size */
1091 &bytes_written); /* actually sent */
1092 if(result)
1093 return result;
1094
1095 win_update_buffer_size(conn->writesockfd);
1096
1097 if(k->pendingheader) {
1098 /* parts of what was sent was header */
1099 curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
1100 /* show the data before we change the pointer upload_fromhere */
1101 Curl_debug(data, CURLINFO_HEADER_OUT, k->upload_fromhere, (size_t)n);
1102 k->pendingheader -= n;
1103 nbody = bytes_written - n; /* size of the written body part */
1104 }
1105 else
1106 nbody = bytes_written;
1107
1108 if(nbody) {
1109 /* show the data before we change the pointer upload_fromhere */
1110 Curl_debug(data, CURLINFO_DATA_OUT,
1111 &k->upload_fromhere[bytes_written - nbody],
1112 (size_t)nbody);
1113
1114 k->writebytecount += nbody;
1115 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1116 }
1117
1118 if((!k->upload_chunky || k->forbidchunk) &&
1119 (k->writebytecount == data->state.infilesize)) {
1120 /* we have sent all data we were supposed to */
1121 k->upload_done = TRUE;
1122 infof(data, "We are completely uploaded and fine");
1123 }
1124
1125 if(k->upload_present != bytes_written) {
1126 /* we only wrote a part of the buffer (if anything), deal with it! */
1127
1128 /* store the amount of bytes left in the buffer to write */
1129 k->upload_present -= bytes_written;
1130
1131 /* advance the pointer where to find the buffer when the next send
1132 is to happen */
1133 k->upload_fromhere += bytes_written;
1134 }
1135 else {
1136 /* we've uploaded that buffer now */
1137 result = Curl_get_upload_buffer(data);
1138 if(result)
1139 return result;
1140 k->upload_fromhere = data->state.ulbuf;
1141 k->upload_present = 0; /* no more bytes left */
1142
1143 if(k->upload_done) {
1144 result = Curl_done_sending(data, k);
1145 if(result)
1146 return result;
1147 }
1148 }
1149
1150
1151 } while(0); /* just to break out from! */
1152
1153 return CURLE_OK;
1154}
1155
1156/*
1157 * Curl_readwrite() is the low-level function to be called when data is to
1158 * be read and written to/from the connection.
1159 *
1160 * return '*comeback' TRUE if we didn't properly drain the socket so this
1161 * function should get called again without select() or similar in between!
1162 */
1163CURLcode Curl_readwrite(struct connectdata *conn,
1164 struct Curl_easy *data,
1165 bool *done,
1166 bool *comeback)
1167{
1168 struct SingleRequest *k = &data->req;
1169 CURLcode result;
1170 int didwhat = 0;
1171
1172 curl_socket_t fd_read;
1173 curl_socket_t fd_write;
1174 int select_res = conn->cselect_bits;
1175
1176 conn->cselect_bits = 0;
1177
1178 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1179 then we are in rate limiting state in that transfer direction */
1180
1181 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1182 fd_read = conn->sockfd;
1183 else
1184 fd_read = CURL_SOCKET_BAD;
1185
1186 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1187 fd_write = conn->writesockfd;
1188 else
1189 fd_write = CURL_SOCKET_BAD;
1190
1191#if defined(USE_HTTP2) || defined(USE_HTTP3)
1192 if(data->state.drain) {
1193 select_res |= CURL_CSELECT_IN;
1194 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data"));
1195 }
1196#endif
1197
1198 if(!select_res) /* Call for select()/poll() only, if read/write/error
1199 status is not known. */
1200 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1201
1202 if(select_res == CURL_CSELECT_ERR) {
1203 failf(data, "select/poll returned error");
1204 return CURLE_SEND_ERROR;
1205 }
1206
1207#ifdef USE_HYPER
1208 if(conn->datastream) {
1209 result = conn->datastream(data, conn, &didwhat, done, select_res);
1210 if(result || *done)
1211 return result;
1212 }
1213 else {
1214#endif
1215 /* We go ahead and do a read if we have a readable socket or if
1216 the stream was rewound (in which case we have data in a
1217 buffer) */
1218 if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
1219 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1220 if(result || *done)
1221 return result;
1222 }
1223
1224 /* If we still have writing to do, we check if we have a writable socket. */
1225 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1226 /* write */
1227
1228 result = readwrite_upload(data, conn, &didwhat);
1229 if(result)
1230 return result;
1231 }
1232#ifdef USE_HYPER
1233 }
1234#endif
1235
1236 k->now = Curl_now();
1237 if(!didwhat) {
1238 /* no read no write, this is a timeout? */
1239 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1240 /* This should allow some time for the header to arrive, but only a
1241 very short time as otherwise it'll be too much wasted time too
1242 often. */
1243
1244 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1245
1246 Therefore, when a client sends this header field to an origin server
1247 (possibly via a proxy) from which it has never seen a 100 (Continue)
1248 status, the client SHOULD NOT wait for an indefinite period before
1249 sending the request body.
1250
1251 */
1252
1253 timediff_t ms = Curl_timediff(k->now, k->start100);
1254 if(ms >= data->set.expect_100_timeout) {
1255 /* we've waited long enough, continue anyway */
1256 k->exp100 = EXP100_SEND_DATA;
1257 k->keepon |= KEEP_SEND;
1258 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1259 infof(data, "Done waiting for 100-continue");
1260 }
1261 }
1262
1263#ifdef ENABLE_QUIC
1264 if(conn->transport == TRNSPRT_QUIC) {
1265 result = Curl_quic_idle(data);
1266 if(result)
1267 return result;
1268 }
1269#endif
1270 }
1271
1272 if(Curl_pgrsUpdate(data))
1273 result = CURLE_ABORTED_BY_CALLBACK;
1274 else
1275 result = Curl_speedcheck(data, k->now);
1276 if(result)
1277 return result;
1278
1279 if(k->keepon) {
1280 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1281 if(k->size != -1) {
1282 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1283 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1284 CURL_FORMAT_CURL_OFF_T " bytes received",
1285 Curl_timediff(k->now, data->progress.t_startsingle),
1286 k->bytecount, k->size);
1287 }
1288 else {
1289 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1290 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1291 Curl_timediff(k->now, data->progress.t_startsingle),
1292 k->bytecount);
1293 }
1294 return CURLE_OPERATION_TIMEDOUT;
1295 }
1296 }
1297 else {
1298 /*
1299 * The transfer has been performed. Just make some general checks before
1300 * returning.
1301 */
1302
1303 if(!(data->set.opt_no_body) && (k->size != -1) &&
1304 (k->bytecount != k->size) &&
1305#ifdef CURL_DO_LINEEND_CONV
1306 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1307 so we'll check to see if the discrepancy can be explained
1308 by the number of CRLFs we've changed to LFs.
1309 */
1310 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1311#endif /* CURL_DO_LINEEND_CONV */
1312 !k->newurl) {
1313 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1314 " bytes remaining to read", k->size - k->bytecount);
1315 return CURLE_PARTIAL_FILE;
1316 }
1317 if(!(data->set.opt_no_body) && k->chunk &&
1318 (conn->chunk.state != CHUNK_STOP)) {
1319 /*
1320 * In chunked mode, return an error if the connection is closed prior to
1321 * the empty (terminating) chunk is read.
1322 *
1323 * The condition above used to check for
1324 * conn->proto.http->chunk.datasize != 0 which is true after reading
1325 * *any* chunk, not just the empty chunk.
1326 *
1327 */
1328 failf(data, "transfer closed with outstanding read data remaining");
1329 return CURLE_PARTIAL_FILE;
1330 }
1331 if(Curl_pgrsUpdate(data))
1332 return CURLE_ABORTED_BY_CALLBACK;
1333 }
1334
1335 /* Now update the "done" boolean we return */
1336 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1337 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1338
1339 return CURLE_OK;
1340}
1341
1342/*
1343 * Curl_single_getsock() gets called by the multi interface code when the app
1344 * has requested to get the sockets for the current connection. This function
1345 * will then be called once for every connection that the multi interface
1346 * keeps track of. This function will only be called for connections that are
1347 * in the proper state to have this information available.
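 *
 * At most two entries are stored in the sock[] array: the readable socket
 * first (if any), then the writable socket when it differs from it.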
1348 */
1349int Curl_single_getsock(struct Curl_easy *data,
1350 struct connectdata *conn,
1351 curl_socket_t *sock)
1352{
1353 int bitmap = GETSOCK_BLANK;
1354 unsigned sockindex = 0;
1355
1356 if(conn->handler->perform_getsock)
1357 return conn->handler->perform_getsock(data, conn, sock);
1358
1359 /* don't include HOLD and PAUSE connections */
1360 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1361
1362 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1363
1364 bitmap |= GETSOCK_READSOCK(sockindex);
1365 sock[sockindex] = conn->sockfd;
1366 }
1367
1368 /* don't include HOLD and PAUSE connections */
1369 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1370
1371 if((conn->sockfd != conn->writesockfd) ||
1372 bitmap == GETSOCK_BLANK) {
1373 /* only if they are not the same socket and we have a readable
1374 one, we increase index */
1375 if(bitmap != GETSOCK_BLANK)
1376 sockindex++; /* increase index if we need two entries */
1377
1378 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1379
1380 sock[sockindex] = conn->writesockfd;
1381 }
1382
1383 bitmap |= GETSOCK_WRITESOCK(sockindex);
1384 }
1385
1386 return bitmap;
1387}
1388
1389/* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1390 which means this gets called once for each subsequent redirect etc */
1391void Curl_init_CONNECT(struct Curl_easy *data)
1392{
1393 data->state.fread_func = data->set.fread_func_set;
1394 data->state.in = data->set.in_set;
1395}
1396
1397/*
1398 * Curl_pretransfer() is called immediately before a transfer starts, and only
1399 * once for one transfer no matter if it has redirects or do multi-pass
1400 * authentication etc.
1401 */
1402CURLcode Curl_pretransfer(struct Curl_easy *data)
1403{
1404 CURLcode result;
1405
1406 if(!data->state.url && !data->set.uh) {
1407 /* we can't do anything without URL */
1408 failf(data, "No URL set");
1409 return CURLE_URL_MALFORMAT;
1410 }
1411
1412 /* since the URL may have been redirected in a previous use of this handle */
1413 if(data->state.url_alloc) {
1414 /* the already set URL is allocated, free it first! */
1415 Curl_safefree(data->state.url);
1416 data->state.url_alloc = FALSE;
1417 }
1418
1419 if(!data->state.url && data->set.uh) {
1420 CURLUcode uc;
1421 free(data->set.str[STRING_SET_URL]);
1422 uc = curl_url_get(data->set.uh,
1423 CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1424 if(uc) {
1425 failf(data, "No URL set");
1426 return CURLE_URL_MALFORMAT;
1427 }
1428 }
1429
1430 data->state.prefer_ascii = data->set.prefer_ascii;
1431 data->state.list_only = data->set.list_only;
1432 data->state.httpreq = data->set.method;
1433 data->state.url = data->set.str[STRING_SET_URL];
1434
1435 /* Init the SSL session ID cache here. We do it here since we want to do it
1436 after the *_setopt() calls (that could specify the size of the cache) but
1437 before any transfer takes place. */
1438 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1439 if(result)
1440 return result;
1441
1442 data->state.followlocation = 0; /* reset the location-follow counter */
1443 data->state.this_is_a_follow = FALSE; /* reset this */
1444 data->state.errorbuf = FALSE; /* no error has occurred */
1445 data->state.httpwant = data->set.httpwant;
1446 data->state.httpversion = 0;
1447 data->state.authproblem = FALSE;
1448 data->state.authhost.want = data->set.httpauth;
1449 data->state.authproxy.want = data->set.proxyauth;
1450 Curl_safefree(data->info.wouldredirect);
1451
1452 if(data->state.httpreq == HTTPREQ_PUT)
1453 data->state.infilesize = data->set.filesize;
1454 else if((data->state.httpreq != HTTPREQ_GET) &&
1455 (data->state.httpreq != HTTPREQ_HEAD)) {
1456 data->state.infilesize = data->set.postfieldsize;
1457 if(data->set.postfields && (data->state.infilesize == -1))
1458 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1459 }
1460 else
1461 data->state.infilesize = 0;
1462
1463#ifndef CURL_DISABLE_COOKIES
1464 /* If there is a list of cookie files to read, do it now! */
1465 if(data->state.cookielist)
1466 Curl_cookie_loadfiles(data);
1467#endif
1468 /* If there is a list of host pairs to deal with */
1469 if(data->state.resolve)
1470 result = Curl_loadhostpairs(data);
1471
1472 if(!result) {
1473 /* Allow data->set.use_port to set which port to use. This needs to be
1474 * disabled for example when we follow Location: headers to URLs using
1475 * different ports! */
1476 data->state.allow_port = TRUE;
1477
1478#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1479 /*************************************************************
1480 * Tell signal handler to ignore SIGPIPE
1481 *************************************************************/
1482 if(!data->set.no_signal)
1483 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1484#endif
1485
1486 Curl_initinfo(data); /* reset session-specific information "variables" */
1487 Curl_pgrsResetTransferSizes(data);
1488 Curl_pgrsStartNow(data);
1489
1490 /* In case the handle is re-used and an authentication method was picked
1491 in the session we need to make sure we only use the one(s) we now
1492 consider to be fine */
1493 data->state.authhost.picked &= data->state.authhost.want;
1494 data->state.authproxy.picked &= data->state.authproxy.want;
1495
1496#ifndef CURL_DISABLE_FTP
1497 data->state.wildcardmatch = data->set.wildcard_enabled;
1498 if(data->state.wildcardmatch) {
1499 struct WildcardData *wc = &data->wildcard;
1500 if(wc->state < CURLWC_INIT) {
1501 result = Curl_wildcard_init(wc); /* init wildcard structures */
1502 if(result)
1503 return CURLE_OUT_OF_MEMORY;
1504 }
1505 }
1506#endif
1507 Curl_http2_init_state(&data->state);
1508 result = Curl_hsts_loadcb(data, data->hsts);
1509 }
1510
1511 /*
1512 * Set user-agent. Used for HTTP, but since we can attempt to tunnel
1513 * basically anything through a http proxy we can't limit this based on
1514 * protocol.
1515 */
1516 if(data->set.str[STRING_USERAGENT]) {
1517 Curl_safefree(data->state.aptr.uagent);
1518 data->state.aptr.uagent =
1519 aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
1520 if(!data->state.aptr.uagent)
1521 return CURLE_OUT_OF_MEMORY;
1522 }
1523
1524 if(!result)
1525 result = Curl_setstropt(&data->state.aptr.user,
1526 data->set.str[STRING_USERNAME]);
1527 if(!result)
1528 result = Curl_setstropt(&data->state.aptr.passwd,
1529 data->set.str[STRING_PASSWORD]);
1530 if(!result)
1531 result = Curl_setstropt(&data->state.aptr.proxyuser,
1532 data->set.str[STRING_PROXYUSERNAME]);
1533 if(!result)
1534 result = Curl_setstropt(&data->state.aptr.proxypasswd,
1535 data->set.str[STRING_PROXYPASSWORD]);
1536
1537 data->req.headerbytecount = 0;
1538 Curl_headers_cleanup(data);
1539 return result;
1540}
1541
1542/*
1543 * Curl_posttransfer() is called immediately after a transfer ends
1544 */
1545CURLcode Curl_posttransfer(struct Curl_easy *data)
1546{
1547#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1548 /* restore the signal handler for SIGPIPE before we get back */
1549 if(!data->set.no_signal)
1550 signal(SIGPIPE, data->state.prev_signal);
1551#else
1552 (void)data; /* unused parameter */
1553#endif
1554
1555 return CURLE_OK;
1556}
1557
1558/*
1559 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1560 * as given by the remote server and set up the new URL to request.
1561 *
1562 * This function DOES NOT FREE the given url.
1563 */
1564CURLcode Curl_follow(struct Curl_easy *data,
1565 char *newurl, /* the Location: string */
1566 followtype type) /* see transfer.h */
1567{
1568#ifdef CURL_DISABLE_HTTP
1569 (void)data;
1570 (void)newurl;
1571 (void)type;
1572 /* Location: following will not happen when HTTP is disabled */
1573 return CURLE_TOO_MANY_REDIRECTS;
1574#else
1575
1576 /* Location: redirect */
1577 bool disallowport = FALSE;
1578 bool reachedmax = FALSE;
1579 CURLUcode uc;
1580
1581 DEBUGASSERT(type != FOLLOW_NONE);
1582
1583 if(type != FOLLOW_FAKE)
1584 data->state.requests++; /* count all real follows */
1585 if(type == FOLLOW_REDIR) {
1586 if((data->set.maxredirs != -1) &&
1587 (data->state.followlocation >= data->set.maxredirs)) {
1588 reachedmax = TRUE;
1589 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1590 to URL */
1591 }
1592 else {
1593 /* mark the next request as a followed location: */
1594 data->state.this_is_a_follow = TRUE;
1595
1596 data->state.followlocation++; /* count location-followers */
1597
1598 if(data->set.http_auto_referer) {
1599 CURLU *u;
1600 char *referer = NULL;
1601
1602 /* We are asked to automatically set the previous URL as the referer
1603 when we get the next URL. We pick the ->url field, which may or may
1604 not be 100% correct */
1605
1606 if(data->state.referer_alloc) {
1607 Curl_safefree(data->state.referer);
1608 data->state.referer_alloc = FALSE;
1609 }
1610
1611 /* Make a copy of the URL without credentials and fragment */
1612 u = curl_url();
1613 if(!u)
1614 return CURLE_OUT_OF_MEMORY;
1615
1616 uc = curl_url_set(u, CURLUPART_URL, data->state.url, 0);
1617 if(!uc)
1618 uc = curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0);
1619 if(!uc)
1620 uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
1621 if(!uc)
1622 uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
1623 if(!uc)
1624 uc = curl_url_get(u, CURLUPART_URL, &referer, 0);
1625
1626 curl_url_cleanup(u);
1627
1628 if(uc || !referer)
1629 return CURLE_OUT_OF_MEMORY;
1630
1631 data->state.referer = referer;
1632 data->state.referer_alloc = TRUE; /* yes, free this later */
1633 }
1634 }
1635 }
1636
1637 if((type != FOLLOW_RETRY) &&
1638 (data->req.httpcode != 401) && (data->req.httpcode != 407) &&
1639 Curl_is_absolute_url(newurl, NULL, 0))
1640 /* If this is not redirect due to a 401 or 407 response and an absolute
1641 URL: don't allow a custom port number */
1642 disallowport = TRUE;
1643
1644 DEBUGASSERT(data->state.uh);
1645 uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1646 (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1647 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
1648 CURLU_ALLOW_SPACE |
1649 (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
1650 if(uc) {
1651 if(type != FOLLOW_FAKE)
1652 return Curl_uc_to_curlcode(uc);
1653
1654 /* the URL could not be parsed for some reason, but since this is FAKE
1655 mode, just duplicate the field as-is */
1656 newurl = strdup(newurl);
1657 if(!newurl)
1658 return CURLE_OUT_OF_MEMORY;
1659 }
1660 else {
1661 uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1662 if(uc)
1663 return Curl_uc_to_curlcode(uc);
1664
1665 /* Clear auth if this redirects to a different port number or protocol,
1666 unless permitted */
1667 if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
1668 char *portnum;
1669 int port;
1670 bool clear = FALSE;
1671
1672 if(data->set.use_port && data->state.allow_port)
1673 /* a custom port is used */
1674 port = (int)data->set.use_port;
1675 else {
1676 uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
1677 CURLU_DEFAULT_PORT);
1678 if(uc) {
1679 free(newurl);
1680 return Curl_uc_to_curlcode(uc);
1681 }
1682 port = atoi(portnum);
1683 free(portnum);
1684 }
1685 if(port != data->info.conn_remote_port) {
1686 infof(data, "Clear auth, redirects to port from %u to %u",
1687 data->info.conn_remote_port, port);
1688 clear = TRUE;
1689 }
1690 else {
1691 char *scheme;
1692 const struct Curl_handler *p;
1693 uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
1694 if(uc) {
1695 free(newurl);
1696 return Curl_uc_to_curlcode(uc);
1697 }
1698
1699 p = Curl_builtin_scheme(scheme);
1700 if(p && (p->protocol != data->info.conn_protocol)) {
1701 infof(data, "Clear auth, redirects scheme from %s to %s",
1702 data->info.conn_scheme, scheme);
1703 clear = TRUE;
1704 }
1705 free(scheme);
1706 }
1707 if(clear) {
1708 Curl_safefree(data->state.aptr.user);
1709 Curl_safefree(data->state.aptr.passwd);
1710 }
1711 }
1712 }
1713
1714 if(type == FOLLOW_FAKE) {
1715 /* we're only figuring out the new url if we would've followed locations
1716 but now we're done so we can get out! */
1717 data->info.wouldredirect = newurl;
1718
1719 if(reachedmax) {
1720 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1721 return CURLE_TOO_MANY_REDIRECTS;
1722 }
1723 return CURLE_OK;
1724 }
1725
1726 if(disallowport)
1727 data->state.allow_port = FALSE;
1728
1729 if(data->state.url_alloc)
1730 Curl_safefree(data->state.url);
1731
1732 data->state.url = newurl;
1733 data->state.url_alloc = TRUE;
1734
1735 infof(data, "Issue another request to this URL: '%s'", data->state.url);
1736
1737 /*
1738 * We get here when the HTTP code is 300-399 (and 401). We need to act
1739 * differently based on exactly what return code there was.
1740 *
1741 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1742 * a HTTP (proxy-) authentication scheme other than Basic.
1743 */
1744 switch(data->info.httpcode) {
1745 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1746 Authorization: XXXX header in the HTTP request code snippet */
1747 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1748 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1749 /* 300 - Multiple Choices */
1750 /* 306 - Not used */
1751 /* 307 - Temporary Redirect */
1752 default: /* for all above (and the unknown ones) */
1753 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1754 * seem to be OK to POST to.
1755 */
1756 break;
1757 case 301: /* Moved Permanently */
1758 /* (quote from RFC7231, section 6.4.2)
1759 *
1760 * Note: For historical reasons, a user agent MAY change the request
1761 * method from POST to GET for the subsequent request. If this
1762 * behavior is undesired, the 307 (Temporary Redirect) status code
1763 * can be used instead.
1764 *
1765 * ----
1766 *
1767 * Many webservers expect this, so these servers often answer to a POST
1768 * request with an error page. To be sure that libcurl gets the page that
1769 * most user agents would get, libcurl has to force GET.
1770 *
1771 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1772 * can be overridden with CURLOPT_POSTREDIR.
1773 */
1774 if((data->state.httpreq == HTTPREQ_POST
1775 || data->state.httpreq == HTTPREQ_POST_FORM
1776 || data->state.httpreq == HTTPREQ_POST_MIME)
1777 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1778 infof(data, "Switch from POST to GET");
1779 data->state.httpreq = HTTPREQ_GET;
1780 }
1781 break;
  case 302: /* Found */
    /* (quote from RFC7231, section 6.4.3)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request.  If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answer a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->state.httpreq == HTTPREQ_POST
        || data->state.httpreq == HTTPREQ_POST_FORM
        || data->state.httpreq == HTTPREQ_POST_MIME)
       && !(data->set.keep_post & CURL_REDIR_POST_302)) {
      infof(data, "Switch from POST to GET");
      data->state.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* 'See Other' location is not the resource but a substitute for the
     * resource. In this case we switch the method to GET/HEAD, unless the
     * method is POST and the user specified to keep it as POST.
     * https://github.com/curl/curl/issues/5237#issuecomment-614641049
     */
    if(data->state.httpreq != HTTPREQ_GET &&
       ((data->state.httpreq != HTTPREQ_POST &&
         data->state.httpreq != HTTPREQ_POST_FORM &&
         data->state.httpreq != HTTPREQ_POST_MIME) ||
        !(data->set.keep_post & CURL_REDIR_POST_303))) {
      data->state.httpreq = HTTPREQ_GET;
      data->set.upload = false;
      infof(data, "Switch to %s",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTransferSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}

/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.

   NOTE: the returned *url is malloc()ed. */
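/* A minimal usage sketch (hypothetical caller; in the real build the multi
   machinery decides when to call this), assuming an already set up transfer
   handle 'data':

     char *newurl = NULL;
     CURLcode result = Curl_retry_request(data, &newurl);
     if(!result && newurl) {
       ... a retry is wanted: re-issue the transfer against 'newurl' ...
       free(newurl);
     }
*/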
CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
{
  struct connectdata *conn = data->conn;
  bool retry = FALSE;
  *url = NULL;

  /* if we are uploading, we cannot do the checks below unless the protocol
     is HTTP, since an upload over HTTP still gets a response we can act on */
  if(data->set.upload &&
     !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
    return CURLE_OK;

  if((data->req.bytecount + data->req.headerbytecount == 0) &&
     conn->bits.reuse &&
     (!data->set.opt_no_body || (conn->handler->protocol & PROTO_FAMILY_HTTP))
#ifndef CURL_DISABLE_RTSP
     && (data->set.rtspreq != RTSPREQ_RECEIVE)
#endif
    )
    /* We got no data and we attempted to re-use a connection. For HTTP this
       can be a retry, so we try again regardless of whether we expected a
       body. For other protocols we only try again if we expected a body.

       This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    retry = TRUE;
  else if(data->state.refused_stream &&
          (data->req.bytecount + data->req.headerbytecount == 0) ) {
    /* This was sent on a refused stream, safe to rerun. A refused stream
       error can typically only happen on HTTP/2 level if the stream is safe
       to issue again, but the nghttp2 API can deliver the message to other
       streams as well, which is why this code also checks the data
       counters. */
    infof(data, "REFUSED_STREAM, retrying a fresh connect");
    data->state.refused_stream = FALSE; /* clear again */
    retry = TRUE;
  }
  if(retry) {
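/* give up after this many retries of a dead (re-used) connection */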
#define CONN_MAX_RETRIES 5
    if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
      failf(data, "Connection died, tried %d times before giving up",
            CONN_MAX_RETRIES);
      data->state.retrycount = 0;
      return CURLE_SEND_ERROR;
    }
    infof(data, "Connection died, retrying a fresh connect (retry count: %d)",
          data->state.retrycount);
    *url = strdup(data->state.url);
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    connclose(conn, "retry"); /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we are about
                                to retry. Marking it this way should
                                prevent e.g. HTTP transfers from
                                returning an error just because nothing
                                has been transferred! */

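    /* For HTTP, if any of the request body has already been sent, rewind the
       read stream so the retried request can send the body from the start */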
    if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
      if(data->req.writebytecount) {
        CURLcode result = Curl_readrewind(data);
        if(result) {
          Curl_safefree(*url);
          return result;
        }
      }
    }
  }
  return CURLE_OK;
}

/*
 * Curl_setup_transfer() is called to set up some basic properties for the
 * upcoming transfer.
 */
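/* A minimal usage sketch (hypothetical protocol handler; the argument values
   below are illustrative only): read the response from FIRSTSOCKET with an
   unknown size, parse headers, and with nothing left to send:

     Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
*/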
void
Curl_setup_transfer(
  struct Curl_easy *data,   /* transfer */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  int writesockindex        /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  )
{
  struct SingleRequest *k = &data->req;
  struct connectdata *conn = data->conn;
  struct HTTP *http = data->req.p.http;
  bool httpsending;

  DEBUGASSERT(conn != NULL);
  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
                 (http->sending == HTTPSEND_REQUEST));

  if(conn->bits.multiplex || conn->httpversion == 20 || httpsending) {
    /* when multiplexing, the read/write sockets need to be the same! */
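    /* pick the read socket: use 'sockindex' if one was given, otherwise fall
       back to the write socket, or mark the socket as bad if neither index
       was provided */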
    conn->sockfd = sockindex == -1 ?
      ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
      conn->sock[sockindex];
    conn->writesockfd = conn->sockfd;
    if(httpsending)
      /* special and very HTTP-specific */
      writesockindex = FIRSTSOCKET;
  }
  else {
    conn->sockfd = sockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[sockindex];
    conn->writesockfd = writesockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[writesockindex];
  }
  k->getheader = getheader;

  k->size = size;

  /* The code sequence below is placed in this function because not all of
     the necessary input is always known in do_complete(); this function may
     be called after that one. */

  if(!k->getheader) {
    k->header = FALSE;
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->set.opt_no_body) {

    if(sockindex != -1)
      k->keepon |= KEEP_RECV;

    if(writesockindex != -1) {
      /* HTTP 1.1 magic:

         Even if we require a 100-return code before uploading data, we might
         need to write data before that since the REQUEST may not have been
         fully sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-return code.
      */
      if((data->state.expect100header) &&
         (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
         (http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = Curl_now();

        /* Set a timeout for the multi interface. Add the inaccuracy margin so
           that we don't fire slightly too early and get refused the chance to
           run. */
        Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
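
        /* Side note (application side, illustrative only): a client that
           wants to skip the 100-continue handshake entirely can send an
           empty Expect header, roughly:

             struct curl_slist *hdrs = curl_slist_append(NULL, "Expect:");
             curl_easy_setopt(easy, CURLOPT_HTTPHEADER, hdrs);

           'easy' and 'hdrs' are hypothetical application variables. */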
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(writesockindex != -1) */
  } /* if(k->getheader || !data->set.opt_no_body) */

}
