Example #1
File: file.c Project: sshyran/curl
 /*
  Check if this is a range download, and if so, set the internal variables
  properly. This code is copied from the FTP implementation and might as
  well be factored out.
 */
static CURLcode file_range(struct connectdata *conn)
{
  curl_off_t from, to;
  curl_off_t totalsize=-1;
  char *ptr;
  char *ptr2;
  struct Curl_easy *data = conn->data;

  if(data->state.use_range && data->state.range) {
    CURLofft from_t;
    CURLofft to_t;
    from_t = curlx_strtoofft(data->state.range, &ptr, 0, &from);
    if(from_t == CURL_OFFT_FLOW)
      return CURLE_RANGE_ERROR;
    while(*ptr && (ISSPACE(*ptr) || (*ptr=='-')))
      ptr++;
    to_t = curlx_strtoofft(ptr, &ptr2, 0, &to);
    if(to_t == CURL_OFFT_FLOW)
      return CURLE_RANGE_ERROR;
    if((to_t == CURL_OFFT_INVAL) && !from_t) {
      /* X - */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE %" CURL_FORMAT_CURL_OFF_T " to end of file\n",
                   from));
    }
    else if((from_t == CURL_OFFT_INVAL) && !to_t) {
      /* -Y */
      data->req.maxdownload = to;
      data->state.resume_from = -to;
      DEBUGF(infof(data, "RANGE the last %" CURL_FORMAT_CURL_OFF_T " bytes\n",
                   to));
    }
    else {
      /* X-Y */
      totalsize = to-from;
      data->req.maxdownload = totalsize+1; /* include last byte */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE from %" CURL_FORMAT_CURL_OFF_T
                   " getting %" CURL_FORMAT_CURL_OFF_T " bytes\n",
                   from, data->req.maxdownload));
    }
    DEBUGF(infof(data, "range-download from %" CURL_FORMAT_CURL_OFF_T
                 " to %" CURL_FORMAT_CURL_OFF_T ", totally %"
                 CURL_FORMAT_CURL_OFF_T " bytes\n",
                 from, to, data->req.maxdownload));
  }
  else
    data->req.maxdownload = -1;
  return CURLE_OK;
}
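Example #1 relies on the newer curlx_strtoofft() contract: a three-way status (ok, overflow, no digits) plus an output pointer, with a leading '-' left unconsumed so the "-Y" branch can detect it. Below is a minimal standalone stand-in with the same shape, built on strtoll(); the names my_strtooff and offt_result are illustrative assumptions, not curl API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the three-way parse contract used above:
   report "no digits" and "out of range" as distinct results instead of
   encoding them in the returned number. Not curl's implementation. */
typedef enum { OFFT_OK, OFFT_FLOW, OFFT_INVAL } offt_result;

static offt_result my_strtooff(const char *str, char **end, long long *out)
{
  char *endp;
  long long val;

  if(end)
    *end = (char *)str;
  if(*str == '-')
    return OFFT_INVAL;          /* leave a leading '-' for the caller */

  errno = 0;
  val = strtoll(str, &endp, 10);
  if(end)
    *end = endp;
  if(endp == str)
    return OFFT_INVAL;          /* no digits at all */
  if(errno == ERANGE)
    return OFFT_FLOW;           /* value did not fit in long long */
  *out = val;
  return OFFT_OK;
}

int main(void)
{
  long long v = 0;
  char *end;
  offt_result r = my_strtooff("300-", &end, &v);
  printf("\"300-\" -> %d, value %lld, rest \"%s\"\n", (int)r, v, end);
  printf("\"-400\" -> %d\n", (int)my_strtooff("-400", &end, &v));
  printf("\"99999999999999999999\" -> %d\n",
         (int)my_strtooff("99999999999999999999", &end, &v));
  return 0;
}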
Example #2
File: file.c Project: 0w/moai-dev
 /*
  Check if this is a range download, and if so, set the internal variables
  properly. This code is copied from the FTP implementation and might as
  well be factored out.
 */
static CURLcode file_range(struct connectdata *conn)
{
  curl_off_t from, to;
  curl_off_t totalsize=-1;
  char *ptr;
  char *ptr2;
  struct SessionHandle *data = conn->data;

  if(data->state.use_range && data->state.range) {
    from=curlx_strtoofft(data->state.range, &ptr, 0);
    while(ptr && *ptr && (isspace((int)*ptr) || (*ptr=='-')))
      ptr++;
    to=curlx_strtoofft(ptr, &ptr2, 0);
    if(ptr == ptr2) {
      /* we didn't get any digit */
      to=-1;
    }
    if((-1 == to) && (from>=0)) {
      /* X - */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE %" FORMAT_OFF_T " to end of file\n",
                   from));
    }
    else if(from < 0) {
      /* -Y */
      totalsize = -from;
      data->req.maxdownload = -from;
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE the last %" FORMAT_OFF_T " bytes\n",
                   totalsize));
    }
    else {
      /* X-Y */
      totalsize = to-from;
      data->req.maxdownload = totalsize+1; /* include last byte */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE from %" FORMAT_OFF_T
                   " getting %" FORMAT_OFF_T " bytes\n",
                   from, data->req.maxdownload));
    }
    DEBUGF(infof(data, "range-download from %" FORMAT_OFF_T
                 " to %" FORMAT_OFF_T ", totally %" FORMAT_OFF_T " bytes\n",
                 from, to, data->req.maxdownload));
  }
  else
    data->req.maxdownload = -1;
  return CURLE_OK;
}
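The older variant only gets the numeric value back, so it distinguishes the "-Y" form by the sign of the first number and detects a missing second number by comparing the two end pointers. A minimal standalone mirror of that branching, using strtoll() from <stdlib.h> instead of the curl-internal helper (the function name parse_range and the long long type are illustrative assumptions):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical standalone mirror of the range logic above:
   "X-"  -> resume at X, no download limit
   "-Y"  -> download only the last Y bytes
   "X-Y" -> resume at X, download Y-X+1 bytes */
static void parse_range(const char *range,
                        long long *resume_from, long long *maxdownload)
{
  char *ptr, *ptr2;
  long long from, to;

  from = strtoll(range, &ptr, 10);          /* negative for "-Y" */
  while(*ptr && (isspace((unsigned char)*ptr) || (*ptr == '-')))
    ptr++;                                  /* skip the dash separator */
  to = strtoll(ptr, &ptr2, 10);
  if(ptr == ptr2)
    to = -1;                                /* we didn't get any digit */

  if((-1 == to) && (from >= 0)) {           /* X - */
    *resume_from = from;
    *maxdownload = -1;
  }
  else if(from < 0) {                       /* -Y */
    *resume_from = from;
    *maxdownload = -from;
  }
  else {                                    /* X-Y */
    *resume_from = from;
    *maxdownload = to - from + 1;           /* include last byte */
  }
}

int main(void)
{
  const char *tests[] = { "100-", "-500", "100-199" };
  long long rf, md;
  size_t i;

  for(i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) {
    parse_range(tests[i], &rf, &md);
    printf("%-8s resume_from=%lld maxdownload=%lld\n", tests[i], rf, md);
  }
  return 0;
}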
Example #3
/**
 * Parses the given string looking for an offset (which may be a
 * larger-than-integer value). The offset CANNOT be negative!
 *
 * @param val  the offset to populate
 * @param str  the buffer containing the offset
 * @return PARAM_OK if successful, a parameter specific error enum if failure.
 */
ParameterError str2offset(curl_off_t *val, const char *str)
{
  char *endptr;
  if(str[0] == '-')
    /* offsets aren't negative, this indicates weird input */
    return PARAM_NEGATIVE_NUMERIC;

#if(SIZEOF_CURL_OFF_T > SIZEOF_LONG)
  {
    CURLofft offt = curlx_strtoofft(str, &endptr, 0, val);
    if(CURL_OFFT_FLOW == offt)
      return PARAM_NUMBER_TOO_LARGE;
    else if(CURL_OFFT_INVAL == offt)
      return PARAM_BAD_NUMERIC;
  }
#else
  errno = 0;
  *val = strtol(str, &endptr, 0);
  if((*val == LONG_MIN || *val == LONG_MAX) && errno == ERANGE)
    return PARAM_NUMBER_TOO_LARGE;
#endif
  if((endptr != str) && (endptr == str + strlen(str)))
    return PARAM_OK;

  return PARAM_BAD_NUMERIC;
}
Example #4
/**
 * Parses the given string looking for an offset (which may be
 * a larger-than-integer value).
 *
 * @param val  the offset to populate
 * @param str  the buffer containing the offset
 * @return zero if successful, non-zero if failure.
 */
int str2offset(curl_off_t *val, const char *str)
{
#if(CURL_SIZEOF_CURL_OFF_T > CURL_SIZEOF_LONG)
  *val = curlx_strtoofft(str, NULL, 0);
  if((*val == CURL_OFF_T_MAX || *val == CURL_OFF_T_MIN) && (ERRNO == ERANGE))
    return 1;
#else
  *val = strtol(str, NULL, 0);
  if((*val == LONG_MIN || *val == LONG_MAX) && ERRNO == ERANGE)
    return 1;
#endif
  return 0;
}
Example #5
/**
 * Parses the given string looking for an offset (which may be a
 * larger-than-integer value). The offset CANNOT be negative!
 *
 * @param val  the offset to populate
 * @param str  the buffer containing the offset
 * @return PARAM_OK if successful, a parameter specific error enum if failure.
 */
ParameterError str2offset(curl_off_t *val, const char *str)
{
  char *endptr;
  if(str[0] == '-')
    /* offsets aren't negative, this indicates weird input */
    return PARAM_NEGATIVE_NUMERIC;

#if(CURL_SIZEOF_CURL_OFF_T > CURL_SIZEOF_LONG)
  *val = curlx_strtoofft(str, &endptr, 0);
  if((*val == CURL_OFF_T_MAX || *val == CURL_OFF_T_MIN) && (ERRNO == ERANGE))
    return PARAM_BAD_NUMERIC;
#else
  *val = strtol(str, &endptr, 0);
  if((*val == LONG_MIN || *val == LONG_MAX) && ERRNO == ERANGE)
    return PARAM_BAD_NUMERIC;
#endif
  if((endptr != str) && (endptr == str + strlen(str)))
    return PARAM_OK;

  return PARAM_BAD_NUMERIC;
}
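All three str2offset variants combine the same checks: no leading '-', no overflow, and (in the ParameterError versions) the whole string must be consumed, i.e. endptr == str + strlen(str). A standalone approximation of that validation, using long long and strtoll() in place of curl_off_t and curlx_strtoofft(); the parse_offset name and the off_status enum are illustrative, not the curl tool's types.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { OFF_OK, OFF_NEGATIVE, OFF_TOO_LARGE, OFF_BAD } off_status;

/* Hypothetical standalone approximation of str2offset(): reject negative
   input, overflow and trailing garbage, accept anything strtoll() fully
   consumes (base 0, so hex and octal work too). */
static off_status parse_offset(long long *val, const char *str)
{
  char *endptr;

  if(str[0] == '-')
    return OFF_NEGATIVE;          /* offsets aren't negative */

  errno = 0;
  *val = strtoll(str, &endptr, 0);
  if(errno == ERANGE)
    return OFF_TOO_LARGE;         /* did not fit in long long */

  if((endptr != str) && (endptr == str + strlen(str)))
    return OFF_OK;                /* at least one digit, all consumed */

  return OFF_BAD;                 /* empty input or trailing garbage */
}

int main(void)
{
  const char *tests[] = { "1024", "0x400", "123abc", "-5",
                          "99999999999999999999", "" };
  long long v;
  size_t i;

  for(i = 0; i < sizeof(tests)/sizeof(tests[0]); i++)
    printf("%-22s -> %d\n", tests[i], (int)parse_offset(&v, tests[i]));
  return 0;
}

Note that the older int-returning variant (Example #4) skips the endptr check, so input like "123abc" passes there but is rejected here and in the ParameterError versions.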
Example #6
struct Cookie *
Curl_cookie_add(struct SessionHandle *data,
                /* The 'data' pointer here may be NULL at times, and thus
                   must only be used very carefully for things that can deal
                   with data being NULL. Such as infof() and similar */

                struct CookieInfo *c,
                bool httpheader, /* TRUE if HTTP header-style line */
                char *lineptr,   /* first character of the line */
                const char *domain, /* default domain */
                const char *path)   /* full path used when this cookie is set,
                                       used to get default path for the cookie
                                       unless set */
{
  struct Cookie *clist;
  char name[MAX_NAME];
  struct Cookie *co;
  struct Cookie *lastc=NULL;
  time_t now = time(NULL);
  bool replace_old = FALSE;
  bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */

#ifdef CURL_DISABLE_VERBOSE_STRINGS
  (void)data;
#endif

  /* First, alloc and init a new struct for it */
  co = calloc(1, sizeof(struct Cookie));
  if(!co)
    return NULL; /* bail out if we're this low on memory */

  if(httpheader) {
    /* This line was read off an HTTP header */
    const char *ptr;
    const char *semiptr;
    char *what;

    what = malloc(MAX_COOKIE_LINE);
    if(!what) {
      free(co);
      return NULL;
    }

    semiptr=strchr(lineptr, ';'); /* first, find a semicolon */

    while(*lineptr && ISBLANK(*lineptr))
      lineptr++;

    ptr = lineptr;
    do {
      /* we have a <what>=<this> pair or a stand-alone word here */
      name[0]=what[0]=0; /* init the buffers */
      if(1 <= sscanf(ptr, "%" MAX_NAME_TXT "[^;\r\n =]=%"
                     MAX_COOKIE_LINE_TXT "[^;\r\n]",
                     name, what)) {
        /* Use strstore() below to properly deal with received cookie
           headers that have the same string property set more than once,
           and then we use the last one. */
        const char *whatptr;
        bool done = FALSE;
        bool sep;
        size_t len=strlen(what);
        const char *endofn = &ptr[ strlen(name) ];

        /* skip trailing spaces in name */
        while(*endofn && ISBLANK(*endofn))
          endofn++;

        /* name ends with a '=' ? */
        sep = (*endofn == '=')?TRUE:FALSE;

        /* Strip off trailing whitespace from the 'what' */
        while(len && ISBLANK(what[len-1])) {
          what[len-1]=0;
          len--;
        }

        /* Skip leading whitespace from the 'what' */
        whatptr=what;
        while(*whatptr && ISBLANK(*whatptr))
          whatptr++;

        if(!len) {
          /* this was a "<name>=" with no content, and we must allow
             'secure' and 'httponly' specified this weirdly */
          done = TRUE;
          if(Curl_raw_equal("secure", name))
            co->secure = TRUE;
          else if(Curl_raw_equal("httponly", name))
            co->httponly = TRUE;
          else if(sep)
            /* there was a '=' so we're not done parsing this field */
            done = FALSE;
        }
        if(done)
          ;
        else if(Curl_raw_equal("path", name)) {
          strstore(&co->path, whatptr);
          if(!co->path) {
            badcookie = TRUE; /* out of memory bad */
            break;
          }
          co->spath = sanitize_cookie_path(co->path);
          if(!co->spath) {
            badcookie = TRUE; /* out of memory bad */
            break;
          }
        }
        else if(Curl_raw_equal("domain", name)) {
          /* Now, we make sure that our host is within the given domain,
             or the given domain is not valid and thus cannot be set. */

          if('.' == whatptr[0])
            whatptr++; /* ignore preceding dot */

          if(!domain || tailmatch(whatptr, domain)) {
            const char *tailptr=whatptr;
            if(tailptr[0] == '.')
              tailptr++;
            strstore(&co->domain, tailptr); /* don't prefix w/dots
                                               internally */
            if(!co->domain) {
              badcookie = TRUE;
              break;
            }
            co->tailmatch=TRUE; /* we always do that if the domain name was
                                   given */
          }
          else {
            /* we did not get a tailmatch, so the domain this cookie attempts
               to set is not a domain to which the current host belongs. Mark
               it as bad. */
            badcookie=TRUE;
            infof(data, "skipped cookie with bad tailmatch domain: %s\n",
                  whatptr);
          }
        }
        else if(Curl_raw_equal("version", name)) {
          strstore(&co->version, whatptr);
          if(!co->version) {
            badcookie = TRUE;
            break;
          }
        }
        else if(Curl_raw_equal("max-age", name)) {
          /* Defined in RFC2109:

             Optional.  The Max-Age attribute defines the lifetime of the
             cookie, in seconds.  The delta-seconds value is a decimal non-
             negative integer.  After delta-seconds seconds elapse, the
             client should discard the cookie.  A value of zero means the
             cookie should be discarded immediately.

          */
          strstore(&co->maxage, whatptr);
          if(!co->maxage) {
            badcookie = TRUE;
            break;
          }
          co->expires =
            strtol((*co->maxage=='\"')?&co->maxage[1]:&co->maxage[0],NULL,10)
            + (long)now;
        }
        else if(Curl_raw_equal("expires", name)) {
          strstore(&co->expirestr, whatptr);
          if(!co->expirestr) {
            badcookie = TRUE;
            break;
          }
          /* Note that if the date couldn't get parsed for whatever reason,
             the cookie will be treated as a session cookie */
          co->expires = curl_getdate(what, &now);

          /* Session cookies have expires set to 0 so if we get that back
             from the date parser let's add a second to make it a
             non-session cookie */
          if(co->expires == 0)
            co->expires = 1;
          else if(co->expires < 0)
            co->expires = 0;
        }
        else if(!co->name) {
          co->name = strdup(name);
          co->value = strdup(whatptr);
          if(!co->name || !co->value) {
            badcookie = TRUE;
            break;
          }
        }
        /*
          else this is the second (or more) name we don't know
          about! */
      }
      else {
        /* this is an "illegal" <what>=<this> pair */
      }

      if(!semiptr || !*semiptr) {
        /* we already know there are no more cookies */
        semiptr = NULL;
        continue;
      }

      ptr=semiptr+1;
      while(*ptr && ISBLANK(*ptr))
        ptr++;
      semiptr=strchr(ptr, ';'); /* now, find the next semicolon */

      if(!semiptr && *ptr)
        /* There are no more semicolons, but there's a final name=value pair
           coming up */
        semiptr=strchr(ptr, '\0');
    } while(semiptr);

    if(!badcookie && !co->domain) {
      if(domain) {
        /* no domain was given in the header line, set the default */
        co->domain=strdup(domain);
        if(!co->domain)
          badcookie = TRUE;
      }
    }

    if(!badcookie && !co->path && path) {
      /* No path was given in the header line, set the default.
         Note that the passed-in path to this function MAY have a '?' and
         following part that MUST not be stored as part of the path. */
      char *queryp = strchr(path, '?');

      /* queryp is where the interesting part of the path ends, so now we
         want to find the last slash before that point */
      char *endslash;
      if(!queryp)
        endslash = strrchr(path, '/');
      else
        endslash = memrchr(path, '/', (size_t)(queryp - path));
      if(endslash) {
        size_t pathlen = (size_t)(endslash-path+1); /* include ending slash */
        co->path=malloc(pathlen+1); /* one extra for the zero byte */
        if(co->path) {
          memcpy(co->path, path, pathlen);
          co->path[pathlen]=0; /* zero terminate */
          co->spath = sanitize_cookie_path(co->path);
          if(!co->spath)
            badcookie = TRUE; /* out of memory bad */
        }
        else
          badcookie = TRUE;
      }
    }

    free(what);

    if(badcookie || !co->name) {
      /* we didn't get a cookie name, or we got a bad cookie;
         this is an illegal line, bail out */
      freecookie(co);
      return NULL;
    }

  }
  else {
    /* This line is NOT an HTTP header-style line; we do offer support for
       reading the odd netscape cookies-file format here */
    char *ptr;
    char *firstptr;
    char *tok_buf=NULL;
    int fields;

    /* IE introduced HTTP-only cookies to prevent XSS attacks. Cookies
       marked with httpOnly after the domain name are not accessible
       from JavaScript, but since curl does not operate at the JavaScript
       level, we include them anyway. In Firefox's cookie files, these
       lines are prefixed with #HttpOnly_ and then everything is
       as usual, so we skip the first 10 characters of the line.
    */
    if(strncmp(lineptr, "#HttpOnly_", 10) == 0) {
      lineptr += 10;
      co->httponly = TRUE;
    }

    if(lineptr[0]=='#') {
      /* don't even try the comments */
      free(co);
      return NULL;
    }
    /* strip off the possible end-of-line characters */
    ptr=strchr(lineptr, '\r');
    if(ptr)
      *ptr=0; /* clear it */
    ptr=strchr(lineptr, '\n');
    if(ptr)
      *ptr=0; /* clear it */

    firstptr=strtok_r(lineptr, "\t", &tok_buf); /* tokenize it on the TAB */

    /* Now loop through the fields and init the struct we already have
       allocated */
    for(ptr=firstptr, fields=0; ptr && !badcookie;
        ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
      switch(fields) {
      case 0:
        if(ptr[0]=='.') /* skip preceding dots */
          ptr++;
        co->domain = strdup(ptr);
        if(!co->domain)
          badcookie = TRUE;
        break;
      case 1:
        /* This field got its explanation on the 23rd of May 2001 by
           Andrés García:

           flag: A TRUE/FALSE value indicating if all machines within a given
           domain can access the variable. This value is set automatically by
           the browser, depending on the value you set for the domain.

           As far as I can see, it is set to true when the cookie says
           .domain.com and to false when the domain is complete www.domain.com
        */
        co->tailmatch = Curl_raw_equal(ptr, "TRUE")?TRUE:FALSE;
        break;
      case 2:
        /* It turns out that sometimes the file format allows the path
           field to be left blank; we try to detect this and work
           around it! Andrés García made us aware of this... */
        if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
          /* only if the path doesn't look like a boolean option! */
          co->path = strdup(ptr);
          if(!co->path)
            badcookie = TRUE;
          else {
            co->spath = sanitize_cookie_path(co->path);
            if(!co->spath) {
              badcookie = TRUE; /* out of memory bad */
            }
          }
          break;
        }
        /* this doesn't look like a path, make one up! */
        co->path = strdup("/");
        if(!co->path)
          badcookie = TRUE;
        co->spath = strdup("/");
        if(!co->spath)
          badcookie = TRUE;
        fields++; /* add a field and fall down to secure */
        /* FALLTHROUGH */
      case 3:
        co->secure = Curl_raw_equal(ptr, "TRUE")?TRUE:FALSE;
        break;
      case 4:
        co->expires = curlx_strtoofft(ptr, NULL, 10);
        break;
      case 5:
        co->name = strdup(ptr);
        if(!co->name)
          badcookie = TRUE;
        break;
      case 6:
        co->value = strdup(ptr);
        if(!co->value)
          badcookie = TRUE;
        break;
      }
    }
    if(6 == fields) {
      /* we got a cookie with blank contents, fix it */
      co->value = strdup("");
      if(!co->value)
        badcookie = TRUE;
      else
        fields++;
    }

    if(!badcookie && (7 != fields))
      /* we did not find a sufficient number of fields */
      badcookie = TRUE;

    if(badcookie) {
      freecookie(co);
      return NULL;
    }

  }

  if(!c->running &&    /* read from a file */
     c->newsession &&  /* clean session cookies */
     !co->expires) {   /* this is a session cookie since it doesn't expire! */
    freecookie(co);
    return NULL;
  }

  co->livecookie = c->running;

  /* now that we have parsed the incoming line, we must check if it
     supersedes an already existing cookie, which it may if the previous one
     has the same domain and path as this one */

  clist = c->cookies;
  replace_old = FALSE;
  while(clist) {
    if(Curl_raw_equal(clist->name, co->name)) {
      /* the names are identical */

      if(clist->domain && co->domain) {
        if(Curl_raw_equal(clist->domain, co->domain))
          /* The domains are identical */
          replace_old=TRUE;
      }
      else if(!clist->domain && !co->domain)
        replace_old = TRUE;

      if(replace_old) {
        /* the domains were identical */

        if(clist->spath && co->spath) {
          if(Curl_raw_equal(clist->spath, co->spath)) {
            replace_old = TRUE;
          }
          else
            replace_old = FALSE;
        }
        else if(!clist->spath && !co->spath)
          replace_old = TRUE;
        else
          replace_old = FALSE;

      }

      if(replace_old && !co->livecookie && clist->livecookie) {
        /* Both cookies matched fine, except that the already present
           cookie is "live", which means it was set from a header, while
           the new one isn't "live" and thus only read from a file. We let
           live cookies stay alive */

        /* Free the newcomer and get out of here! */
        freecookie(co);
        return NULL;
      }

      if(replace_old) {
        co->next = clist->next; /* get the next-pointer first */

        /* then free all the old pointers */
        free(clist->name);
        if(clist->value)
          free(clist->value);
        if(clist->domain)
          free(clist->domain);
        if(clist->path)
          free(clist->path);
        if(clist->spath)
          free(clist->spath);
        if(clist->expirestr)
          free(clist->expirestr);

        if(clist->version)
          free(clist->version);
        if(clist->maxage)
          free(clist->maxage);

        *clist = *co;  /* then store all the new data */

        free(co);   /* free the newly alloced memory */
        co = clist; /* point to the previous struct instead */

        /* We have replaced a cookie, now skip the rest of the list but
           make sure the 'lastc' pointer is properly set */
        do {
          lastc = clist;
          clist = clist->next;
        } while(clist);
        break;
      }
    }
    lastc = clist;
    clist = clist->next;
  }

  if(c->running)
    /* Only show this when NOT reading the cookies from a file */
    infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, "
          "expire %" FORMAT_OFF_T "\n",
          replace_old?"Replaced":"Added", co->name, co->value,
          co->domain, co->path, co->expires);

  if(!replace_old) {
    /* then make the last item point on this new one */
    if(lastc)
      lastc->next = co;
    else
      c->cookies = co;
    c->numcookies++; /* one more cookie in the jar */
  }

  return co;
}
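Curl_cookie_add() is internal, but both input formats it handles, the HTTP "Set-Cookie:" header style and the TAB-separated Netscape cookie-file line, can be fed through libcurl's public CURLOPT_COOKIELIST option and read back with CURLINFO_COOKIELIST. A minimal usage sketch, assuming a libcurl build with cookie support; the cookie values are made up and error handling is trimmed.

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  struct curl_slist *cookies = NULL;

  if(!curl)
    return 1;

  /* HTTP header-style line: parsed by the httpheader==TRUE branch */
  curl_easy_setopt(curl, CURLOPT_COOKIELIST,
                   "Set-Cookie: tool=curl; domain=example.com; path=/");

  /* Netscape cookie-file line: the TAB-separated, seven-field branch */
  curl_easy_setopt(curl, CURLOPT_COOKIELIST,
                   "example.com\tFALSE\t/\tFALSE\t2145916800\tlang\ten");

  /* read back what the cookie engine stored (Netscape format lines) */
  if(curl_easy_getinfo(curl, CURLINFO_COOKIELIST, &cookies) == CURLE_OK) {
    struct curl_slist *each;
    for(each = cookies; each; each = each->next)
      printf("%s\n", each->data);
    curl_slist_free_all(cookies);
  }

  curl_easy_cleanup(curl);
  return 0;
}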
Example #7
size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb,
                          void *connptr)
{
  size_t bufflen = size*nmemb;
  struct connectdata *conn = (struct connectdata *)connptr;
  struct ftp_wc_tmpdata *tmpdata = conn->data->wildcard.tmp;
  struct ftp_parselist_data *parser = tmpdata->parser;
  struct curl_fileinfo *finfo;
  unsigned long i = 0;
  CURLcode result;

  if(parser->error) { /* error in previous call */
    /* scenario:
     * 1. call => OK..
     * 2. call => OUT_OF_MEMORY (or other error)
     * 3. (last) call => is skipped RIGHT HERE and the error is handled later
     *    in wc_statemach()
     */
    return bufflen;
  }

  if(parser->os_type == OS_TYPE_UNKNOWN && bufflen > 0) {
    /* guess the listing format (server OS) from the first response byte */
    parser->os_type = (buffer[0] >= '0' && buffer[0] <= '9') ?
                       OS_TYPE_WIN_NT : OS_TYPE_UNIX;
  }

  while(i < bufflen) { /* FSM */

    char c = buffer[i];
    if(!parser->file_data) { /* tmp file data is not allocated yet */
      parser->file_data = Curl_fileinfo_alloc();
      if(!parser->file_data) {
        parser->error = CURLE_OUT_OF_MEMORY;
        return bufflen;
      }
      parser->file_data->b_data = malloc(FTP_BUFFER_ALLOCSIZE);
      if(!parser->file_data->b_data) {
        PL_ERROR(conn, CURLE_OUT_OF_MEMORY);
        return bufflen;
      }
      parser->file_data->b_size = FTP_BUFFER_ALLOCSIZE;
      parser->item_offset = 0;
      parser->item_length = 0;
    }

    finfo = parser->file_data;
    finfo->b_data[finfo->b_used++] = c;

    if(finfo->b_used >= finfo->b_size - 1) {
      /* if needed, extend the buffer space for the file data */
      char *tmp = realloc(finfo->b_data,
                          finfo->b_size + FTP_BUFFER_ALLOCSIZE);
      if(tmp) {
        finfo->b_size += FTP_BUFFER_ALLOCSIZE;
        finfo->b_data = tmp;
      }
      else {
        Curl_fileinfo_dtor(NULL, parser->file_data);
        parser->file_data = NULL;
        parser->error = CURLE_OUT_OF_MEMORY;
        PL_ERROR(conn, CURLE_OUT_OF_MEMORY);
        return bufflen;
      }
    }

    switch (parser->os_type) {
    case OS_TYPE_UNIX:
      switch (parser->state.UNIX.main) {
      case PL_UNIX_TOTALSIZE:
        switch(parser->state.UNIX.sub.total_dirsize) {
        case PL_UNIX_TOTALSIZE_INIT:
          if(c == 't') {
            parser->state.UNIX.sub.total_dirsize = PL_UNIX_TOTALSIZE_READING;
            parser->item_length++;
          }
          else {
            parser->state.UNIX.main = PL_UNIX_FILETYPE;
            /* restart the FSM; there is no directory-size ("total") line */
            finfo->b_used = 0;
            i--;
          }
          break;
        case PL_UNIX_TOTALSIZE_READING:
          parser->item_length++;
          if(c == '\r') {
            parser->item_length--;
            finfo->b_used--;
          }
          else if(c == '\n') {
            finfo->b_data[parser->item_length - 1] = 0;
            if(strncmp("total ", finfo->b_data, 6) == 0) {
              char *endptr = finfo->b_data+6;
              /* here we can deal with the directory size: skip the leading
                 white space and then the digits */
              while(ISSPACE(*endptr))
                endptr++;
              while(ISDIGIT(*endptr))
                endptr++;
              if(*endptr != 0) {
                PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
                return bufflen;
              }
              else {
                parser->state.UNIX.main = PL_UNIX_FILETYPE;
                finfo->b_used = 0;
              }
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        }
        break;
      case PL_UNIX_FILETYPE:
        switch (c) {
        case '-':
          finfo->filetype = CURLFILETYPE_FILE;
          break;
        case 'd':
          finfo->filetype = CURLFILETYPE_DIRECTORY;
          break;
        case 'l':
          finfo->filetype = CURLFILETYPE_SYMLINK;
          break;
        case 'p':
          finfo->filetype = CURLFILETYPE_NAMEDPIPE;
          break;
        case 's':
          finfo->filetype = CURLFILETYPE_SOCKET;
          break;
        case 'c':
          finfo->filetype = CURLFILETYPE_DEVICE_CHAR;
          break;
        case 'b':
          finfo->filetype = CURLFILETYPE_DEVICE_BLOCK;
          break;
        case 'D':
          finfo->filetype = CURLFILETYPE_DOOR;
          break;
        default:
          PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
          return bufflen;
        }
        parser->state.UNIX.main = PL_UNIX_PERMISSION;
        parser->item_length = 0;
        parser->item_offset = 1;
        break;
      case PL_UNIX_PERMISSION:
        parser->item_length++;
        if(parser->item_length <= 9) {
          if(!strchr("rwx-tTsS", c)) {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
        }
        else if(parser->item_length == 10) {
          unsigned int perm;
          if(c != ' ') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          finfo->b_data[10] = 0; /* terminate permissions */
          perm = ftp_pl_get_permission(finfo->b_data + parser->item_offset);
          if(perm & FTP_LP_MALFORMATED_PERM) {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          parser->file_data->flags |= CURLFINFOFLAG_KNOWN_PERM;
          parser->file_data->perm = perm;
          parser->offsets.perm = parser->item_offset;

          parser->item_length = 0;
          parser->state.UNIX.main = PL_UNIX_HLINKS;
          parser->state.UNIX.sub.hlinks = PL_UNIX_HLINKS_PRESPACE;
        }
        break;
      case PL_UNIX_HLINKS:
        switch(parser->state.UNIX.sub.hlinks) {
        case PL_UNIX_HLINKS_PRESPACE:
          if(c != ' ') {
            if(c >= '0' && c <= '9') {
              parser->item_offset = finfo->b_used - 1;
              parser->item_length = 1;
              parser->state.UNIX.sub.hlinks = PL_UNIX_HLINKS_NUMBER;
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_HLINKS_NUMBER:
          parser->item_length ++;
          if(c == ' ') {
            char *p;
            long int hlinks;
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            hlinks = strtol(finfo->b_data + parser->item_offset, &p, 10);
            if(p[0] == '\0' && hlinks != LONG_MAX && hlinks != LONG_MIN) {
              parser->file_data->flags |= CURLFINFOFLAG_KNOWN_HLINKCOUNT;
              parser->file_data->hardlinks = hlinks;
            }
            parser->item_length = 0;
            parser->item_offset = 0;
            parser->state.UNIX.main = PL_UNIX_USER;
            parser->state.UNIX.sub.user = PL_UNIX_USER_PRESPACE;
          }
          else if(c < '0' || c > '9') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      case PL_UNIX_USER:
        switch(parser->state.UNIX.sub.user) {
        case PL_UNIX_USER_PRESPACE:
          if(c != ' ') {
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
            parser->state.UNIX.sub.user = PL_UNIX_USER_PARSING;
          }
          break;
        case PL_UNIX_USER_PARSING:
          parser->item_length++;
          if(c == ' ') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            parser->offsets.user = parser->item_offset;
            parser->state.UNIX.main = PL_UNIX_GROUP;
            parser->state.UNIX.sub.group = PL_UNIX_GROUP_PRESPACE;
            parser->item_offset = 0;
            parser->item_length = 0;
          }
          break;
        }
        break;
      case PL_UNIX_GROUP:
        switch(parser->state.UNIX.sub.group) {
        case PL_UNIX_GROUP_PRESPACE:
          if(c != ' ') {
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
            parser->state.UNIX.sub.group = PL_UNIX_GROUP_NAME;
          }
          break;
        case PL_UNIX_GROUP_NAME:
          parser->item_length++;
          if(c == ' ') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            parser->offsets.group = parser->item_offset;
            parser->state.UNIX.main = PL_UNIX_SIZE;
            parser->state.UNIX.sub.size = PL_UNIX_SIZE_PRESPACE;
            parser->item_offset = 0;
            parser->item_length = 0;
          }
          break;
        }
        break;
      case PL_UNIX_SIZE:
        switch(parser->state.UNIX.sub.size) {
        case PL_UNIX_SIZE_PRESPACE:
          if(c != ' ') {
            if(c >= '0' && c <= '9') {
              parser->item_offset = finfo->b_used - 1;
              parser->item_length = 1;
              parser->state.UNIX.sub.size = PL_UNIX_SIZE_NUMBER;
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_SIZE_NUMBER:
          parser->item_length++;
          if(c == ' ') {
            char *p;
            curl_off_t fsize;
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            fsize = curlx_strtoofft(finfo->b_data+parser->item_offset, &p, 10);
            if(p[0] == '\0' && fsize != CURL_OFF_T_MAX &&
                               fsize != CURL_OFF_T_MIN) {
              parser->file_data->flags |= CURLFINFOFLAG_KNOWN_SIZE;
              parser->file_data->size = fsize;
            }
            parser->item_length = 0;
            parser->item_offset = 0;
            parser->state.UNIX.main = PL_UNIX_TIME;
            parser->state.UNIX.sub.time = PL_UNIX_TIME_PREPART1;
          }
          else if(!ISDIGIT(c)) {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      case PL_UNIX_TIME:
        switch(parser->state.UNIX.sub.time) {
        case PL_UNIX_TIME_PREPART1:
          if(c != ' ') {
            if(ISALNUM(c)) {
              parser->item_offset = finfo->b_used -1;
              parser->item_length = 1;
              parser->state.UNIX.sub.time = PL_UNIX_TIME_PART1;
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_TIME_PART1:
          parser->item_length++;
          if(c == ' ') {
            parser->state.UNIX.sub.time = PL_UNIX_TIME_PREPART2;
          }
          else if(!ISALNUM(c) && c != '.') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        case PL_UNIX_TIME_PREPART2:
          parser->item_length++;
          if(c != ' ') {
            if(ISALNUM(c)) {
              parser->state.UNIX.sub.time = PL_UNIX_TIME_PART2;
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_TIME_PART2:
          parser->item_length++;
          if(c == ' ') {
            parser->state.UNIX.sub.time = PL_UNIX_TIME_PREPART3;
          }
          else if(!ISALNUM(c) && c != '.') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        case PL_UNIX_TIME_PREPART3:
          parser->item_length++;
          if(c != ' ') {
            if(ISALNUM(c)) {
              parser->state.UNIX.sub.time = PL_UNIX_TIME_PART3;
            }
            else {
              PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_TIME_PART3:
          parser->item_length++;
          if(c == ' ') {
            finfo->b_data[parser->item_offset + parser->item_length -1] = 0;
            parser->offsets.time = parser->item_offset;
            if(ftp_pl_gettime(parser, finfo->b_data + parser->item_offset)) {
              parser->file_data->flags |= CURLFINFOFLAG_KNOWN_TIME;
            }
            if(finfo->filetype == CURLFILETYPE_SYMLINK) {
              parser->state.UNIX.main = PL_UNIX_SYMLINK;
              parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRESPACE;
            }
            else {
              parser->state.UNIX.main = PL_UNIX_FILENAME;
              parser->state.UNIX.sub.filename = PL_UNIX_FILENAME_PRESPACE;
            }
          }
          else if(!ISALNUM(c) && c != '.' && c != ':') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      case PL_UNIX_FILENAME:
        switch(parser->state.UNIX.sub.filename) {
        case PL_UNIX_FILENAME_PRESPACE:
          if(c != ' ') {
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
            parser->state.UNIX.sub.filename = PL_UNIX_FILENAME_NAME;
          }
          break;
        case PL_UNIX_FILENAME_NAME:
          parser->item_length++;
          if(c == '\r') {
            parser->item_length--;
            parser->state.UNIX.sub.filename = PL_UNIX_FILENAME_WINDOWSEOL;
          }
          else if(c == '\n') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            parser->offsets.filename = parser->item_offset;
            parser->state.UNIX.main = PL_UNIX_FILETYPE;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
          }
          break;
        case PL_UNIX_FILENAME_WINDOWSEOL:
          if(c == '\n') {
            finfo->b_data[parser->item_offset + parser->item_length] = 0;
            parser->offsets.filename = parser->item_offset;
            parser->state.UNIX.main = PL_UNIX_FILETYPE;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
          }
          else {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      case PL_UNIX_SYMLINK:
        switch(parser->state.UNIX.sub.symlink) {
        case PL_UNIX_SYMLINK_PRESPACE:
          if(c != ' ') {
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_NAME;
          }
          break;
        case PL_UNIX_SYMLINK_NAME:
          parser->item_length++;
          if(c == ' ') {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRETARGET1;
          }
          else if(c == '\r' || c == '\n') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        case PL_UNIX_SYMLINK_PRETARGET1:
          parser->item_length++;
          if(c == '-') {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRETARGET2;
          }
          else if(c == '\r' || c == '\n') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          else {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_NAME;
          }
          break;
        case PL_UNIX_SYMLINK_PRETARGET2:
          parser->item_length++;
          if(c == '>') {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRETARGET3;
          }
          else if(c == '\r' || c == '\n') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          else {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_NAME;
          }
          break;
        case PL_UNIX_SYMLINK_PRETARGET3:
          parser->item_length++;
          if(c == ' ') {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRETARGET4;
            /* the " -> " separator ends here; the symlink target follows */
            finfo->b_data[parser->item_offset + parser->item_length - 4] = 0;
            parser->offsets.filename = parser->item_offset;
            parser->item_length = 0;
            parser->item_offset = 0;
          }
          else if(c == '\r' || c == '\n') {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          else {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_NAME;
          }
          break;
        case PL_UNIX_SYMLINK_PRETARGET4:
          if(c != '\r' && c != '\n') {
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_TARGET;
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
          }
          else {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        case PL_UNIX_SYMLINK_TARGET:
          parser->item_length ++;
          if(c == '\r') {
            parser->item_length --;
            parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_WINDOWSEOL;
          }
          else if(c == '\n') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            parser->offsets.symlink_target = parser->item_offset;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
            parser->state.UNIX.main = PL_UNIX_FILETYPE;
          }
          break;
        case PL_UNIX_SYMLINK_WINDOWSEOL:
          if(c == '\n') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            parser->offsets.symlink_target = parser->item_offset;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
            parser->state.UNIX.main = PL_UNIX_FILETYPE;
          }
          else {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      }
      break;
    case OS_TYPE_WIN_NT:
      switch(parser->state.NT.main) {
      case PL_WINNT_DATE:
        parser->item_length++;
        if(parser->item_length < 9) {
          if(!strchr("0123456789-", c)) { /* only simple control */
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
        }
        else if(parser->item_length == 9) {
          if(c == ' ') {
            parser->state.NT.main = PL_WINNT_TIME;
            parser->state.NT.sub.time = PL_WINNT_TIME_PRESPACE;
          }
          else {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
        }
        else {
          PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
          return bufflen;
        }
        break;
      case PL_WINNT_TIME:
        parser->item_length++;
        switch(parser->state.NT.sub.time) {
        case PL_WINNT_TIME_PRESPACE:
          if(!ISSPACE(c)) {
            parser->state.NT.sub.time = PL_WINNT_TIME_TIME;
          }
          break;
        case PL_WINNT_TIME_TIME:
          if(c == ' ') {
            parser->offsets.time = parser->item_offset;
            finfo->b_data[parser->item_offset + parser->item_length -1] = 0;
            parser->state.NT.main = PL_WINNT_DIRORSIZE;
            parser->state.NT.sub.dirorsize = PL_WINNT_DIRORSIZE_PRESPACE;
            parser->item_length = 0;
          }
          else if(!strchr("APM0123456789:", c)) {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      case PL_WINNT_DIRORSIZE:
        switch(parser->state.NT.sub.dirorsize) {
        case PL_WINNT_DIRORSIZE_PRESPACE:
          if(c == ' ') {

          }
          else {
            parser->item_offset = finfo->b_used - 1;
            parser->item_length = 1;
            parser->state.NT.sub.dirorsize = PL_WINNT_DIRORSIZE_CONTENT;
          }
          break;
        case PL_WINNT_DIRORSIZE_CONTENT:
          parser->item_length ++;
          if(c == ' ') {
            finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
            if(strcmp("<DIR>", finfo->b_data + parser->item_offset) == 0) {
              finfo->filetype = CURLFILETYPE_DIRECTORY;
              finfo->size = 0;
            }
            else {
              char *endptr;
              finfo->size = curlx_strtoofft(finfo->b_data +
                                            parser->item_offset,
                                            &endptr, 10);
              if(!*endptr) {
                if(finfo->size == CURL_OFF_T_MAX ||
                   finfo->size == CURL_OFF_T_MIN) {
                  if(errno == ERANGE) {
                    PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
                    return bufflen;
                  }
                }
              }
              else {
                PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
                return bufflen;
              }
              /* correct file type */
              parser->file_data->filetype = CURLFILETYPE_FILE;
            }

            parser->file_data->flags |= CURLFINFOFLAG_KNOWN_SIZE;
            parser->item_length = 0;
            parser->state.NT.main = PL_WINNT_FILENAME;
            parser->state.NT.sub.filename = PL_WINNT_FILENAME_PRESPACE;
          }
          break;
        }
        break;
      case PL_WINNT_FILENAME:
        switch (parser->state.NT.sub.filename) {
        case PL_WINNT_FILENAME_PRESPACE:
          if(c != ' ') {
            parser->item_offset = finfo->b_used -1;
            parser->item_length = 1;
            parser->state.NT.sub.filename = PL_WINNT_FILENAME_CONTENT;
          }
          break;
        case PL_WINNT_FILENAME_CONTENT:
          parser->item_length++;
          if(c == '\r') {
            parser->state.NT.sub.filename = PL_WINNT_FILENAME_WINEOL;
            finfo->b_data[finfo->b_used - 1] = 0;
          }
          else if(c == '\n') {
            parser->offsets.filename = parser->item_offset;
            finfo->b_data[finfo->b_used - 1] = 0;
            parser->offsets.filename = parser->item_offset;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
            parser->state.NT.main = PL_WINNT_DATE;
            parser->state.NT.sub.filename = PL_WINNT_FILENAME_PRESPACE;
          }
          break;
        case PL_WINNT_FILENAME_WINEOL:
          if(c == '\n') {
            parser->offsets.filename = parser->item_offset;
            result = ftp_pl_insert_finfo(conn, finfo);
            if(result) {
              PL_ERROR(conn, result);
              return bufflen;
            }
            parser->state.NT.main = PL_WINNT_DATE;
            parser->state.NT.sub.filename = PL_WINNT_FILENAME_PRESPACE;
          }
          else {
            PL_ERROR(conn, CURLE_FTP_BAD_FILE_LIST);
            return bufflen;
          }
          break;
        }
        break;
      }
      break;
    default:
      return bufflen + 1;
    }

    i++;
  }

  return bufflen;
}
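The function above is a byte-at-a-time state machine: every character is appended to finfo->b_data and the state transitions record where each field (permissions, user, group, size, time, name) starts inside that single buffer. A much-reduced standalone sketch of the same technique, pulling only the size and name columns out of one Unix-style list line; the two states and the fixed column positions are simplifying assumptions, not the full parser.

#include <stdio.h>
#include <string.h>

/* Minimal field-scanning state machine in the spirit of
   Curl_ftp_parselist(): walk one "ls -l" style line byte by byte,
   count the whitespace-separated fields and remember where the size
   (field 4) and the name (field 8 and onwards) begin. */
int main(void)
{
  const char line[] =
    "-rw-r--r--   1 ftp      ftp        1234 Mar  1 12:34 hello world.txt";
  enum { IN_GAP, IN_FIELD } state = IN_GAP;
  int field = -1;                /* index of the field we are inside */
  const char *size = NULL;
  const char *name = NULL;
  size_t i;

  for(i = 0; i < sizeof(line) - 1; i++) {
    char c = line[i];
    if((state == IN_GAP) && (c != ' ')) {
      state = IN_FIELD;
      field++;
      if(field == 4)
        size = &line[i];         /* size column starts here */
      else if(field == 8) {
        name = &line[i];         /* the rest of the line is the name */
        break;
      }
    }
    else if((state == IN_FIELD) && (c == ' '))
      state = IN_GAP;
  }

  if(size && name)
    printf("size starts at \"%.*s\", name is \"%s\"\n",
           (int)strcspn(size, " "), size, name);
  return 0;
}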
Example #8
CURLcode Curl_proxyCONNECT(struct connectdata *conn,
                           int sockindex,
                           const char *hostname,
                           unsigned short remote_port)
{
  int subversion=0;
  struct SessionHandle *data=conn->data;
  struct SingleRequest *k = &data->req;
  CURLcode result;
  long timeout =
    data->set.timeout?data->set.timeout:PROXY_TIMEOUT; /* in milliseconds */
  curl_socket_t tunnelsocket = conn->sock[sockindex];
  curl_off_t cl=0;
  bool closeConnection = FALSE;
  bool chunked_encoding = FALSE;
  long check;

#define SELECT_OK      0
#define SELECT_ERROR   1
#define SELECT_TIMEOUT 2
  int error = SELECT_OK;

  if(conn->tunnel_state[sockindex] == TUNNEL_COMPLETE)
    return CURLE_OK; /* CONNECT is already completed */

  conn->bits.proxy_connect_closed = FALSE;

  do {
    if(TUNNEL_INIT == conn->tunnel_state[sockindex]) {
      /* BEGIN CONNECT PHASE */
      char *host_port;
      Curl_send_buffer *req_buffer;

      infof(data, "Establish HTTP proxy tunnel to %s:%hu\n",
            hostname, remote_port);

      if(data->req.newurl) {
        /* This only happens if we've looped here due to authentication
           reasons, and we don't really use the newly cloned URL here
           then. Just free() it. */
        free(data->req.newurl);
        data->req.newurl = NULL;
      }

      /* initialize a dynamic send-buffer */
      req_buffer = Curl_add_buffer_init();

      if(!req_buffer)
        return CURLE_OUT_OF_MEMORY;

      host_port = aprintf("%s:%hu", hostname, remote_port);
      if(!host_port) {
        free(req_buffer);
        return CURLE_OUT_OF_MEMORY;
      }

      /* Setup the proxy-authorization header, if any */
      result = Curl_http_output_auth(conn, "CONNECT", host_port, TRUE);

      free(host_port);

      if(CURLE_OK == result) {
        char *host=(char *)"";
        const char *proxyconn="";
        const char *useragent="";
        const char *http = (conn->proxytype == CURLPROXY_HTTP_1_0) ?
          "1.0" : "1.1";
        char *hostheader= /* host:port with IPv6 support */
          aprintf("%s%s%s:%hu", conn->bits.ipv6_ip?"[":"",
                  hostname, conn->bits.ipv6_ip?"]":"",
                  remote_port);
        if(!hostheader) {
          free(req_buffer);
          return CURLE_OUT_OF_MEMORY;
        }

        if(!Curl_checkheaders(data, "Host:")) {
          host = aprintf("Host: %s\r\n", hostheader);
          if(!host) {
            free(hostheader);
            free(req_buffer);
            return CURLE_OUT_OF_MEMORY;
          }
        }
        if(!Curl_checkheaders(data, "Proxy-Connection:"))
          proxyconn = "Proxy-Connection: Keep-Alive\r\n";

        if(!Curl_checkheaders(data, "User-Agent:") &&
           data->set.str[STRING_USERAGENT])
          useragent = conn->allocptr.uagent;

        result =
          Curl_add_bufferf(req_buffer,
                           "CONNECT %s HTTP/%s\r\n"
                           "%s"  /* Host: */
                           "%s"  /* Proxy-Authorization */
                           "%s"  /* User-Agent */
                           "%s", /* Proxy-Connection */
                           hostheader,
                           http,
                           host,
                           conn->allocptr.proxyuserpwd?
                           conn->allocptr.proxyuserpwd:"",
                           useragent,
                           proxyconn);

        if(host && *host)
          free(host);
        free(hostheader);

        if(CURLE_OK == result)
          result = Curl_add_custom_headers(conn, req_buffer);

        if(CURLE_OK == result)
          /* CRLF terminate the request */
          result = Curl_add_bufferf(req_buffer, "\r\n");

        if(CURLE_OK == result) {
          /* Send the connect request to the proxy */
          /* BLOCKING */
          result =
            Curl_add_buffer_send(req_buffer, conn,
                                 &data->info.request_size, 0, sockindex);
        }
        req_buffer = NULL;
        if(result)
          failf(data, "Failed sending CONNECT to proxy");
      }

      Curl_safefree(req_buffer);
      if(result)
        return result;

      conn->tunnel_state[sockindex] = TUNNEL_CONNECT;

      /* now we've issued the CONNECT and we're waiting to hear back, return
         and get called again polling-style */
      return CURLE_OK;

    } /* END CONNECT PHASE */

    { /* BEGIN NEGOTIATION PHASE */
      size_t nread;   /* total size read */
      int perline; /* count bytes per line */
      int keepon=TRUE;
      ssize_t gotbytes;
      char *ptr;
      char *line_start;

      ptr=data->state.buffer;
      line_start = ptr;

      nread=0;
      perline=0;
      keepon=TRUE;

      while((nread<BUFSIZE) && (keepon && !error)) {

        /* if timeout is requested, find out how much remaining time we have */
        check = timeout - /* timeout time */
          Curl_tvdiff(Curl_tvnow(), conn->now); /* spent time */
        if(check <= 0) {
          failf(data, "Proxy CONNECT aborted due to timeout");
          error = SELECT_TIMEOUT; /* already too little time */
          break;
        }

        /* loop every second at least, less if the timeout is near */
        switch (Curl_socket_ready(tunnelsocket, CURL_SOCKET_BAD,
                                  check<1000L?check:1000)) {
        case -1: /* select() error, stop reading */
          error = SELECT_ERROR;
          failf(data, "Proxy CONNECT aborted due to select/poll error");
          break;
        case 0: /* timeout */
          break;
        default:
          DEBUGASSERT(ptr+BUFSIZE-nread <= data->state.buffer+BUFSIZE+1);
          result = Curl_read(conn, tunnelsocket, ptr, BUFSIZE-nread,
                             &gotbytes);
          if(result==CURLE_AGAIN)
            continue; /* go loop yourself */
          else if(result)
            keepon = FALSE;
          else if(gotbytes <= 0) {
            keepon = FALSE;
            if(data->set.proxyauth && data->state.authproxy.avail) {
              /* proxy auth was requested and proxy auth was available, so
                 treat this as a "mere" proxy disconnect */
              conn->bits.proxy_connect_closed = TRUE;
            }
            else {
              error = SELECT_ERROR;
              failf(data, "Proxy CONNECT aborted");
            }
          }
          else {
            /*
             * We got a whole chunk of data, which can be anything from one
             * byte to a set of lines and possibly just a piece of the last
             * line.
             */
            int i;

            nread += gotbytes;

            if(keepon > TRUE) {
              /* This means we are currently ignoring a response-body */

              nread = 0; /* make next read start over in the read buffer */
              ptr=data->state.buffer;
              if(cl) {
                /* A Content-Length based body: simply count down the counter
                   and make sure to break out of the loop when we're done! */
                cl -= gotbytes;
                if(cl<=0) {
                  keepon = FALSE;
                  break;
                }
              }
              else {
                /* chunked-encoded body, so we need to do the chunked dance
                   properly to know when the end of the body is reached */
                CHUNKcode r;
                ssize_t tookcareof=0;

                /* now parse the chunked piece of data so that we can
                   properly tell when the stream ends */
                r = Curl_httpchunk_read(conn, ptr, gotbytes, &tookcareof);
                if(r == CHUNKE_STOP) {
                  /* we're done reading chunks! */
                  infof(data, "chunk reading DONE\n");
                  keepon = FALSE;
                  /* we did the full CONNECT treatment, go COMPLETE */
                  conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
                }
                else
                  infof(data, "Read %zd bytes of chunk, continue\n",
                        tookcareof);
              }
            }
            else
              for(i = 0; i < gotbytes; ptr++, i++) {
                perline++; /* amount of bytes in this line so far */
                if(*ptr == 0x0a) {
                  char letter;
                  int writetype;

                  /* convert from the network encoding */
                  result = Curl_convert_from_network(data, line_start,
                                                     perline);
                  /* Curl_convert_from_network calls failf if unsuccessful */
                  if(result)
                    return result;

                  /* output debug if that is requested */
                  if(data->set.verbose)
                    Curl_debug(data, CURLINFO_HEADER_IN,
                               line_start, (size_t)perline, conn);

                  /* send the header to the callback */
                  writetype = CLIENTWRITE_HEADER;
                  if(data->set.include_header)
                    writetype |= CLIENTWRITE_BODY;

                  result = Curl_client_write(conn, writetype, line_start,
                                             perline);

                  data->info.header_size += (long)perline;
                  data->req.headerbytecount += (long)perline;

                  if(result)
                    return result;

                  /* Newlines are CRLF, so the CR is ignored as the line isn't
                     really terminated until the LF comes. Treat a following CR
                     as end-of-headers as well.*/

                  if(('\r' == line_start[0]) ||
                     ('\n' == line_start[0])) {
                    /* end of response-headers from the proxy */
                    nread = 0; /* make next read start over in the read
                                  buffer */
                    ptr=data->state.buffer;
                    if((407 == k->httpcode) && !data->state.authproblem) {
                      /* If we get a 407 response code with content length
                         when we have no auth problem, we must ignore the
                         whole response-body */
                      keepon = 2;

                      if(cl) {

                        infof(data, "Ignore %" FORMAT_OFF_T
                              " bytes of response-body\n", cl);
                        /* remove the remaining chunk of what we already
                           read */
                        cl -= (gotbytes - i);

                        if(cl<=0)
                          /* if the whole thing was already read, we are done!
                           */
                          keepon=FALSE;
                      }
                      else if(chunked_encoding) {
                        CHUNKcode r;
                        /* We set ignorebody true here since the chunked
                           decoder function will acknowledge that. Pay
                           attention so that this is cleared again when this
                           function returns! */
                        k->ignorebody = TRUE;
                        infof(data, "%zd bytes of chunk left\n", gotbytes-i);

                        if(line_start[1] == '\n') {
                          /* this can only be a LF if the letter at index 0
                             was a CR */
                          line_start++;
                          i++;
                        }

                        /* now parse the chunked piece of data so that we can
                           properly tell when the stream ends */
                        r = Curl_httpchunk_read(conn, line_start+1,
                                                gotbytes - i, &gotbytes);
                        if(r == CHUNKE_STOP) {
                          /* we're done reading chunks! */
                          infof(data, "chunk reading DONE\n");
                          keepon = FALSE;
                          /* we did the full CONNECT treatment, go to
                             COMPLETE */
                          conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
                        }
                        else
                          infof(data, "Read %zd bytes of chunk, continue\n",
                                gotbytes);
                      }
                      else {
                        /* without content-length or chunked encoding, we
                           can't keep the connection alive since the close is
                           the end signal so we bail out at once instead */
                        keepon=FALSE;
                      }
                    }
                    else {
                      keepon = FALSE;
                      if(200 == data->info.httpproxycode) {
                        if(gotbytes - (i+1))
                          failf(data, "Proxy CONNECT followed by %zd bytes "
                                "of opaque data. Data ignored (known bug #39)",
                                gotbytes - (i+1));
                      }
                    }
                    /* we did the full CONNECT treatment, go to COMPLETE */
                    conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
                    break; /* breaks out of for-loop, not switch() */
                  }

                  /* keep a backup of the position we are about to blank */
                  letter = line_start[perline];
                  line_start[perline]=0; /* zero terminate the buffer */
                  if((checkprefix("WWW-Authenticate:", line_start) &&
                      (401 == k->httpcode)) ||
                     (checkprefix("Proxy-authenticate:", line_start) &&
                      (407 == k->httpcode))) {
                    result = Curl_http_input_auth(conn, k->httpcode,
                                                  line_start);
                    if(result)
                      return result;
                  }
                  else if(checkprefix("Content-Length:", line_start)) {
                    cl = curlx_strtoofft(line_start +
                                         strlen("Content-Length:"), NULL, 10);
                  }
                  else if(Curl_compareheader(line_start,
                                             "Connection:", "close"))
                    closeConnection = TRUE;
                  else if(Curl_compareheader(line_start,
                                             "Transfer-Encoding:",
                                             "chunked")) {
                    infof(data, "CONNECT responded chunked\n");
                    chunked_encoding = TRUE;
                    /* init our chunky engine */
                    Curl_httpchunk_init(conn);
                  }
                  else if(Curl_compareheader(line_start,
                                             "Proxy-Connection:", "close"))
                    closeConnection = TRUE;
                  else if(2 == sscanf(line_start, "HTTP/1.%d %d",
                                      &subversion,
                                      &k->httpcode)) {
                    /* store the HTTP code from the proxy */
                    data->info.httpproxycode = k->httpcode;
                  }
                  /* put back the letter we blanked out before */
                  line_start[perline]= letter;

                  perline=0; /* line starts over here */
                  line_start = ptr+1; /* this skips the zero byte we wrote */
                }
              }
          }
          break;
        } /* switch */
        if(Curl_pgrsUpdate(conn))
          return CURLE_ABORTED_BY_CALLBACK;
      } /* while there's buffer left and loop is requested */

      if(error)
        return CURLE_RECV_ERROR;

      if(data->info.httpproxycode != 200) {
        /* Deal with the possibly already received authenticate
           headers. 'newurl' is set to a new URL if we must loop. */
        result = Curl_http_auth_act(conn);
        if(result)
          return result;

        if(conn->bits.close)
          /* the connection has been marked for closure, most likely in the
             Curl_http_auth_act() function and thus we can kill it at once
             below
          */
          closeConnection = TRUE;
      }

      if(closeConnection && data->req.newurl) {
        /* Connection closed by server. Don't use it anymore */
        Curl_closesocket(conn, conn->sock[sockindex]);
        conn->sock[sockindex] = CURL_SOCKET_BAD;
        break;
      }
    } /* END NEGOTIATION PHASE */

    /* If we are supposed to continue and request a new URL, that basically
     * means the HTTP authentication is still going on, so if the tunnel
     * is complete we start over in INIT state */
    if(data->req.newurl &&
       (TUNNEL_COMPLETE == conn->tunnel_state[sockindex])) {
      conn->tunnel_state[sockindex] = TUNNEL_INIT;
      infof(data, "TUNNEL_STATE switched to: %d\n",
            conn->tunnel_state[sockindex]);
    }

  } while(data->req.newurl);

  if(200 != data->req.httpcode) {
    failf(data, "Received HTTP code %d from proxy after CONNECT",
          data->req.httpcode);

    if(closeConnection && data->req.newurl)
      conn->bits.proxy_connect_closed = TRUE;

    if(data->req.newurl) {
      /* this won't be used anymore for the CONNECT so free it now */
      free(data->req.newurl);
      data->req.newurl = NULL;
    }

    /* go back to init state */
    conn->tunnel_state[sockindex] = TUNNEL_INIT;

    return CURLE_RECV_ERROR;
  }

  conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;

  /* If a proxy-authorization header was used for the proxy, then we should
     make sure that it isn't accidentally used for the document request
     after we've connected. So let's free and clear it here. */
  Curl_safefree(conn->allocptr.proxyuserpwd);
  conn->allocptr.proxyuserpwd = NULL;

  data->state.authproxy.done = TRUE;

  infof (data, "Proxy replied OK to CONNECT request\n");
  data->req.ignorebody = FALSE; /* put it (back) to non-ignore state */
  conn->bits.rewindaftersend = FALSE; /* make sure this isn't set for the
                                         document request  */
  return CURLE_OK;
}
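
Note: the response-parsing loop above classifies each CONNECT reply header by
prefix before acting on it. Below is a minimal standalone sketch of that
classification step only, using plain strncasecmp(), sscanf() and strtoll()
instead of curl's internal checkprefix(), Curl_compareheader() and
curlx_strtoofft() helpers. has_prefix() and parse_proxy_header() are
illustrative names, and the strstr() value checks are a rough simplification
of what Curl_compareheader() actually does.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* illustrative sketch, not a curl API: case-insensitive prefix test,
   similar in spirit to checkprefix() */
static int has_prefix(const char *prefix, const char *line)
{
  return strncasecmp(prefix, line, strlen(prefix)) == 0;
}

/* illustrative sketch: classify one zero-terminated response-header line */
static void parse_proxy_header(const char *line,
                               int *httpcode,
                               long long *content_length,
                               int *chunked,
                               int *close_connection)
{
  int subversion;

  if(2 == sscanf(line, "HTTP/1.%d %d", &subversion, httpcode))
    return; /* status line: remember the response code */
  if(has_prefix("Content-Length:", line))
    *content_length = strtoll(line + strlen("Content-Length:"), NULL, 10);
  else if(has_prefix("Transfer-Encoding:", line) && strstr(line, "chunked"))
    *chunked = 1;
  else if(has_prefix("Connection:", line) && strstr(line, "close"))
    *close_connection = 1;
  else if(has_prefix("Proxy-Connection:", line) && strstr(line, "close"))
    *close_connection = 1;
}

int main(void)
{
  int code = 0, chunked = 0, closeconn = 0;
  long long cl = -1;

  parse_proxy_header("HTTP/1.1 407 Proxy Authentication Required",
                     &code, &cl, &chunked, &closeconn);
  parse_proxy_header("Content-Length: 48", &code, &cl, &chunked, &closeconn);

  printf("code=%d content-length=%lld chunked=%d close=%d\n",
         code, cl, chunked, closeconn);
  return 0;
}
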
Example #9
0
static CURLcode CONNECT(struct connectdata *conn,
                        int sockindex,
                        const char *hostname,
                        int remote_port)
{
  int subversion = 0;
  struct Curl_easy *data = conn->data;
  struct SingleRequest *k = &data->req;
  CURLcode result;
  curl_socket_t tunnelsocket = conn->sock[sockindex];
  struct http_connect_state *s = conn->connect_state;

#define SELECT_OK      0
#define SELECT_ERROR   1

  if(Curl_connect_complete(conn))
    return CURLE_OK; /* CONNECT is already completed */

  conn->bits.proxy_connect_closed = FALSE;

  do {
    timediff_t check;
    if(TUNNEL_INIT == s->tunnel_state) {
      /* BEGIN CONNECT PHASE */
      char *host_port;
      Curl_send_buffer *req_buffer;

      infof(data, "Establish HTTP proxy tunnel to %s:%d\n",
            hostname, remote_port);

      /* This only happens if we've looped here due to authentication
         reasons, and we don't really use the newly cloned URL here
         then. Just free() it. */
      free(data->req.newurl);
      data->req.newurl = NULL;

      /* initialize a dynamic send-buffer */
      req_buffer = Curl_add_buffer_init();

      if(!req_buffer)
        return CURLE_OUT_OF_MEMORY;

      host_port = aprintf("%s:%d", hostname, remote_port);
      if(!host_port) {
        Curl_add_buffer_free(req_buffer);
        return CURLE_OUT_OF_MEMORY;
      }

      /* Setup the proxy-authorization header, if any */
      result = Curl_http_output_auth(conn, "CONNECT", host_port, TRUE);

      free(host_port);

      if(!result) {
        char *host = NULL;
        const char *proxyconn = "";
        const char *useragent = "";
        const char *http = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ?
          "1.0" : "1.1";
        bool ipv6_ip = conn->bits.ipv6_ip;
        char *hostheader;

        /* the hostname may be different */
        if(hostname != conn->host.name)
          ipv6_ip = (strchr(hostname, ':') != NULL);
        hostheader = /* host:port with IPv6 support */
          aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"",
                  remote_port);
        if(!hostheader) {
          Curl_add_buffer_free(req_buffer);
          return CURLE_OUT_OF_MEMORY;
        }

        if(!Curl_checkProxyheaders(conn, "Host")) {
          host = aprintf("Host: %s\r\n", hostheader);
          if(!host) {
            free(hostheader);
            Curl_add_buffer_free(req_buffer);
            return CURLE_OUT_OF_MEMORY;
          }
        }
        if(!Curl_checkProxyheaders(conn, "Proxy-Connection"))
          proxyconn = "Proxy-Connection: Keep-Alive\r\n";

        if(!Curl_checkProxyheaders(conn, "User-Agent") &&
           data->set.str[STRING_USERAGENT])
          useragent = conn->allocptr.uagent;

        result =
          Curl_add_bufferf(req_buffer,
                           "CONNECT %s HTTP/%s\r\n"
                           "%s"  /* Host: */
                           "%s"  /* Proxy-Authorization */
                           "%s"  /* User-Agent */
                           "%s", /* Proxy-Connection */
                           hostheader,
                           http,
                           host?host:"",
                           conn->allocptr.proxyuserpwd?
                           conn->allocptr.proxyuserpwd:"",
                           useragent,
                           proxyconn);

        if(host)
          free(host);
        free(hostheader);

        if(!result)
          result = Curl_add_custom_headers(conn, TRUE, req_buffer);

        if(!result)
          /* CRLF terminate the request */
          result = Curl_add_bufferf(req_buffer, "\r\n");

        if(!result) {
          /* Send the connect request to the proxy */
          /* BLOCKING */
          result =
            Curl_add_buffer_send(req_buffer, conn,
                                 &data->info.request_size, 0, sockindex);
        }
        req_buffer = NULL;
        if(result)
          failf(data, "Failed sending CONNECT to proxy");
      }

      Curl_add_buffer_free(req_buffer);
      if(result)
        return result;

      s->tunnel_state = TUNNEL_CONNECT;
      s->perline = 0;
    } /* END CONNECT PHASE */

    check = Curl_timeleft(data, NULL, TRUE);
    if(check <= 0) {
      failf(data, "Proxy CONNECT aborted due to timeout");
      return CURLE_OPERATION_TIMEDOUT;
    }

    if(!Curl_conn_data_pending(conn, sockindex))
      /* return so we'll be called again polling-style */
      return CURLE_OK;

    /* at this point, the tunnel_connecting phase is over. */

    { /* READING RESPONSE PHASE */
      int error = SELECT_OK;

      while(s->keepon && !error) {
        ssize_t gotbytes;

        /* make sure we have space to read more data */
        if(s->ptr >= &s->connect_buffer[CONNECT_BUFFER_SIZE]) {
          failf(data, "CONNECT response too large!");
          return CURLE_RECV_ERROR;
        }

        /* Read one byte at a time to avoid a race condition. Wait at most one
           second before looping to ensure continuous pgrsUpdates. */
        result = Curl_read(conn, tunnelsocket, s->ptr, 1, &gotbytes);
        if(result == CURLE_AGAIN)
          /* socket buffer drained, return */
          return CURLE_OK;

        if(Curl_pgrsUpdate(conn))
          return CURLE_ABORTED_BY_CALLBACK;

        if(result) {
          s->keepon = FALSE;
          break;
        }
        else if(gotbytes <= 0) {
          if(data->set.proxyauth && data->state.authproxy.avail) {
            /* proxy auth was requested and there was proxy auth available,
               then deem this as "mere" proxy disconnect */
            conn->bits.proxy_connect_closed = TRUE;
            infof(data, "Proxy CONNECT connection closed\n");
          }
          else {
            error = SELECT_ERROR;
            failf(data, "Proxy CONNECT aborted");
          }
          s->keepon = FALSE;
          break;
        }


        if(s->keepon > TRUE) {
          /* This means we are currently ignoring a response-body */

          s->ptr = s->connect_buffer;
          if(s->cl) {
            /* A Content-Length based body: simply count down the counter
               and make sure to break out of the loop when we're done! */
            s->cl--;
            if(s->cl <= 0) {
              s->keepon = FALSE;
              s->tunnel_state = TUNNEL_COMPLETE;
              break;
            }
          }
          else {
            /* chunked-encoded body, so we need to do the chunked dance
               properly to know when the end of the body is reached */
            CHUNKcode r;
            ssize_t tookcareof = 0;

            /* now parse the chunked piece of data so that we can
               properly tell when the stream ends */
            r = Curl_httpchunk_read(conn, s->ptr, 1, &tookcareof);
            if(r == CHUNKE_STOP) {
              /* we're done reading chunks! */
              infof(data, "chunk reading DONE\n");
              s->keepon = FALSE;
              /* we did the full CONNECT treatment, go COMPLETE */
              s->tunnel_state = TUNNEL_COMPLETE;
            }
          }
          continue;
        }

        s->perline++; /* amount of bytes in this line so far */

        /* if this is not the end of a header line then continue */
        if(*s->ptr != 0x0a) {
          s->ptr++;
          continue;
        }

        /* convert from the network encoding */
        result = Curl_convert_from_network(data, s->line_start,
                                           (size_t)s->perline);
        /* Curl_convert_from_network calls failf if unsuccessful */
        if(result)
          return result;

        /* output debug if that is requested */
        if(data->set.verbose)
          Curl_debug(data, CURLINFO_HEADER_IN,
                     s->line_start, (size_t)s->perline);

        if(!data->set.suppress_connect_headers) {
          /* send the header to the callback */
          int writetype = CLIENTWRITE_HEADER;
          if(data->set.include_header)
            writetype |= CLIENTWRITE_BODY;

          result = Curl_client_write(conn, writetype,
                                     s->line_start, s->perline);
          if(result)
            return result;
        }

        data->info.header_size += (long)s->perline;
        data->req.headerbytecount += (long)s->perline;

        /* Newlines are CRLF, so the CR is ignored as the line isn't
           really terminated until the LF comes. Treat a following CR
           as end-of-headers as well.*/

        if(('\r' == s->line_start[0]) ||
           ('\n' == s->line_start[0])) {
          /* end of response-headers from the proxy */
          s->ptr = s->connect_buffer;
          if((407 == k->httpcode) && !data->state.authproblem) {
            /* If we get a 407 response code with content length
               when we have no auth problem, we must ignore the
               whole response-body */
            s->keepon = 2;

            if(s->cl) {
              infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T
                    " bytes of response-body\n", s->cl);
            }
            else if(s->chunked_encoding) {
              CHUNKcode r;

              infof(data, "Ignore chunked response-body\n");

              /* We set ignorebody true here since the chunked
                 decoder function will acknowledge that. Pay
                 attention so that this is cleared again when this
                 function returns! */
              k->ignorebody = TRUE;

              if(s->line_start[1] == '\n') {
                /* this can only be a LF if the letter at index 0
                   was a CR */
                s->line_start++;
              }

              /* now parse the chunked piece of data so that we can
                 properly tell when the stream ends */
              r = Curl_httpchunk_read(conn, s->line_start + 1, 1, &gotbytes);
              if(r == CHUNKE_STOP) {
                /* we're done reading chunks! */
                infof(data, "chunk reading DONE\n");
                s->keepon = FALSE;
                /* we did the full CONNECT treatment, go to COMPLETE */
                s->tunnel_state = TUNNEL_COMPLETE;
              }
            }
            else {
              /* without content-length or chunked encoding, we
                 can't keep the connection alive since the close is
                 the end signal so we bail out at once instead */
              s->keepon = FALSE;
            }
          }
          else
            s->keepon = FALSE;
          if(!s->cl)
            /* we did the full CONNECT treatment, go to COMPLETE */
            s->tunnel_state = TUNNEL_COMPLETE;
          continue;
        }

        s->line_start[s->perline] = 0; /* zero terminate the buffer */
        if((checkprefix("WWW-Authenticate:", s->line_start) &&
            (401 == k->httpcode)) ||
           (checkprefix("Proxy-authenticate:", s->line_start) &&
            (407 == k->httpcode))) {

          bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
          char *auth = Curl_copy_header_value(s->line_start);
          if(!auth)
            return CURLE_OUT_OF_MEMORY;

          result = Curl_http_input_auth(conn, proxy, auth);

          free(auth);

          if(result)
            return result;
        }
        else if(checkprefix("Content-Length:", s->line_start)) {
          if(k->httpcode/100 == 2) {
            /* A client MUST ignore any Content-Length or Transfer-Encoding
               header fields received in a successful response to CONNECT.
               "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
            infof(data, "Ignoring Content-Length in CONNECT %03d response\n",
                  k->httpcode);
          }
          else {
            (void)curlx_strtoofft(s->line_start +
                                  strlen("Content-Length:"), NULL, 10, &s->cl);
          }
        }
        else if(Curl_compareheader(s->line_start, "Connection:", "close"))
          s->close_connection = TRUE;
        else if(checkprefix("Transfer-Encoding:", s->line_start)) {
          if(k->httpcode/100 == 2) {
            /* A client MUST ignore any Content-Length or Transfer-Encoding
               header fields received in a successful response to CONNECT.
               "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
            infof(data, "Ignoring Transfer-Encoding in "
                  "CONNECT %03d response\n", k->httpcode);
          }
          else if(Curl_compareheader(s->line_start,
                                     "Transfer-Encoding:", "chunked")) {
            infof(data, "CONNECT responded chunked\n");
            s->chunked_encoding = TRUE;
            /* init our chunky engine */
            Curl_httpchunk_init(conn);
          }
        }
        else if(Curl_compareheader(s->line_start,
                                   "Proxy-Connection:", "close"))
          s->close_connection = TRUE;
        else if(2 == sscanf(s->line_start, "HTTP/1.%d %d",
                            &subversion,
                            &k->httpcode)) {
          /* store the HTTP code from the proxy */
          data->info.httpproxycode = k->httpcode;
        }

        s->perline = 0; /* line starts over here */
        s->ptr = s->connect_buffer;
        s->line_start = s->ptr;
      } /* while there's buffer left and loop is requested */

      if(Curl_pgrsUpdate(conn))
        return CURLE_ABORTED_BY_CALLBACK;

      if(error)
        return CURLE_RECV_ERROR;

      if(data->info.httpproxycode/100 != 2) {
        /* Deal with the possibly already received authenticate
           headers. 'newurl' is set to a new URL if we must loop. */
        result = Curl_http_auth_act(conn);
        if(result)
          return result;

        if(conn->bits.close)
          /* the connection has been marked for closure, most likely in the
             Curl_http_auth_act() function and thus we can kill it at once
             below */
          s->close_connection = TRUE;
      }

      if(s->close_connection && data->req.newurl) {
        /* Connection closed by server. Don't use it anymore */
        Curl_closesocket(conn, conn->sock[sockindex]);
        conn->sock[sockindex] = CURL_SOCKET_BAD;
        break;
      }
    } /* END READING RESPONSE PHASE */

    /* If we are supposed to continue and request a new URL, that basically
     * means the HTTP authentication is still going on, so if the tunnel
     * is complete we start over in INIT state */
    if(data->req.newurl && (TUNNEL_COMPLETE == s->tunnel_state)) {
      connect_init(conn, TRUE); /* reinit */
    }

  } while(data->req.newurl);

  if(data->info.httpproxycode/100 != 2) {
    if(s->close_connection && data->req.newurl) {
      conn->bits.proxy_connect_closed = TRUE;
      infof(data, "Connect me again please\n");
      connect_done(conn);
    }
    else {
      free(data->req.newurl);
      data->req.newurl = NULL;
      /* failure, close this connection to avoid re-use */
      streamclose(conn, "proxy CONNECT failure");
      Curl_closesocket(conn, conn->sock[sockindex]);
      conn->sock[sockindex] = CURL_SOCKET_BAD;
    }

    /* go back to init state */
    s->tunnel_state = TUNNEL_INIT;

    if(conn->bits.proxy_connect_closed)
      /* this is not an error, just part of the connection negotiation */
      return CURLE_OK;
    failf(data, "Received HTTP code %d from proxy after CONNECT",
          data->req.httpcode);
    return CURLE_RECV_ERROR;
  }

  s->tunnel_state = TUNNEL_COMPLETE;

  /* If a proxy-authorization header was used for the proxy, then we should
     make sure that it isn't accidentally used for the document request
     after we've connected. So let's free and clear it here. */
  Curl_safefree(conn->allocptr.proxyuserpwd);
  conn->allocptr.proxyuserpwd = NULL;

  data->state.authproxy.done = TRUE;

  infof(data, "Proxy replied %d to CONNECT request\n",
        data->info.httpproxycode);
  data->req.ignorebody = FALSE; /* put it (back) to non-ignore state */
  conn->bits.rewindaftersend = FALSE; /* make sure this isn't set for the
                                         document request  */
  return CURLE_OK;
}
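
Note: the request that the CONNECT phase above assembles with
Curl_add_bufferf() boils down to a request line, a Host: header, optional
Proxy-Authorization and User-Agent headers, and a terminating blank line. The
sketch below formats the same shape with plain snprintf(), assuming no proxy
authentication or custom headers are needed; format_connect_request() is an
illustrative name, not a curl API.

#include <stdio.h>

/* illustrative sketch: format a CONNECT request the way the code above does,
   but with snprintf() instead of curl's dynamic send-buffer. Returns the
   number of bytes written, or -1 if the buffer is too small. */
static int format_connect_request(char *buf, size_t buflen,
                                  const char *hostname, int remote_port,
                                  int ipv6_ip)
{
  int n = snprintf(buf, buflen,
                   "CONNECT %s%s%s:%d HTTP/1.1\r\n"
                   "Host: %s%s%s:%d\r\n"
                   "Proxy-Connection: Keep-Alive\r\n"
                   "\r\n", /* the blank line ends the request headers */
                   ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", remote_port,
                   ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", remote_port);
  return (n < 0 || (size_t)n >= buflen) ? -1 : n;
}

int main(void)
{
  char req[512];
  if(format_connect_request(req, sizeof(req), "example.com", 443, 0) > 0)
    fputs(req, stdout);
  return 0;
}
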
Example #10
0
/*
 * Curl_httpchunk_read() returns OK for normal operations, or a positive return
 * code for errors. STOP means this sequence of chunks is complete. The 'wrote'
 * argument is set to tell the caller how many bytes we actually passed to the
 * client (for byte-counting and whatever).
 *
 * The states and the state machine are further explained in the header file.
 *
 * This function always uses ASCII hex values to accommodate non-ASCII hosts.
 * For example, 0x0d and 0x0a are used instead of '\r' and '\n'.
 */
CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
                              char *datap,
                              ssize_t datalen,
                              ssize_t *wrotep)
{
  CURLcode result=CURLE_OK;
  struct SessionHandle *data = conn->data;
  struct Curl_chunker *ch = &conn->chunk;
  struct SingleRequest *k = &data->req;
  size_t piece;
  curl_off_t length = (curl_off_t)datalen;
  size_t *wrote = (size_t *)wrotep;

  *wrote = 0; /* nothing's written yet */

  /* the original data is written to the client, but we go on with the
     chunk read process, to properly calculate the content length*/
  if(data->set.http_te_skip && !k->ignorebody) {
    result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, datalen);
    if(result)
      return CHUNKE_WRITE_ERROR;
  }

  while(length) {
    switch(ch->state) {
    case CHUNK_HEX:
      if(Curl_isxdigit(*datap)) {
        if(ch->hexindex < MAXNUM_SIZE) {
          ch->hexbuffer[ch->hexindex] = *datap;
          datap++;
          length--;
          ch->hexindex++;
        }
        else {
          return CHUNKE_TOO_LONG_HEX; /* longer hex than we support */
        }
      }
      else {
        char *endptr;
        if(0 == ch->hexindex)
          /* This is illegal data, we received junk where we expected
             a hexadecimal digit. */
          return CHUNKE_ILLEGAL_HEX;

        /* length and datap are unmodified */
        ch->hexbuffer[ch->hexindex]=0;

        /* convert to host encoding before calling strtoul */
        result = Curl_convert_from_network(conn->data, ch->hexbuffer,
                                           ch->hexindex);
        if(result) {
          /* Curl_convert_from_network calls failf if unsuccessful */
          /* Treat it as a bad hex character */
          return CHUNKE_ILLEGAL_HEX;
        }

        ch->datasize=curlx_strtoofft(ch->hexbuffer, &endptr, 16);
        if((ch->datasize == CURL_OFF_T_MAX) && (errno == ERANGE))
          /* overflow is an error */
          return CHUNKE_ILLEGAL_HEX;
        ch->state = CHUNK_LF; /* now wait for the CRLF */
      }
      break;

    case CHUNK_LF:
      /* waiting for the LF after a chunk size */
      if(*datap == 0x0a) {
        /* we're now expecting data to come, unless size was zero! */
        if(0 == ch->datasize) {
          ch->state = CHUNK_TRAILER; /* now check for trailers */
          conn->trlPos=0;
        }
        else
          ch->state = CHUNK_DATA;
      }

      datap++;
      length--;
      break;

    case CHUNK_DATA:
      /* We expect 'datasize' of data. We have 'length' right now, it can be
         more or less than 'datasize'. Get the smallest piece.
      */
      piece = curlx_sotouz((ch->datasize >= length)?length:ch->datasize);

      /* Write the data portion available */
#ifdef HAVE_LIBZ
      switch (conn->data->set.http_ce_skip?
              IDENTITY : data->req.auto_decoding) {
      case IDENTITY:
#endif
        if(!k->ignorebody) {
          if(!data->set.http_te_skip)
            result = Curl_client_write(conn, CLIENTWRITE_BODY, datap,
                                       piece);
          else
            result = CURLE_OK;
        }
#ifdef HAVE_LIBZ
        break;

      case DEFLATE:
        /* update data->req.keep.str to point to the chunk data. */
        data->req.str = datap;
        result = Curl_unencode_deflate_write(conn, &data->req,
                                             (ssize_t)piece);
        break;

      case GZIP:
        /* update data->req.keep.str to point to the chunk data. */
        data->req.str = datap;
        result = Curl_unencode_gzip_write(conn, &data->req,
                                          (ssize_t)piece);
        break;

      case COMPRESS:
      default:
        failf (conn->data,
               "Unrecognized content encoding type. "
               "libcurl understands `identity', `deflate' and `gzip' "
               "content encodings.");
        return CHUNKE_BAD_ENCODING;
      }
#endif

      if(result)
        return CHUNKE_WRITE_ERROR;

      *wrote += piece;

      ch->datasize -= piece; /* decrease amount left to expect */
      datap += piece;    /* move read pointer forward */
      length -= piece;   /* decrease space left in this round */

      if(0 == ch->datasize)
        /* end of data this round, we now expect a trailing CRLF */
        ch->state = CHUNK_POSTLF;
      break;

    case CHUNK_POSTLF:
      if(*datap == 0x0a) {
        /* The last one before we go back to hex state and start all over. */
        Curl_httpchunk_init(conn); /* sets state back to CHUNK_HEX */
      }
      else if(*datap != 0x0d)
        return CHUNKE_BAD_CHUNK;
      datap++;
      length--;
      break;

    case CHUNK_TRAILER:
      if((*datap == 0x0d) || (*datap == 0x0a)) {
        /* this is the end of a trailer, but if the trailer was zero bytes
           there was no trailer and we move on */

        if(conn->trlPos) {
          /* we allocate trailer with 3 bytes extra room to fit this */
          conn->trailer[conn->trlPos++]=0x0d;
          conn->trailer[conn->trlPos++]=0x0a;
          conn->trailer[conn->trlPos]=0;

          /* Convert to host encoding before calling Curl_client_write */
          result = Curl_convert_from_network(conn->data, conn->trailer,
                                             conn->trlPos);
          if(result)
            /* Curl_convert_from_network calls failf if unsuccessful */
            /* Treat it as a bad chunk */
            return CHUNKE_BAD_CHUNK;

          if(!data->set.http_te_skip) {
            result = Curl_client_write(conn, CLIENTWRITE_HEADER,
                                       conn->trailer, conn->trlPos);
            if(result)
              return CHUNKE_WRITE_ERROR;
          }
          conn->trlPos=0;
          ch->state = CHUNK_TRAILER_CR;
          if(*datap == 0x0a)
            /* already on the LF */
            break;
        }
        else {
          /* no trailer, we're on the final CRLF pair */
          ch->state = CHUNK_TRAILER_POSTCR;
          break; /* don't advance the pointer */
        }
      }
      else {
        /* conn->trailer is assumed to be freed in url.c on a
           connection basis */
        if(conn->trlPos >= conn->trlMax) {
          /* we always allocate three extra bytes, just because when the full
             header has been received we append CRLF\0 */
          char *ptr;
          if(conn->trlMax) {
            conn->trlMax *= 2;
            ptr = realloc(conn->trailer, conn->trlMax + 3);
          }
          else {
            conn->trlMax=128;
            ptr = malloc(conn->trlMax + 3);
          }
          if(!ptr)
            return CHUNKE_OUT_OF_MEMORY;
          conn->trailer = ptr;
        }
        conn->trailer[conn->trlPos++]=*datap;
      }
      datap++;
      length--;
      break;

    case CHUNK_TRAILER_CR:
      if(*datap == 0x0a) {
        ch->state = CHUNK_TRAILER_POSTCR;
        datap++;
        length--;
      }
      else
        return CHUNKE_BAD_CHUNK;
      break;

    case CHUNK_TRAILER_POSTCR:
      /* We enter this state when a CR should arrive so we expect to
         have to first pass a CR before we wait for LF */
      if((*datap != 0x0d) && (*datap != 0x0a)) {
        /* not a CR then it must be another header in the trailer */
        ch->state = CHUNK_TRAILER;
        break;
      }
      if(*datap == 0x0d) {
        /* skip if CR */
        datap++;
        length--;
      }
      /* now wait for the final LF */
      ch->state = CHUNK_STOP;
      break;

    case CHUNK_STOP:
      if(*datap == 0x0a) {
        length--;

        /* Record the length of any data left in the end of the buffer
           even if there's no more chunks to read */
        ch->dataleft = curlx_sotouz(length);

        return CHUNKE_STOP; /* return stop */
      }
      else
        return CHUNKE_BAD_CHUNK;
    }
  }
  return CHUNKE_OK;
}
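
Note: the decoder above is fed incrementally by its callers (see the CONNECT
examples), which is why it keeps explicit state in struct Curl_chunker between
calls. As a self-contained illustration of the framing it consumes (hex size,
CRLF, data, CRLF, repeated until a zero-size chunk), here is a one-pass
sketch. It assumes the whole message is already in memory, skips trailer
headers entirely, and dechunk() is an illustrative name, not curl's state
machine.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* illustrative sketch: de-chunk a complete chunked-encoded buffer into 'out'.
   Returns the number of body bytes produced, or -1 on malformed input. */
static long dechunk(const char *in, size_t inlen, char *out, size_t outlen)
{
  const char *stop = in + inlen;
  size_t produced = 0;

  while(in < stop) {
    char *end;
    unsigned long size = strtoul(in, &end, 16); /* chunk size in hex */
    if(end == in)
      return -1; /* no hex digits where the chunk size should be */
    in = end;
    if((stop - in) < 2 || in[0] != '\r' || in[1] != '\n')
      return -1;
    in += 2; /* skip the CRLF after the size */
    if(size == 0)
      return (long)produced; /* zero-size chunk: body is complete */
    if(size > outlen - produced || (size_t)(stop - in) < size + 2)
      return -1;
    memcpy(out + produced, in, size);
    produced += size;
    in += size;
    if(in[0] != '\r' || in[1] != '\n')
      return -1;
    in += 2; /* skip the CRLF trailing the data */
  }
  return -1; /* ran out of input before the zero-size chunk */
}

int main(void)
{
  const char *msg = "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n";
  char body[64];
  long n = dechunk(msg, strlen(msg), body, sizeof(body));
  if(n >= 0)
    printf("%.*s\n", (int)n, body); /* prints "Wikipedia" */
  return 0;
}
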
Example #11
0
/* For the (first line of) FETCH BODY[TEXT] response */
static CURLcode imap_state_fetch_resp(struct connectdata *conn,
                                      int imapcode,
                                      imapstate instate)
{
  CURLcode result = CURLE_OK;
  struct SessionHandle *data = conn->data;
  struct imap_conn *imapc = &conn->proto.imapc;
  struct FTP *imap = data->state.proto.imap;
  struct pingpong *pp = &imapc->pp;
  const char *ptr = data->state.buffer;

  (void)instate; /* no use for this yet */

  if('*' != imapcode) {
    Curl_pgrsSetDownloadSize(data, 0);
    state(conn, IMAP_STOP);
    return CURLE_OK;
  }

  /* Something like this comes "* 1 FETCH (BODY[TEXT] {2021}\r" */
  while(*ptr && (*ptr != '{'))
    ptr++;

  if(*ptr == '{') {
    curl_off_t filesize = curlx_strtoofft(ptr + 1, NULL, 10);
    if(filesize)
      Curl_pgrsSetDownloadSize(data, filesize);

    infof(data, "Found %" FORMAT_OFF_TU " bytes to download\n", filesize);

    if(pp->cache) {
      /* At this point there is a bunch of data in the header "cache" that is
         actually body content, send it as body and then skip it. Do note
         that there may even be additional "headers" after the body. */
      size_t chunk = pp->cache_size;

      if(chunk > (size_t)filesize)
        /* the conversion from curl_off_t to size_t is always fine here */
        chunk = (size_t)filesize;

      result = Curl_client_write(conn, CLIENTWRITE_BODY, pp->cache, chunk);
      if(result)
        return result;

      filesize -= chunk;

      /* we've now used parts of or the entire cache */
      if(pp->cache_size > chunk) {
        /* part of, move the trailing data to the start and reduce the size */
        memmove(pp->cache, pp->cache+chunk,
                pp->cache_size - chunk);
        pp->cache_size -= chunk;
      }
      else {
        /* cache is drained */
        Curl_safefree(pp->cache);
        pp->cache = NULL;
        pp->cache_size = 0;
      }
    }

    infof(data, "Filesize left: %" FORMAT_OFF_T "\n", filesize);

    if(!filesize)
      /* the entire data is already transferred! */
      Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
    else
      /* IMAP download */
      Curl_setup_transfer(conn, FIRSTSOCKET, filesize, FALSE,
                          imap->bytecountp, -1, NULL); /* no upload here */

    data->req.maxdownload = filesize;
  }
  else
    /* We don't know how to parse this line */
    result = CURLE_FTP_WEIRD_SERVER_REPLY; /* TODO: fix this code */

  /* End of do phase */
  state(conn, IMAP_STOP);

  return result;
}
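
Note: the handler above locates the "{size}" literal marker in the FETCH
response line before sizing the download. Here is a minimal standalone sketch
of just that extraction step, using strtoll() in place of curlx_strtoofft();
imap_literal_size() is an illustrative name, not a curl API.

#include <stdio.h>
#include <stdlib.h>

/* illustrative sketch: extract the literal octet count from a line such as
   "* 1 FETCH (BODY[TEXT] {2021}". Returns -1 if no '{size}' is present. */
static long long imap_literal_size(const char *line)
{
  while(*line && (*line != '{'))
    line++;
  if(*line != '{')
    return -1;
  return strtoll(line + 1, NULL, 10);
}

int main(void)
{
  printf("%lld\n", imap_literal_size("* 1 FETCH (BODY[TEXT] {2021}"));
  return 0;
}
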
Example #12
0
struct Cookie *
Curl_cookie_add(struct SessionHandle *data,
                /* The 'data' pointer here may be NULL at times, and thus
                   must only be used very carefully for things that can deal
                   with data being NULL. Such as infof() and similar */

                struct CookieInfo *c,
                bool httpheader, /* TRUE if HTTP header-style line */
                char *lineptr,   /* first character of the line */
                char *domain,    /* default domain */
                char *path)      /* full path used when this cookie is set,
                                    used to get default path for the cookie
                                    unless set */
{
  struct Cookie *clist;
  char *what;
  char name[MAX_NAME];
  char *ptr;
  char *semiptr;
  struct Cookie *co;
  struct Cookie *lastc=NULL;
  time_t now = time(NULL);
  bool replace_old = FALSE;
  bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */

  /* First, alloc and init a new struct for it */
  co = (struct Cookie *)calloc(1, sizeof(struct Cookie));
  if(!co)
    return NULL; /* bail out if we're this low on memory */

  if(httpheader) {
    /* This line was read off a HTTP-header */
    char *sep;

    what = malloc(MAX_COOKIE_LINE);
    if(!what) {
      free(co);
      return NULL;
    }

    semiptr=strchr(lineptr, ';'); /* first, find a semicolon */

    while(*lineptr && my_isspace(*lineptr))
      lineptr++;

    ptr = lineptr;
    do {
      /* we have a <what>=<this> pair or a 'secure' word here */
      sep = strchr(ptr, '=');
      if(sep && (!semiptr || (semiptr>sep)) ) {
        /*
         * There is a '=' sign, and if there is a semicolon too, the check
         * above made sure that the semicolon comes _after_ the equal sign.
         */

        name[0]=what[0]=0; /* init the buffers */
        if(1 <= sscanf(ptr, "%" MAX_NAME_TXT "[^;=]=%"
                       MAX_COOKIE_LINE_TXT "[^;\r\n]",
                       name, what)) {
          /* this is a <name>=<what> pair */

          char *whatptr;

          /* Strip off trailing whitespace from the 'what' */
          size_t len=strlen(what);
          while(len && my_isspace(what[len-1])) {
            what[len-1]=0;
            len--;
          }

          /* Skip leading whitespace from the 'what' */
          whatptr=what;
          while(my_isspace(*whatptr)) {
            whatptr++;
          }

          if(strequal("path", name)) {
            co->path=strdup(whatptr);
            if(!co->path) {
              badcookie = TRUE; /* out of memory bad */
              break;
            }
          }
          else if(strequal("domain", name)) {
            /* note that this name may or may not have a preceding dot, but
               we don't care about that; we treat the names the same anyway */

            const char *domptr=whatptr;
            int dotcount=1;

            /* Count the dots, we need to make sure that there are enough
               of them. */

            if('.' == whatptr[0])
              /* don't count the initial dot, assume it */
              domptr++;

            do {
              domptr = strchr(domptr, '.');
              if(domptr) {
                domptr++;
                dotcount++;
              }
            } while(domptr);

            /* The original Netscape cookie spec defined that this domain name
               MUST have three dots (or two if one of the seven holy TLDs),
               but it seems that these kinds of cookies are in use "out there"
               so we cannot be that strict. I've therefore lowered the check
               to not allow less than two dots. */

            if(dotcount < 2) {
              /* Received and skipped a cookie with a domain using too few
                 dots. */
              badcookie=TRUE; /* mark this as a bad cookie */
              infof(data, "skipped cookie with illegal dotcount domain: %s\n",
                    whatptr);
            }
            else {
              /* Now, we make sure that our host is within the given domain,
                 or the given domain is not valid and thus cannot be set. */

              if('.' == whatptr[0])
                whatptr++; /* ignore preceding dot */

              if(!domain || tailmatch(whatptr, domain)) {
                const char *tailptr=whatptr;
                if(tailptr[0] == '.')
                  tailptr++;
                co->domain=strdup(tailptr); /* don't prefix w/dots
                                               internally */
                if(!co->domain) {
                  badcookie = TRUE;
                  break;
                }
                co->tailmatch=TRUE; /* we always do that if the domain name was
                                       given */
              }
              else {
                /* we did not get a tailmatch and then the attempted set domain
                   is not a domain to which the current host belongs. Mark as
                   bad. */
                badcookie=TRUE;
                infof(data, "skipped cookie with bad tailmatch domain: %s\n",
                      whatptr);
              }
            }
          }
          else if(strequal("version", name)) {
            co->version=strdup(whatptr);
            if(!co->version) {
              badcookie = TRUE;
              break;
            }
          }
          else if(strequal("max-age", name)) {
            /* Defined in RFC2109:

               Optional.  The Max-Age attribute defines the lifetime of the
               cookie, in seconds.  The delta-seconds value is a decimal non-
               negative integer.  After delta-seconds seconds elapse, the
               client should discard the cookie.  A value of zero means the
               cookie should be discarded immediately.

             */
            co->maxage = strdup(whatptr);
            if(!co->maxage) {
              badcookie = TRUE;
              break;
            }
            co->expires =
              atoi((*co->maxage=='\"')?&co->maxage[1]:&co->maxage[0]) + (long)now;
          }
          else if(strequal("expires", name)) {
            co->expirestr=strdup(whatptr);
            if(!co->expirestr) {
              badcookie = TRUE;
              break;
            }
            co->expires = curl_getdate(what, &now);
          }
          else if(!co->name) {
            co->name = strdup(name);
            co->value = strdup(whatptr);
            if(!co->name || !co->value) {
              badcookie = TRUE;
              break;
            }
          }
          /*
            else this is the second (or more) name we don't know
            about! */
        }
        else {
          /* this is an "illegal" <what>=<this> pair */
        }
      }
      else {
        if(sscanf(ptr, "%" MAX_COOKIE_LINE_TXT "[^;\r\n]",
                  what)) {
          if(strequal("secure", what))
            co->secure = TRUE;
          /* else,
             unsupported keyword without assign! */

        }
      }
      if(!semiptr || !*semiptr) {
        /* we already know there are no more cookies */
        semiptr = NULL;
        continue;
      }

      ptr=semiptr+1;
      while(ptr && *ptr && my_isspace(*ptr))
        ptr++;
      semiptr=strchr(ptr, ';'); /* now, find the next semicolon */

      if(!semiptr && *ptr)
        /* There are no more semicolons, but there's a final name=value pair
           coming up */
        semiptr=strchr(ptr, '\0');
    } while(semiptr);

    if(!badcookie && !co->domain) {
      if(domain) {
        /* no domain was given in the header line, set the default */
        co->domain=strdup(domain);
        if(!co->domain)
          badcookie = TRUE;
      }
    }

    if(!badcookie && !co->path && path) {
      /* no path was given in the header line, set the default  */
      char *endslash = strrchr(path, '/');
      if(endslash) {
        size_t pathlen = endslash-path+1; /* include the ending slash */
        co->path=malloc(pathlen+1); /* one extra for the zero byte */
        if(co->path) {
          memcpy(co->path, path, pathlen);
          co->path[pathlen]=0; /* zero terminate */
        }
        else
          badcookie = TRUE;
      }
    }

    free(what);

    if(badcookie || !co->name) {
      /* we didn't get a cookie name or a bad one,
         this is an illegal line, bail out */
      freecookie(co);
      return NULL;
    }

  }
  else {
    /* This line is NOT a HTTP header style line, we do offer support for
       reading the odd netscape cookies-file format here */
    char *firstptr;
    char *tok_buf;
    int fields;

    if(lineptr[0]=='#') {
      /* don't even try the comments */
      free(co);
      return NULL;
    }
    /* strip off the possible end-of-line characters */
    ptr=strchr(lineptr, '\r');
    if(ptr)
      *ptr=0; /* clear it */
    ptr=strchr(lineptr, '\n');
    if(ptr)
      *ptr=0; /* clear it */

    firstptr=strtok_r(lineptr, "\t", &tok_buf); /* tokenize it on the TAB */

    /* Here's a quick check to eliminate normal HTTP-headers from this */
    if(!firstptr || strchr(firstptr, ':')) {
      free(co);
      return NULL;
    }

    /* Now loop through the fields and init the struct we already have
       allocated */
    for(ptr=firstptr, fields=0; ptr && !badcookie;
        ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
      switch(fields) {
      case 0:
        if(ptr[0]=='.') /* skip preceding dots */
          ptr++;
        co->domain = strdup(ptr);
        if(!co->domain)
          badcookie = TRUE;
        break;
      case 1:
        /* This field got its explanation on the 23rd of May 2001 by
           Andrés García:

           flag: A TRUE/FALSE value indicating if all machines within a given
           domain can access the variable. This value is set automatically by
           the browser, depending on the value you set for the domain.

           As far as I can see, it is set to true when the cookie says
           .domain.com and to false when the domain is complete www.domain.com
        */
        co->tailmatch=(bool)strequal(ptr, "TRUE"); /* store information */
        break;
      case 2:
        /* It turns out that sometimes the file format allows the path
           field to be left unfilled; we try to detect this and work
           around it! Andrés García made us aware of this... */
        if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
          /* only if the path doesn't look like a boolean option! */
          co->path = strdup(ptr);
          if(!co->path)
            badcookie = TRUE;
          break;
        }
        /* this doesn't look like a path, make one up! */
        co->path = strdup("/");
        if(!co->path)
          badcookie = TRUE;
        fields++; /* add a field and fall down to secure */
        /* FALLTHROUGH */
      case 3:
        co->secure = (bool)strequal(ptr, "TRUE");
        break;
      case 4:
        co->expires = curlx_strtoofft(ptr, NULL, 10);
        break;
      case 5:
        co->name = strdup(ptr);
        if(!co->name)
          badcookie = TRUE;
        break;
      case 6:
        co->value = strdup(ptr);
        if(!co->value)
          badcookie = TRUE;
        break;
      }
    }
    if(6 == fields) {
      /* we got a cookie with blank contents, fix it */
      co->value = strdup("");
      if(!co->value)
        badcookie = TRUE;
      else
        fields++;
    }

    if(!badcookie && (7 != fields))
      /* we did not find the sufficient number of fields */
      badcookie = TRUE;

    if(badcookie) {
      freecookie(co);
      return NULL;
    }

  }

  if(!c->running &&    /* read from a file */
     c->newsession &&  /* clean session cookies */
     !co->expires) {   /* this is a session cookie since it doesn't expire! */
    freecookie(co);
    return NULL;
  }

  co->livecookie = c->running;

  /* now that we have parsed the incoming line, we must check if this
     supersedes an already existing cookie, which it may do if a previous one
     has the same domain and path as this */

  clist = c->cookies;
  replace_old = FALSE;
  while(clist) {
    if(strequal(clist->name, co->name)) {
      /* the names are identical */

      if(clist->domain && co->domain) {
        if(strequal(clist->domain, co->domain))
          /* The domains are identical */
          replace_old=TRUE;
      }
      else if(!clist->domain && !co->domain)
        replace_old = TRUE;

      if(replace_old) {
        /* the domains were identical */

        if(clist->path && co->path) {
          if(strequal(clist->path, co->path)) {
            replace_old = TRUE;
          }
          else
            replace_old = FALSE;
        }
        else if(!clist->path && !co->path)
          replace_old = TRUE;
        else
          replace_old = FALSE;

      }

      if(replace_old && !co->livecookie && clist->livecookie) {
        /* Both cookies matched fine, except that the already present
           cookie is "live", which means it was set from a header, while
           the new one isn't "live" and thus only read from a file. We let
           live cookies stay alive */

        /* Free the newcomer and get out of here! */
        freecookie(co);
        return NULL;
      }

      if(replace_old) {
        co->next = clist->next; /* get the next-pointer first */

        /* then free all the old pointers */
        if(clist->name)
          free(clist->name);
        if(clist->value)
          free(clist->value);
        if(clist->domain)
          free(clist->domain);
        if(clist->path)
          free(clist->path);
        if(clist->expirestr)
          free(clist->expirestr);

        if(clist->version)
          free(clist->version);
        if(clist->maxage)
          free(clist->maxage);

        *clist = *co;  /* then store all the new data */

        free(co);   /* free the newly alloced memory */
        co = clist; /* point to the previous struct instead */

        /* We have replaced a cookie, now skip the rest of the list but
           make sure the 'lastc' pointer is properly set */
        do {
          lastc = clist;
          clist = clist->next;
        } while(clist);
        break;
      }
    }
    lastc = clist;
    clist = clist->next;
  }

  if(c->running)
    /* Only show this when NOT reading the cookies from a file */
    infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, expire %d\n",
          replace_old?"Replaced":"Added", co->name, co->value,
          co->domain, co->path, co->expires);

  if(!replace_old) {
    /* then make the last item point on this new one */
    if(lastc)
      lastc->next = co;
    else
      c->cookies = co;
  }

  c->numcookies++; /* one more cookie in the jar */
  return co;
}
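
Note: the non-header branch above reads a Netscape cookie-file line as seven
TAB-separated fields. The sketch below performs the same split with strtok_r()
in the same field order (domain, tailmatch flag, path, secure flag, expiry,
name, value). struct jar_cookie and parse_jar_line() are illustrative names,
and the blank-value six-field case that the curl code patches up is
deliberately left out.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* illustrative sketch, not curl's struct Cookie */
struct jar_cookie {
  char *domain, *path, *name, *value;
  int tailmatch, secure;
  long long expires;
};

/* split one cookie-jar line on TABs into the seven Netscape fields; the
   input buffer is modified by strtok_r(). Returns 0 on success. */
static int parse_jar_line(char *line, struct jar_cookie *co)
{
  char *tok_buf;
  char *ptr = strtok_r(line, "\t", &tok_buf);
  int field;

  memset(co, 0, sizeof(*co));
  for(field = 0; ptr && (field < 7);
      ptr = strtok_r(NULL, "\t", &tok_buf), field++) {
    switch(field) {
    case 0: co->domain = ptr; break;
    case 1: co->tailmatch = !strcmp(ptr, "TRUE"); break;
    case 2: co->path = ptr; break;
    case 3: co->secure = !strcmp(ptr, "TRUE"); break;
    case 4: co->expires = strtoll(ptr, NULL, 10); break;
    case 5: co->name = ptr; break;
    case 6: co->value = ptr; break;
    }
  }
  return (field == 7) ? 0 : -1; /* all seven fields must be present */
}

int main(void)
{
  char line[] = ".example.com\tTRUE\t/\tFALSE\t2147483647\tsession\tabc123";
  struct jar_cookie co;
  if(!parse_jar_line(line, &co))
    printf("%s=%s domain=%s expires=%lld\n",
           co.name, co.value, co.domain, co.expires);
  return 0;
}
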
Example #13
0
struct Curl_local_handler * get_local_handler(const char *proto)
{
	int i = 0;
	for (; i < local_handlers_size; i++) {
		struct Curl_local_handler *handler = local_handlers[i];
#if defined(WIN32)
		if (handler && handler->protocol && _stricmp(handler->protocol, proto) == 0) {
#else
		if (handler && handler->protocol && strcasecmp(handler->protocol, proto) == 0) {
#endif
			return handler;
		}
	}
	return NULL;
}

int is_local_handler(const char *proto)
{
	return get_local_handler(proto) != NULL;
}

/*
 * FILE scheme handler.
 */

const struct Curl_handler Curl_handler_custom = {
  ZERO_NULL,                           /* scheme */
  ZERO_NULL,                            /* setup_connection */
  custom_do,                              /* do_it */
  custom_done,                            /* done */
  ZERO_NULL,                            /* do_more */
  custom_connect,                         /* connect_it */
  ZERO_NULL,                            /* connecting */
  ZERO_NULL,                            /* doing */
  ZERO_NULL,                            /* proto_getsock */
  ZERO_NULL,                            /* doing_getsock */
  ZERO_NULL,                            /* disconnect */
  0,                                    /* defport */
  PROT_FILE                            /* protocol */
};


 /*
  Check if this is a range download, and if so, set the internal variables
  properly. This code is copied from the FTP implementation and might as
  well be factored out.
 */
static CURLcode custom_range(struct connectdata *conn)
{
  curl_off_t from, to;
  curl_off_t totalsize=-1;
  char *ptr;
  char *ptr2;
  struct SessionHandle *data = conn->data;

  if(data->state.use_range && data->state.range) {
    from=curlx_strtoofft(data->state.range, &ptr, 0);
    while(ptr && *ptr && (isspace((int)*ptr) || (*ptr=='-')))
      ptr++;
    to=curlx_strtoofft(ptr, &ptr2, 0);
    if(ptr == ptr2) {
      /* we didn't get any digit */
      to=-1;
    }
    if((-1 == to) && (from>=0)) {
      /* X - */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE %" FORMAT_OFF_T " to end of file\n",
                   from));
    }
    else if(from < 0) {
      /* -Y */
      totalsize = -from;
      data->req.maxdownload = -from;
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE the last %" FORMAT_OFF_T " bytes\n",
                   totalsize));
    }
    else {
      /* X-Y */
      totalsize = to-from;
      data->req.maxdownload = totalsize+1; /* include last byte */
      data->state.resume_from = from;
      DEBUGF(infof(data, "RANGE from %" FORMAT_OFF_T
                   " getting %" FORMAT_OFF_T " bytes\n",
                   from, data->req.maxdownload));
    }
    DEBUGF(infof(data, "range-download from %" FORMAT_OFF_T
                 " to %" FORMAT_OFF_T ", totally %" FORMAT_OFF_T " bytes\n",
                 from, to, data->req.maxdownload));
  }
  else
    data->req.maxdownload = -1;
  return CURLE_OK;
}