// Example no. 1
// analyze header and try to find size of the part
// return 0 -> not a content length header, return -1 : not a header or error, return 1 : success
// Analyze a multipart answer header line and try to extract the size of the part.
//
// Expected input is a "Content-Range"-style header of the form
//   <name><delim> <start>-<end>/<total>
// where <start> and <end> are inclusive byte offsets.
//
// buffer / buffer_len : raw header line (not necessarily NUL-terminated at buffer_len)
// part_size           : out — number of bytes in the part (end - start + 1)
// part_offset         : out — offset of the first byte of the part
//
// return 0  -> not a content length header
// return -1 -> not a header, or malformed / out-of-range values
// return 1  -> success, *part_size and *part_offset are set
int find_header_params(char* buffer, dav_size_t buffer_len, dav_size_t* part_size, dav_off_t* part_offset){
    static const std::string delimiter(" bytes-/\t");
    char * p = header_delimiter(buffer, buffer_len);
    if(p == NULL)
        return -1;
    // check header type: must match the byte-range answer header
    if( compare_ncase(ans_header_byte_range, 0, p - buffer, buffer) !=0)
        return 0;

    // parse the "<start>-<end>/<total>" part after the delimiter
    std::vector<std::string> tokens = tokenSplit(std::string(p+1),delimiter);
    if(tokens.size() < 2)
        return -1;

    long chunk_size[2];
    for(int i =0; i <2;++i){
        errno = 0;  // reset so strtol overflow (ERANGE) detection below is reliable
        chunk_size[i]= strtol(tokens[i].c_str(), &p, 10);
        // reject overflow, negative offsets, and trailing non-numeric garbage
        if(errno != 0 || chunk_size[i] == LONG_MAX || chunk_size[i] < 0 || *p != '\0'){
            errno =0;
            return -1;
        }
    }
    // an inverted range (end < start) is malformed
    if(chunk_size[1] < chunk_size[0])
        return -1;

    *part_offset= chunk_size[0];
    // range bounds are inclusive, hence the +1
    *part_size =  chunk_size[1]-chunk_size[0]+1;
    return 1;
}
// Example no. 2
// Extract the multipart boundary string from a Content-Type header value.
//
// buffer   : full Content-Type header value (e.g. 'multipart/byteranges; boundary="xyz"')
// boundary : out — receives the extracted boundary on success
// err      : unused here; kept for interface compatibility with the error-reporting convention
//
// return 0 on success, -1 if no valid boundary field is present.
int http_extract_boundary_from_content_type(const std::string & buffer, std::string & boundary, DavixError** err){
    dav_size_t pos_bound;
    static const std::string delimiter = "\";";
    if( (pos_bound= buffer.find(ans_header_boundary_field)) != std::string::npos){
        std::vector<std::string> tokens = tokenSplit(buffer.substr(pos_bound + ans_header_boundary_field.size()), delimiter);
        // RFC 2046: a boundary is 1 to 70 characters long
        if( tokens.size() >= 1
            && tokens[0].size() > 0
            && tokens[0].size() <= 70){
            std::swap(boundary,tokens[0]);
            // bug fix: log AFTER the swap so we report the boundary actually
            // extracted, not the caller's previous value
            DAVIX_SLOG(DAVIX_LOG_TRACE, DAVIX_LOG_CHAIN, "Multi part boundary: {}", boundary);
            return 0;
        }
    }
    return -1;
}
// Example no. 3
/* Converts a line of text into a linked list of Token
 * objects and returns the head of that list. The heavy
 * lifting (FSM that rewrites spaces in str as null bytes
 * and emits one Token per word) is done by strToToken;
 * this function then walks the list, splitting any node
 * whose value is longer than one character.
 */
Token* Tokenize(char* str) {

    Token* cursor = strToToken(str);

    if ( ERROR_CODE ) return (Token*)0;

    /* A list with no value needs no splitting. */
    if ( !cursor->value ) return cursor;

    for ( ;; ) {
        /* Single-character values are left alone;
         * longer ones get split in place. */
        if ( strlen(cursor->value) > 1 )
            cursor = tokenSplit(cursor);

        if ( !cursor->next ) break;
        cursor = cursor->next;
    }

    return tokenHead(cursor);

}