/* Append `size` bytes from `data` to the buffer, growing it as needed.
   The stored value is always kept NUL-terminated.
   Returns 0 on success, -1 on size overflow or allocation failure. */
int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size)
{
    /* grow when the free space (minus the terminator slot) is too small */
    if(size >= strbuff->size - strbuff->length) {
        size_t grown;
        char *fresh;

        /* avoid integer overflow in the growth computation */
        if(strbuff->size > STRBUFFER_SIZE_MAX / STRBUFFER_FACTOR
           || size > STRBUFFER_SIZE_MAX - 1
           || strbuff->length > STRBUFFER_SIZE_MAX - 1 - size)
            return -1;

        grown = max(strbuff->size * STRBUFFER_FACTOR,
                    strbuff->length + size + 1);

        fresh = jsonp_malloc(grown);
        if(!fresh)
            return -1;

        memcpy(fresh, strbuff->value, strbuff->length);
        jsonp_free(strbuff->value);
        strbuff->value = fresh;
        strbuff->size = grown;
    }

    memcpy(strbuff->value + strbuff->length, data, size);
    strbuff->length += size;
    strbuff->value[strbuff->length] = '\0';
    return 0;
}
/* Grow the hashtable to the next bucket count and redistribute all pairs.
   Returns 0 on success, -1 on allocation failure.
   FIX: the new bucket array is allocated *before* the old one is freed,
   so a failed allocation leaves the table intact and usable; the original
   freed first and left `buckets` NULL on failure. */
static int hashtable_do_rehash(hashtable_t *hashtable)
{
    list_t *list, *next;
    pair_t *pair;
    size_t i, index, new_size;
    bucket_t *new_buckets;

    hashtable->num_buckets++;
    new_size = num_buckets(hashtable);

    new_buckets = jsonp_malloc(new_size * sizeof(bucket_t));
    if(!new_buckets) {
        /* roll back so the table keeps working with the old buckets */
        hashtable->num_buckets--;
        return -1;
    }

    jsonp_free(hashtable->buckets);
    hashtable->buckets = new_buckets;

    /* every bucket starts empty: first == last == list sentinel */
    for(i = 0; i < new_size; i++) {
        hashtable->buckets[i].first = hashtable->buckets[i].last =
            &hashtable->list;
    }

    /* detach the existing pair chain and re-insert each pair into its
       new bucket */
    list = hashtable->list.next;
    list_init(&hashtable->list);

    for(; list != &hashtable->list; list = next) {
        next = list->next;
        pair = list_to_pair(list);
        index = pair->hash % new_size;
        insert_to_bucket(hashtable, &hashtable->buckets[index], &pair->list);
    }

    return 0;
}
/* Box a double into a newly allocated JSON_REAL value.
   Returns NULL on allocation failure. */
json_t *json_real(double value)
{
    json_real_t *node = (json_real_t *) jsonp_malloc(sizeof(json_real_t));

    if(!node)
        return NULL;

    json_init(&node->json, JSON_REAL);
    node->value = value;
    return &node->json;
}
/* Box an integer into a newly allocated JSON_INTEGER value.
   Returns NULL on allocation failure. */
json_t *json_integer(json_int_t value)
{
    json_integer_t *node = (json_integer_t *) jsonp_malloc(sizeof(json_integer_t));

    if(!node)
        return NULL;

    json_init(&node->json, JSON_INTEGER);
    node->value = value;
    return &node->json;
}
/* File-local strdup built on jsonp_malloc.
   Returns NULL on allocation failure. */
static char *jsonp_strdup(const char *str)
{
    size_t bytes = strlen(str) + 1;   /* include the terminator */
    char *copy = jsonp_malloc(bytes);

    if(copy)
        memcpy(copy, str, bytes);
    return copy;
}
json_t *json_array(void) { json_array_t *array = (json_array_t *) jsonp_malloc(sizeof(json_array_t)); if(!array) return NULL; json_init(&array->json, JSON_ARRAY); array->entries = 0; array->size = 8; array->table = (json_t **) jsonp_malloc(array->size * sizeof(json_t *)); if(!array->table) { jsonp_free(array); return NULL; } array->visited = 0; return &array->json; }
/* Copy exactly `len` bytes of `str` into a fresh buffer and terminate it.
   Returns NULL on allocation failure. */
char *jsonp_strndup(const char *str, size_t len)
{
    char *copy = jsonp_malloc(len + 1);

    if(!copy)
        return NULL;

    memcpy(copy, str, len);
    copy[len] = '\0';
    return copy;
}
/* Initialize `strbuff` with a minimum-sized buffer holding the empty string.
   Returns 0 on success, -1 on allocation failure. */
int strbuffer_init(strbuffer_t *strbuff)
{
    strbuff->size = STRBUFFER_MIN_SIZE;
    strbuff->length = 0;

    strbuff->value = jsonp_malloc(strbuff->size);
    if(!strbuff->value)
        return -1;

    /* start out as the empty string */
    strbuff->value[0] = '\0';
    return 0;
}
/* Insert or replace `key` -> `value` in the hashtable.
   An existing value for `key` is decref'd and replaced in place; a new
   key is stored in a freshly allocated pair (the key text lives in the
   pair's flexible array member).
   Returns 0 on success, -1 on rehash/allocation failure or an
   overlong key. */
int hashtable_set(hashtable_t *hashtable, const char *key, size_t serial, json_t *value)
{
    pair_t *match;
    bucket_t *slot;
    size_t hash, slot_index, key_len;

    /* rehash if the load ratio exceeds 1 */
    if(hashtable->size >= hashsize(hashtable->order)
       && hashtable_do_rehash(hashtable))
        return -1;

    hash = hash_str(key);
    slot_index = hash & hashmask(hashtable->order);
    slot = &hashtable->buckets[slot_index];

    match = hashtable_find_pair(hashtable, slot, key, hash);
    if(match) {
        /* existing key: swap the value in place */
        json_decref(match->value);
        match->value = value;
        return 0;
    }

    /* new key: offsetof(...) gives the size of pair_t without the
       flexible `key` member, so exactly the right amount is allocated */
    key_len = strlen(key);
    if(key_len >= (size_t)-1 - offsetof(pair_t, key)) {
        /* refuse pathologically long keys to avoid overflow below */
        return -1;
    }

    match = jsonp_malloc(offsetof(pair_t, key) + key_len + 1);
    if(!match)
        return -1;

    match->hash = hash;
    match->serial = serial;
    memcpy(match->key, key, key_len + 1);   /* includes the terminator */
    match->value = value;

    list_init(&match->list);
    insert_to_bucket(hashtable, slot, &match->list);

    hashtable->size++;
    return 0;
}
/* Duplicate a NUL-terminated string with jsonp_malloc.
   Returns NULL if the length would overflow `len + 1` or on allocation
   failure. */
char *jsonp_strdup(const char *str)
{
    size_t len = strlen(str);
    char *dup;

    if(len == (size_t)-1)
        return NULL;   /* len + 1 would wrap to 0 */

    dup = jsonp_malloc(len + 1);
    if(!dup)
        return NULL;

    memcpy(dup, str, len + 1);
    return dup;
}
/* Set up `stream` either over a caller-supplied buffer (fill == NULL)
   or over an internally allocated refill buffer driven by `fill`.
   NOTE(review): the MAX_BUF_LEN allocation is not NULL-checked here —
   presumably the caller inspects stream->buffer; confirm. */
static void init_stream(stream_t *stream, const void *buffer, size_t buflen,
                        fill_func fill, void *data)
{
    if(fill) {
        /* refillable stream: start with an empty scratch buffer */
        stream->buffer = jsonp_malloc(MAX_BUF_LEN);
        stream->buflen = 0;
    }
    else {
        /* load from the given buffer */
        stream->buffer = (char *) buffer;
        stream->buflen = buflen;
    }

    stream->fill = fill;
    stream->data = data;
    stream->pos = 0;
    stream->stream_pos = 0;
}
/* Create a new, empty JSON object.
   Returns NULL on allocation or hashtable-initialization failure. */
json_t *json_object(void)
{
    json_object_t *obj = jsonp_malloc(sizeof(json_object_t));

    if(!obj)
        return NULL;
    json_init(&obj->json, JSON_OBJECT);

    if(hashtable_init(&obj->hashtable)) {
        jsonp_free(obj);
        return NULL;
    }

    obj->serial = 0;
    obj->visited = 0;
    return &obj->json;
}
/* Insert or replace `key` -> `value` in the hashtable.
   Returns 0 on success, -1 on rehash/allocation failure or an
   overlong key.
   FIX: guard `offsetof(pair_t, key) + strlen(key) + 1` against size_t
   overflow for pathologically long keys, matching the guarded
   hashtable_set variant elsewhere in this file. */
int hashtable_set(hashtable_t *hashtable, const char *key, size_t serial, json_t *value)
{
    pair_t *pair;
    bucket_t *bucket;
    size_t hash, index;

    /* rehash if the load ratio exceeds 1 */
    if(hashtable->size >= num_buckets(hashtable))
        if(hashtable_do_rehash(hashtable))
            return -1;

    hash = hash_str(key);
    index = hash % num_buckets(hashtable);
    bucket = &hashtable->buckets[index];
    pair = hashtable_find_pair(hashtable, bucket, key, hash);

    if(pair) {
        /* existing key: swap the value in place */
        json_decref(pair->value);
        pair->value = value;
    }
    else {
        /* offsetof(...) returns the size of pair_t without the last,
           flexible member. This way, the correct amount is allocated. */
        size_t len = strlen(key);
        if(len >= (size_t)-1 - offsetof(pair_t, key)) {
            /* avoid integer overflow in the allocation size */
            return -1;
        }

        pair = jsonp_malloc(offsetof(pair_t, key) + len + 1);
        if(!pair)
            return -1;

        pair->hash = hash;
        pair->serial = serial;
        memcpy(pair->key, key, len + 1);   /* includes the terminator */
        pair->value = value;
        list_init(&pair->list);
        insert_to_bucket(hashtable, bucket, &pair->list);

        hashtable->size++;
    }

    return 0;
}
/* Initialize an empty hashtable with the smallest bucket count.
   Returns 0 on success, -1 on allocation failure. */
int hashtable_init(hashtable_t *hashtable)
{
    size_t i, buckets;

    hashtable->size = 0;
    hashtable->num_buckets = 0;  /* index to primes[] */

    buckets = num_buckets(hashtable);
    hashtable->buckets = jsonp_malloc(buckets * sizeof(bucket_t));
    if(!hashtable->buckets)
        return -1;

    list_init(&hashtable->list);

    /* every bucket starts empty: first == last == list sentinel */
    for(i = 0; i < buckets; i++) {
        hashtable->buckets[i].first = hashtable->buckets[i].last =
            &hashtable->list;
    }

    return 0;
}
/* Initialize an empty hashtable at the initial power-of-two order.
   Returns 0 on success, -1 on allocation failure. */
int hashtable_init(hashtable_t *hashtable)
{
    size_t i, buckets;

    hashtable->size = 0;
    hashtable->order = INITIAL_HASHTABLE_ORDER;

    buckets = hashsize(hashtable->order);
    hashtable->buckets = jsonp_malloc(buckets * sizeof(bucket_t));
    if(!hashtable->buckets)
        return -1;

    list_init(&hashtable->list);

    /* every bucket starts empty: first == last == list sentinel */
    for(i = 0; i < buckets; i++) {
        hashtable->buckets[i].first = hashtable->buckets[i].last =
            &hashtable->list;
    }

    return 0;
}
json_t *json_object(void) { json_object_t *object = (json_object_t *) jsonp_malloc(sizeof(json_object_t)); if(!object) return NULL; json_init(&object->json, JSON_OBJECT); if(hashtable_init(&object->hashtable, hash_key, key_equal, jsonp_free, value_decref)) { jsonp_free(object); return NULL; } object->serial = 0; object->visited = 0; return &object->json; }
json_t *json_string_nocheck(const char *value) { json_string_t *string; if(!value) return NULL; string = (json_string_t *) jsonp_malloc(sizeof(json_string_t)); if(!string) return NULL; json_init(&string->json, JSON_STRING); string->value = jsonp_strdup(value); if(!string->value) { jsonp_free(string); return NULL; } return &string->json; }
json_t *json_object(void) { json_object_t *object = jsonp_malloc(sizeof(json_object_t)); if(!object) return NULL; if (!hashtable_seed) { /* Autoseed */ json_object_seed(0); } json_init(&object->json, JSON_OBJECT); if(hashtable_init(&object->hashtable)) { jsonp_free(object); return NULL; } object->serial = 0; return &object->json; }
/* Store `value` under `key` in object `json`, stealing the caller's
   reference to `value` (it is decref'd on every failure path).
   Returns 0 on success, -1 on failure.
   FIX: guard `offsetof(object_key_t, key) + strlen(key) + 1` against
   size_t overflow for pathologically long keys, matching the guarded
   hashtable_set variant elsewhere in this file. */
int json_object_set_new_nocheck(json_t *json, const char *key, json_t *value)
{
    json_object_t *object;
    object_key_t *k;
    size_t len;

    if(!key || !value)
        return -1;

    /* storing an object inside itself is not allowed */
    if(!json_is_object(json) || json == value) {
        json_decref(value);
        return -1;
    }
    object = json_to_object(json);

    /* offsetof(...) returns the size of object_key_t without the last,
       flexible member. This way, the correct amount is allocated. */
    len = strlen(key);
    if(len >= (size_t)-1 - offsetof(object_key_t, key)) {
        /* avoid integer overflow in the allocation size */
        json_decref(value);
        return -1;
    }

    k = (object_key_t *) jsonp_malloc(offsetof(object_key_t, key) + len + 1);
    if(!k) {
        json_decref(value);
        return -1;
    }

    k->serial = object->serial++;
    memcpy(k->key, key, len + 1);   /* includes the terminator */

    /* NOTE(review): if hashtable_set fails, `k` appears to leak —
       whether hashtable_set takes ownership on failure is not visible
       here; confirm before freeing. */
    if(hashtable_set(&object->hashtable, k, value)) {
        json_decref(value);
        return -1;
    }

    return 0;
}
/* Scan a JSON string token. On entry the opening '"' has already been
   consumed. Two passes: pass 1 reads and validates the raw text into
   lex->saved_text; pass 2 decodes escapes into a freshly allocated
   result buffer. On success lex->token == TOKEN_STRING and
   lex->value.string holds the decoded NUL-terminated value; on any
   failure lex->token stays TOKEN_INVALID and the partial buffer (if
   any) is freed at `out`. */
static void lex_scan_string(lex_t *lex, json_error_t *error)
{
    int c;
    const char *p;  /* read cursor over the raw saved text */
    char *t;        /* write cursor into the decoded result */
    int i;

    lex->value.string = NULL;
    lex->token = TOKEN_INVALID;

    c = lex_get_save(lex, error);

    /* pass 1: validate up to the closing quote */
    while(c != '"') {
        if(c == STREAM_STATE_ERROR)
            goto out;

        else if(c == STREAM_STATE_EOF) {
            error_set(error, lex, "premature end of input");
            goto out;
        }

        else if(0 <= c && c <= 0x1F) {
            /* control character */
            lex_unget_unsave(lex, c);
            if(c == '\n')
                /* NOTE(review): the trailing `c` argument is unused by
                   this format string — harmless extra vararg */
                error_set(error, lex, "unexpected newline", c);
            else
                error_set(error, lex, "control character 0x%x", c);
            goto out;
        }

        else if(c == '\\') {
            c = lex_get_save(lex, error);
            if(c == 'u') {
                /* \uXXXX: require exactly four hex digits */
                c = lex_get_save(lex, error);
                for(i = 0; i < 4; i++) {
                    if(!l_isxdigit(c)) {
                        error_set(error, lex, "invalid escape");
                        goto out;
                    }
                    c = lex_get_save(lex, error);
                }
            }
            else if(c == '"' || c == '\\' || c == '/' || c == 'b' ||
                    c == 'f' || c == 'n' || c == 'r' || c == 't')
                c = lex_get_save(lex, error);
            else {
                error_set(error, lex, "invalid escape");
                goto out;
            }
        }
        else
            c = lex_get_save(lex, error);
    }

    /* the actual value is at most of the same length as the source
       string, because:
         - shortcut escapes (e.g. "\t") (length 2) are converted to 1 byte
         - a single \uXXXX escape (length 6) is converted to at most 3 bytes
         - two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair
           are converted to 4 bytes */
    lex->value.string = jsonp_malloc(lex->saved_text.length + 1);
    if(!lex->value.string) {
        /* this is not very nice, since TOKEN_INVALID is returned */
        goto out;
    }

    /* the target */
    t = lex->value.string;

    /* + 1 to skip the " */
    p = strbuffer_value(&lex->saved_text) + 1;

    /* pass 2: decode escapes; raw text was validated above, so bare
       asserts cover the "impossible" cases */
    while(*p != '"') {
        if(*p == '\\') {
            p++;
            if(*p == 'u') {
                char buffer[4];
                int length;
                int32_t value;

                value = decode_unicode_escape(p);
                p += 5;

                if(0xD800 <= value && value <= 0xDBFF) {
                    /* surrogate pair: a high surrogate must be followed
                       by a \uXXXX low surrogate */
                    if(*p == '\\' && *(p + 1) == 'u') {
                        int32_t value2 = decode_unicode_escape(++p);
                        p += 5;

                        if(0xDC00 <= value2 && value2 <= 0xDFFF) {
                            /* valid second surrogate: combine into one
                               code point above U+FFFF */
                            value = ((value - 0xD800) << 10) +
                                    (value2 - 0xDC00) + 0x10000;
                        }
                        else {
                            /* invalid second surrogate */
                            error_set(error, lex,
                                      "invalid Unicode '\\u%04X\\u%04X'",
                                      value, value2);
                            goto out;
                        }
                    }
                    else {
                        /* no second surrogate */
                        error_set(error, lex, "invalid Unicode '\\u%04X'",
                                  value);
                        goto out;
                    }
                }
                else if(0xDC00 <= value && value <= 0xDFFF) {
                    /* lone low surrogate is never valid */
                    error_set(error, lex, "invalid Unicode '\\u%04X'", value);
                    goto out;
                }
                else if(value == 0) {
                    /* embedded NUL would truncate the C string */
                    error_set(error, lex, "\\u0000 is not allowed");
                    goto out;
                }

                if(utf8_encode(value, buffer, &length))
                    assert(0);

                memcpy(t, buffer, length);
                t += length;
            }
            else {
                /* shortcut escapes */
                switch(*p) {
                    case '"': case '\\': case '/':
                        *t = *p; break;
                    case 'b': *t = '\b'; break;
                    case 'f': *t = '\f'; break;
                    case 'n': *t = '\n'; break;
                    case 'r': *t = '\r'; break;
                    case 't': *t = '\t'; break;
                    default: assert(0);
                }
                t++;
                p++;
            }
        }
        else
            *(t++) = *(p++);
    }
    *t = '\0';
    lex->token = TOKEN_STRING;
    return;

out:
    jsonp_free(lex->value.string);
}
static char *parse_string(stream_t *stream, size_t flags, json_error_t *error) { char *endptr; ssize_t colon; size_t pos = 0; size_t length; char *string; (void) flags; colon = search(stream, ':'); if (colon < 0) { error_set(error, stream, "unterminated string length"); return NULL; } if (validate_number(stream, error)) return NULL; /* can overflow, but who cares? */ length = strtoul(&stream->buffer[stream->pos], &endptr, 10); if (endptr != &stream->buffer[colon]) { error_set(error, stream, "invalid string length"); return NULL; } stream->pos = colon + 1; string = jsonp_malloc(length + 1); if (!string) { error_set(error, stream, "out of memory (string length %zd)", length); return NULL; } string[length] = '\0'; while (pos < length) { char *zero; size_t chunk = stream->buflen - stream->pos; if (chunk == 0) { if (stream_refill(stream) <= 0) { error_set(error, stream, "partial string: %zd/%zd", pos, length); goto error; } continue; } if (chunk > length - pos) chunk = length - pos; /* null bytes are not allowed inside strings */ zero = memchr(&stream->buffer[stream->pos], '\0', chunk); if (zero) { stream->pos = zero - stream->buffer; error_set(error, stream, "string contains a zero byte"); goto error; } memcpy(&string[pos], &stream->buffer[stream->pos], chunk); stream->pos += chunk; pos += chunk; } return string; error: jsonp_free(string); return NULL; }
static int lex_scan_number(lex_t *lex, int c, json_error_t *error) { const char *saved_text; char *end; double value; lex->token = TOKEN_INVALID; if(c == '-') c = lex_get_save(lex, error); if(c == '0') { c = lex_get_save(lex, error); if(l_isdigit(c)) { lex_unget_unsave(lex, c); goto out; } } else if(l_isdigit(c)) { c = lex_get_save(lex, error); while(l_isdigit(c)) c = lex_get_save(lex, error); } else { lex_unget_unsave(lex, c); goto out; } if(c != '.' && c != 'E' && c != 'e') { json_int_t value; lex_unget_unsave(lex, c); saved_text = strbuffer_value(&lex->saved_text); errno = 0; value = json_strtoint(saved_text, &end, 10); if(errno == ERANGE) { if(value < 0) error_set(error, lex, "too big negative integer"); else error_set(error, lex, "too big integer"); goto out; } assert(end == saved_text + lex->saved_text.length); lex->token = TOKEN_INTEGER; lex->value.integer = value; // save the string for snumber lex->value.string = jsonp_malloc(lex->saved_text.length + 1); return 0; } if(c == '.') { c = lex_get(lex, error); if(!l_isdigit(c)) { lex_unget(lex, c); goto out; } lex_save(lex, c); c = lex_get_save(lex, error); while(l_isdigit(c)) c = lex_get_save(lex, error); } if(c == 'E' || c == 'e') { c = lex_get_save(lex, error); if(c == '+' || c == '-') c = lex_get_save(lex, error); if(!l_isdigit(c)) { lex_unget_unsave(lex, c); goto out; } c = lex_get_save(lex, error); while(l_isdigit(c)) c = lex_get_save(lex, error); } lex_unget_unsave(lex, c); if(jsonp_strtod(&lex->saved_text, &value)) { error_set(error, lex, "real number overflow"); goto out; } lex->token = TOKEN_REAL; lex->value.real = value; // save the string for snumber lex->value.string = jsonp_malloc(lex->saved_text.length + 1); return 0; out: return -1; }