/* {{{ _mysqlnd_perealloc */
/* Debug/statistics-aware wrapper around perealloc().  Tracked blocks carry a
 * size_t size header immediately before the user pointer (see the old_size
 * read below); REAL_PTR/REAL_SIZE/FAKE_PTR presumably translate between the
 * user pointer and the raw allocation -- TODO confirm against the macro
 * definitions. */
void * _mysqlnd_perealloc(void *ptr, size_t new_size, zend_bool persistent MYSQLND_MEM_D)
{
	void *ret;
	zend_bool collect_memory_statistics = MYSQLND_G(collect_memory_statistics);
	/* Previous block size lives in the size_t just before the user pointer;
	 * only read it when statistics are on and ptr is non-NULL. */
	size_t old_size = collect_memory_statistics && ptr? *(size_t *) (((char*)ptr) - sizeof(size_t)) : 0;
#if PHP_DEBUG
	/* Fault-injection counter for the matching allocator class: realloc is
	 * performed while non-zero (negative appears to mean "never fail") and a
	 * simulated failure is produced once it reaches zero. */
	long * threshold = persistent? &MYSQLND_G(debug_realloc_fail_threshold):&MYSQLND_G(debug_erealloc_fail_threshold);
#endif
	DBG_ENTER(mysqlnd_perealloc_name);
	DBG_INF_FMT("file=%-15s line=%4d", strrchr(__zend_filename, PHP_DIR_SEPARATOR) + 1, __zend_lineno);
	DBG_INF_FMT("ptr=%p old_size=%lu new_size=%lu persistent=%u", ptr, old_size, new_size, persistent);

#if PHP_DEBUG
	/* -1 is also "true" */
	if (*threshold) {
#endif
		ret = perealloc(REAL_PTR(ptr), REAL_SIZE(new_size), persistent);
#if PHP_DEBUG
		--*threshold;
	} else if (*threshold == 0) {
		/* Simulated OOM for testing allocation-failure paths. */
		ret = NULL;
	}
#endif

	DBG_INF_FMT("new_ptr=%p", (char*)ret);

	if (ret && collect_memory_statistics) {
		enum mysqlnd_collected_stats s1 = persistent? STAT_MEM_REALLOC_COUNT:STAT_MEM_EREALLOC_COUNT;
		enum mysqlnd_collected_stats s2 = persistent? STAT_MEM_REALLOC_AMOUNT:STAT_MEM_EREALLOC_AMOUNT;
		/* Stamp the new size into the hidden header and bump the counters. */
		*(size_t *) ret = new_size;
		MYSQLND_INC_GLOBAL_STATISTIC_W_VALUE2(s1, 1, s2, new_size);
	}
	DBG_RETURN(FAKE_PTR(ret));
}
/* Realloc callback handed to libmemcached: resizes `mem` through PHP's
 * perealloc(), using the persistence flag stashed in the memcached handle's
 * user data to select the persistent or per-request allocator. */
static void *s_perealloc_fn(const memcached_st *memc, void *mem, const size_t size, void *context)
{
	zend_bool *persistent_flag;

	persistent_flag = memcached_get_user_data(memc);
	return perealloc(mem, size, *persistent_flag);
}
/* Push `element` onto the stack, growing the element array in
 * STACK_BLOCK_SIZE increments as needed.
 * Returns the index the element was stored at, or FAILURE if the
 * reallocation fails (in which case the stack is left unchanged).
 *
 * Fixes vs. the original:
 *  - the perealloc() result no longer overwrites stack->elements directly,
 *    so the old block is not leaked (and remains usable) on failure;
 *  - stack->max is only advanced after a successful reallocation, keeping
 *    the capacity bookkeeping consistent with the actual allocation;
 *  - element size is sizeof(void *) (the stored type), not sizeof(void **).
 */
int sapi_stack_push(sapi_stack *stack, void *element)
{
	if (stack->top >= stack->max) {
		/* we need to allocate more memory */
		void **elements = (void **) perealloc(stack->elements,
				sizeof(void *) * (stack->max + STACK_BLOCK_SIZE),
				stack->persistent);
		if (!elements) {
			return FAILURE;
		}
		stack->elements = elements;
		stack->max += STACK_BLOCK_SIZE;
	}
	stack->elements[stack->top] = (void *) element;
	return stack->top++;
}
/* {{{ php_stream_bucket_attach */
/* Shared implementation behind stream_bucket_append()/stream_bucket_prepend():
 * re-attaches a userland bucket object to a brigade, first syncing any
 * modified "data" string property back into the bucket's buffer. */
static void php_stream_bucket_attach(int append, INTERNAL_FUNCTION_PARAMETERS)
{
	zval *zbrigade, *zobject;
	zval *pzbucket, *pzdata;
	php_stream_bucket_brigade *brigade;
	php_stream_bucket *bucket;

	ZEND_PARSE_PARAMETERS_START(2, 2)
		Z_PARAM_RESOURCE(zbrigade)
		Z_PARAM_OBJECT(zobject)
	ZEND_PARSE_PARAMETERS_END_EX(RETURN_FALSE);

	/* The bucket resource is carried in the object's "bucket" property. */
	if (NULL == (pzbucket = zend_hash_str_find(Z_OBJPROP_P(zobject), "bucket", sizeof("bucket")-1))) {
		php_error_docref(NULL, E_WARNING, "Object has no bucket property");
		RETURN_FALSE;
	}

	if ((brigade = (php_stream_bucket_brigade*)zend_fetch_resource(
					Z_RES_P(zbrigade), PHP_STREAM_BRIGADE_RES_NAME, le_bucket_brigade)) == NULL) {
		RETURN_FALSE;
	}

	if ((bucket = (php_stream_bucket *)zend_fetch_resource_ex(pzbucket, PHP_STREAM_BUCKET_RES_NAME, le_bucket)) == NULL) {
		RETURN_FALSE;
	}

	if (NULL != (pzdata = zend_hash_str_find(Z_OBJPROP_P(zobject), "data", sizeof("data")-1)) && Z_TYPE_P(pzdata) == IS_STRING) {
		/* Userland may have rewritten the "data" property: copy it back
		 * into the bucket, resizing the buffer if the length changed. */
		if (!bucket->own_buf) {
			/* Can't write into a shared buffer; obtain a private copy. */
			bucket = php_stream_bucket_make_writeable(bucket);
		}
		if (bucket->buflen != Z_STRLEN_P(pzdata)) {
			bucket->buf = perealloc(bucket->buf, Z_STRLEN_P(pzdata), bucket->is_persistent);
			bucket->buflen = Z_STRLEN_P(pzdata);
		}
		memcpy(bucket->buf, Z_STRVAL_P(pzdata), bucket->buflen);
	}

	if (append) {
		php_stream_bucket_append(brigade, bucket);
	} else {
		php_stream_bucket_prepend(brigade, bucket);
	}
	/* This is a hack necessary to accommodate situations where bucket is appended to the stream
	 * multiple times. See bug35916.phpt for reference.
	 */
	if (bucket->refcount == 1) {
		bucket->refcount++;
	}
}
/* Double the hash table's bucket array and rehash every entry into it.
 * Silently does nothing when doubling nTableSize would overflow
 * (the shifted value is no longer positive).
 * NOTE(review): the perealloc() result is not checked here; presumably the
 * Zend allocator bails out on OOM rather than returning NULL -- confirm for
 * the persistent path. */
static void zend_hash_do_resize(HashTable *ht)
{
	Bucket **t;
#ifdef ZEND_SIGNALS
	TSRMLS_FETCH();
#endif

	IS_CONSISTENT(ht);

	if ((ht->nTableSize << 1) > 0) {	/* Let's double the table size */
		t = (Bucket **) perealloc(ht->arBuckets, (ht->nTableSize << 1) * sizeof(Bucket *), ht->persistent);
		/* Keep interrupts out while the table is in an inconsistent state
		 * (buckets reallocated but not yet rehashed). */
		HANDLE_BLOCK_INTERRUPTIONS();
		ht->arBuckets = t;
		ht->nTableSize = (ht->nTableSize << 1);
		ht->nTableMask = ht->nTableSize - 1;	/* size is a power of two, so mask = size-1 */
		zend_hash_rehash(ht);
		HANDLE_UNBLOCK_INTERRUPTIONS();
	}
}
/* {{{ php_stream_bucket_attach */
/* Shared implementation behind stream_bucket_append()/stream_bucket_prepend()
 * (legacy zend_parse_parameters/ZEND_FETCH_RESOURCE variant): re-attaches a
 * userland bucket object to a brigade, first syncing any modified "data"
 * string property back into the bucket's buffer. */
static void php_stream_bucket_attach(int append, INTERNAL_FUNCTION_PARAMETERS)
{
	zval *zbrigade, *zobject;
	zval *pzbucket, *pzdata;
	php_stream_bucket_brigade *brigade;
	php_stream_bucket *bucket;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "zo", &zbrigade, &zobject) == FAILURE) {
		RETURN_FALSE;
	}

	/* The bucket resource is carried in the object's "bucket" property. */
	if (NULL == (pzbucket = zend_hash_str_find(Z_OBJPROP_P(zobject), "bucket", sizeof("bucket")-1))) {
		php_error_docref(NULL, E_WARNING, "Object has no bucket property");
		RETURN_FALSE;
	}

	/* Note: these macros RETURN on fetch failure. */
	ZEND_FETCH_RESOURCE(brigade, php_stream_bucket_brigade *, zbrigade, -1, PHP_STREAM_BRIGADE_RES_NAME, le_bucket_brigade);
	ZEND_FETCH_RESOURCE(bucket, php_stream_bucket *, pzbucket, -1, PHP_STREAM_BUCKET_RES_NAME, le_bucket);

	if (NULL != (pzdata = zend_hash_str_find(Z_OBJPROP_P(zobject), "data", sizeof("data")-1)) && Z_TYPE_P(pzdata) == IS_STRING) {
		/* Userland may have rewritten the "data" property: copy it back
		 * into the bucket, resizing the buffer if the length changed. */
		if (!bucket->own_buf) {
			/* Can't write into a shared buffer; obtain a private copy. */
			bucket = php_stream_bucket_make_writeable(bucket);
		}
		if ((int)bucket->buflen != Z_STRLEN_P(pzdata)) {
			bucket->buf = perealloc(bucket->buf, Z_STRLEN_P(pzdata), bucket->is_persistent);
			bucket->buflen = Z_STRLEN_P(pzdata);
		}
		memcpy(bucket->buf, Z_STRVAL_P(pzdata), bucket->buflen);
	}

	if (append) {
		php_stream_bucket_append(brigade, bucket);
	} else {
		php_stream_bucket_prepend(brigade, bucket);
	}
	/* This is a hack necessary to accommodate situations where bucket is appended to the stream
	 * multiple times. See bug35916.phpt for reference.
	 */
	if (bucket->refcount == 1) {
		bucket->refcount++;
	}
}
static void gc_grow_root_buffer(void) { size_t new_size; if (GC_G(buf_size) >= GC_MAX_BUF_SIZE) { if (!GC_G(gc_full)) { zend_error(E_WARNING, "GC buffer overflow (GC disabled)\n"); GC_G(gc_active) = 1; GC_G(gc_protected) = 1; GC_G(gc_full) = 1; return; } } if (GC_G(buf_size) < GC_BUF_GROW_STEP) { new_size = GC_G(buf_size) * 2; } else { new_size = GC_G(buf_size) + GC_BUF_GROW_STEP; } if (new_size > GC_MAX_BUF_SIZE) { new_size = GC_MAX_BUF_SIZE; } GC_G(buf) = perealloc(GC_G(buf), sizeof(gc_root_buffer) * new_size, 1); GC_G(buf_size) = new_size; }
*/ /* $Id$ */ #include "php_solr.h" /* {{{ static inline void solr_string_alloc(solr_string_t *dest, size_t length, size_t *new_length SOLR_MEM_DEBUG_DC) */ static inline void solr_string_alloc(solr_string_t *dest, size_t length, size_t *new_length SOLR_MEM_DEBUG_DC) { /* If buffer is empty */ if (!dest->str) { (*new_length) = length; dest->cap = ((*new_length) < SOLR_STRING_START_SIZE) ? SOLR_STRING_START_SIZE : (length + SOLR_STRING_INCREMENT_SIZE); dest->str = (solr_char_t *) perealloc(dest->str, dest->cap * sizeof(solr_char_t), SOLR_STRING_PERSISTENT); } else { (*new_length) = dest->len + length; /* If the buffer full, get more space */ if ((*new_length) >= dest->cap) { dest->cap = (*new_length) + SOLR_STRING_INCREMENT_SIZE; dest->str = (solr_char_t *) perealloc(dest->str, dest->cap * sizeof(solr_char_t), SOLR_STRING_PERSISTENT); } } #ifdef SOLR_MEM_DEBUG
/* Flush `filter` and everything downstream of it, pushing any resulting
 * data either into the stream's read buffer (read-filter chain) or out
 * through the stream's write op (write-filter chain).
 * `finish` selects PSFS_FLAG_FLUSH_CLOSE vs PSFS_FLAG_FLUSH_INC for the
 * first filter invocation.  Returns SUCCESS or FAILURE.
 *
 * Fixes vs. the original:
 *  - each loop iteration now calls current->fops->filter on `current`; the
 *    original invoked filter->fops (the first filter's ops) on every
 *    `current` instance;
 *  - when backing up the read buffer, writepos is reduced BEFORE readpos is
 *    zeroed (the original zeroed readpos first, turning the subtraction
 *    into a no-op and leaving writepos too large);
 *  - that back-up copies between overlapping regions of readbuf, so it
 *    uses memmove (memcpy on overlap is undefined behavior);
 *  - readbuflen is kept in sync with the reallocated buffer size (as
 *    php_stream_filter_append_ex does), instead of going stale. */
PHPAPI int _php_stream_filter_flush(php_stream_filter *filter, int finish)
{
	php_stream_bucket_brigade brig_a = { NULL, NULL }, brig_b = { NULL, NULL }, *inp = &brig_a, *outp = &brig_b, *brig_temp;
	php_stream_bucket *bucket;
	php_stream_filter_chain *chain;
	php_stream_filter *current;
	php_stream *stream;
	size_t flushed_size = 0;
	long flags = (finish ? PSFS_FLAG_FLUSH_CLOSE : PSFS_FLAG_FLUSH_INC);

	if (!filter->chain || !filter->chain->stream) {
		/* Filter is not attached to a chain, or chain is somehow not part of a stream */
		return FAILURE;
	}

	chain = filter->chain;
	stream = chain->stream;

	for (current = filter; current; current = current->next) {
		php_stream_filter_status_t status;

		status = current->fops->filter(stream, current, inp, outp, NULL, flags);
		if (status == PSFS_FEED_ME) {
			/* We've flushed the data far enough */
			return SUCCESS;
		}
		if (status == PSFS_ERR_FATAL) {
			return FAILURE;
		}
		/* Otherwise we have data available to PASS_ON:
		   swap the brigades and continue down the chain. */
		brig_temp = inp;
		inp = outp;
		outp = brig_temp;
		outp->head = NULL;
		outp->tail = NULL;

		/* Only the first filter sees the flush flag. */
		flags = PSFS_FLAG_NORMAL;
	}

	/* Last filter returned data via PSFS_PASS_ON: total it up. */
	for (bucket = inp->head; bucket; bucket = bucket->next) {
		flushed_size += bucket->buflen;
	}

	if (flushed_size == 0) {
		/* Unlikely, but possible */
		return SUCCESS;
	}

	if (chain == &(stream->readfilters)) {
		/* Dump any newly flushed data to the read buffer */
		if (stream->readpos > 0) {
			/* Back the buffer up (regions overlap -> memmove). */
			memmove(stream->readbuf, stream->readbuf + stream->readpos, stream->writepos - stream->readpos);
			stream->writepos -= stream->readpos;
			stream->readpos = 0;
		}
		if (flushed_size > (stream->readbuflen - stream->writepos)) {
			/* Grow the buffer and keep readbuflen in sync with the
			 * allocation. */
			size_t new_len = stream->writepos + flushed_size + stream->chunk_size;
			stream->readbuf = perealloc(stream->readbuf, new_len, stream->is_persistent);
			stream->readbuflen = new_len;
		}
		while ((bucket = inp->head)) {
			memcpy(stream->readbuf + stream->writepos, bucket->buf, bucket->buflen);
			stream->writepos += bucket->buflen;
			php_stream_bucket_unlink(bucket);
			php_stream_bucket_delref(bucket);
		}
	} else if (chain == &(stream->writefilters)) {
		/* Send flushed data to the stream */
		while ((bucket = inp->head)) {
			stream->ops->write(stream, bucket->buf, bucket->buflen);
			php_stream_bucket_unlink(bucket);
			php_stream_bucket_delref(bucket);
		}
	}

	return SUCCESS;
}
/* Append `filter` to the tail of `chain`.  If the chain is the stream's
 * read-filter chain and buffered data already exists, that data is wound
 * through the new filter immediately so later reads see filtered bytes.
 * Returns SUCCESS, or FAILURE if the filter rejects the pre-buffered data. */
PHPAPI int php_stream_filter_append_ex(php_stream_filter_chain *chain, php_stream_filter *filter)
{
	php_stream *stream = chain->stream;

	/* Link the filter onto the tail of the chain's doubly linked list. */
	filter->prev = chain->tail;
	filter->next = NULL;
	if (chain->tail) {
		chain->tail->next = filter;
	} else {
		chain->head = filter;
	}
	chain->tail = filter;
	filter->chain = chain;

	if (&(stream->readfilters) == chain && (stream->writepos - stream->readpos) > 0) {
		/* Go ahead and wind anything in the buffer through this filter */
		php_stream_bucket_brigade brig_in = { NULL, NULL }, brig_out = { NULL, NULL };
		php_stream_bucket_brigade *brig_inp = &brig_in, *brig_outp = &brig_out;
		php_stream_filter_status_t status;
		php_stream_bucket *bucket;
		size_t consumed = 0;

		/* Wrap the unread slice of the read buffer in a bucket and feed it
		 * to the new filter. */
		bucket = php_stream_bucket_new(stream, (char*) stream->readbuf + stream->readpos, stream->writepos - stream->readpos, 0, 0);
		php_stream_bucket_append(brig_inp, bucket);
		status = filter->fops->filter(stream, filter, brig_inp, brig_outp, &consumed, PSFS_FLAG_NORMAL);

		if (stream->readpos + consumed > (uint)stream->writepos) {
			/* No behaving filter should cause this. */
			status = PSFS_ERR_FATAL;
		}

		switch (status) {
			case PSFS_ERR_FATAL:
				/* Drain and release both brigades before bailing out so no
				 * buckets are leaked. */
				while (brig_in.head) {
					bucket = brig_in.head;
					php_stream_bucket_unlink(bucket);
					php_stream_bucket_delref(bucket);
				}
				while (brig_out.head) {
					bucket = brig_out.head;
					php_stream_bucket_unlink(bucket);
					php_stream_bucket_delref(bucket);
				}
				php_error_docref(NULL, E_WARNING, "Filter failed to process pre-buffered data");
				return FAILURE;
			case PSFS_FEED_ME:
				/* We don't actually need data yet; leave this filter in a
				   feed-me state until data is needed.  Reset the stream's
				   internal read buffer since the filter is "holding" it. */
				stream->readpos = 0;
				stream->writepos = 0;
				break;
			case PSFS_PASS_ON:
				/* If any data is consumed, we cannot rely upon the existing
				   read buffer, as the filtered data must replace the existing
				   data, so invalidate the cache. */
				/* Note that changes here should be reflected in
				   main/streams/streams.c::php_stream_fill_read_buffer */
				stream->writepos = 0;
				stream->readpos = 0;

				while (brig_outp->head) {
					bucket = brig_outp->head;
					/* Grow buffer to hold this bucket if need be.
					   TODO: See warning in main/stream/streams.c::php_stream_fill_read_buffer */
					if (stream->readbuflen - stream->writepos < bucket->buflen) {
						stream->readbuflen += bucket->buflen;
						stream->readbuf = perealloc(stream->readbuf, stream->readbuflen, stream->is_persistent);
					}
					memcpy(stream->readbuf + stream->writepos, bucket->buf, bucket->buflen);
					stream->writepos += bucket->buflen;

					php_stream_bucket_unlink(bucket);
					php_stream_bucket_delref(bucket);
				}
				break;
		}
	}

	return SUCCESS;
}