void TestMemPool_run_tests() { TestBatch *batch = TestBatch_new(4); MemoryPool *mem_pool = MemPool_new(0); MemoryPool *other = MemPool_new(0); char *ptr_a, *ptr_b; TestBatch_Plan(batch); ptr_a = (char*)MemPool_Grab(mem_pool, 10); strcpy(ptr_a, "foo"); MemPool_Release_All(mem_pool); ptr_b = (char*)MemPool_Grab(mem_pool, 10); TEST_STR_EQ(batch, ptr_b, "foo", "Recycle RAM on Release_All"); ptr_a = mem_pool->buf; MemPool_Resize(mem_pool, ptr_b, 6); TEST_TRUE(batch, mem_pool->buf < ptr_a, "Resize"); ptr_a = (char*)MemPool_Grab(other, 20); MemPool_Release_All(other); MemPool_Eat(other, mem_pool); TEST_TRUE(batch, other->buf == mem_pool->buf, "Eat"); TEST_TRUE(batch, other->buf != NULL, "Eat"); DECREF(mem_pool); DECREF(other); DECREF(batch); }
/* Route each sortable field of an inverted document to its per-field
 * SortFieldWriter (created lazily on first use), then flush all field
 * writers once their shared memory pool grows past the configured
 * threshold. */
void
SortWriter_add_inverted_doc(SortWriter *self, Inverter *inverter,
                            int32_t doc_id) {
    SortWriterIVARS *const ivars = SortWriter_IVARS(self);

    /* Walk the inverter's fields; skip anything that isn't sortable. */
    Inverter_Iterate(inverter);
    int32_t field_num;
    while ((field_num = Inverter_Next(inverter)) != 0) {
        FieldType *type = Inverter_Get_Type(inverter);
        if (!FType_Sortable(type)) { continue; }
        SortFieldWriter *writer = S_lazy_init_field_writer(self, field_num);
        SortFieldWriter_Add(writer, doc_id, Inverter_Get_Value(inverter));
    }

    /* When the field writers' collective footprint exceeds the memory
     * threshold, flush every writer, then recycle all of the pool's
     * unique values with a single Release_All. */
    if (MemPool_Get_Consumed(ivars->mem_pool) > ivars->mem_thresh) {
        for (uint32_t i = 0; i < VA_Get_Size(ivars->field_writers); i++) {
            SortFieldWriter *const writer
                = (SortFieldWriter*)VA_Fetch(ivars->field_writers, i);
            if (writer != NULL) {
                SortFieldWriter_Flush(writer);
            }
        }
        MemPool_Release_All(ivars->mem_pool);
        ivars->flush_at_finish = true;
    }
}