/* Feed one inverted document's sortable field values into the per-field
 * sort writers, flushing everything if memory consumption crosses the
 * configured threshold.
 */
void
SortWriter_add_inverted_doc(SortWriter *self, Inverter *inverter, int32_t doc_id) {
    SortWriterIVARS *const ivars = SortWriter_IVARS(self);

    // Walk the inverted fields, routing each sortable value to the lazily
    // created writer for that field number.
    Inverter_Iterate(inverter);
    int32_t field_num;
    while ((field_num = Inverter_Next(inverter)) != 0) {
        FieldType *type = Inverter_Get_Type(inverter);
        if (!FType_Sortable(type)) { continue; }
        SortFieldWriter *field_writer = S_lazy_init_field_writer(self, field_num);
        SortFieldWriter_Add(field_writer, doc_id, Inverter_Get_Value(inverter));
    }

    // If our SortFieldWriters have collectively passed the memory threshold,
    // flush all of them, then release all unique values with a single action.
    if (MemPool_Get_Consumed(ivars->mem_pool) > ivars->mem_thresh) {
        const uint32_t num_writers = VA_Get_Size(ivars->field_writers);
        for (uint32_t tick = 0; tick < num_writers; tick++) {
            SortFieldWriter *const field_writer
                = (SortFieldWriter*)VA_Fetch(ivars->field_writers, tick);
            if (field_writer) {
                SortFieldWriter_Flush(field_writer);
            }
        }
        MemPool_Release_All(ivars->mem_pool);
        ivars->flush_at_finish = true;
    }
}
/* Add the sortable field values of one inverted document to the segment's
 * sort data, flushing all field writers when the shared memory counter
 * exceeds the threshold.
 */
void
SortWriter_Add_Inverted_Doc_IMP(SortWriter *self, Inverter *inverter, int32_t doc_id) {
    SortWriterIVARS *const ivars = SortWriter_IVARS(self);

    // Dispatch each sortable field's value to its dedicated field writer,
    // creating the writer on first use.
    Inverter_Iterate(inverter);
    for (int32_t field_num = Inverter_Next(inverter);
         field_num != 0;
         field_num = Inverter_Next(inverter)
        ) {
        FieldType *type = Inverter_Get_Type(inverter);
        if (FType_Sortable(type)) {
            SortFieldWriter *writer = S_lazy_init_field_writer(self, field_num);
            SortFieldWriter_Add(writer, doc_id, Inverter_Get_Value(inverter));
        }
    }

    // If our SortFieldWriters have collectively passed the memory threshold,
    // flush all of them, then reset the counter which tracks memory
    // consumption.
    if ((size_t)Counter_Get_Value(ivars->counter) > ivars->mem_thresh) {
        const size_t num_writers = Vec_Get_Size(ivars->field_writers);
        for (size_t tick = 0; tick < num_writers; tick++) {
            SortFieldWriter *const writer
                = (SortFieldWriter*)Vec_Fetch(ivars->field_writers, tick);
            if (writer) {
                SortFieldWriter_Flush(writer);
            }
        }
        Counter_Reset(ivars->counter);
        ivars->flush_at_finish = true;
    }
}
/* Finalize the segment's sort data: flush any remaining in-memory runs,
 * close the temp streams, finish each field writer, record per-field
 * metadata (counts, null ords, ord widths), and delete the temp files.
 */
void
SortWriter_finish(SortWriter *self) {
    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
    VArray *const field_writers = ivars->field_writers;

    // If we have no data, bail out.  (A NULL temp_ord_out means no sortable
    // value was ever written -- presumably the temp streams are opened
    // lazily elsewhere; verify against the writer's init path.)
    if (!ivars->temp_ord_out) { return; }

    // If we've either flushed or added segments, flush everything so that any
    // one field can use the entire margin up to mem_thresh.
    // NOTE(review): both loops below start at i = 1 -- field numbers appear
    // to be 1-based with slot 0 unused; confirm against Segment/Schema.
    if (ivars->flush_at_finish) {
        for (uint32_t i = 1, max = VA_Get_Size(field_writers); i < max; i++) {
            SortFieldWriter *field_writer
                = (SortFieldWriter*)VA_Fetch(field_writers, i);
            if (field_writer) {
                SortFieldWriter_Flush(field_writer);
            }
        }
    }

    // Close down temp streams.
    OutStream_Close(ivars->temp_ord_out);
    OutStream_Close(ivars->temp_ix_out);
    OutStream_Close(ivars->temp_dat_out);

    // Finish each field writer and record its stats, keyed by field name,
    // into the counts / null_ords / ord_widths hashes as decimal strings.
    // VA_Delete removes the element from the array; ownership is released
    // via the DECREF at the bottom of the loop.
    for (uint32_t i = 1, max = VA_Get_Size(field_writers); i < max; i++) {
        SortFieldWriter *field_writer
            = (SortFieldWriter*)VA_Delete(field_writers, i);
        if (field_writer) {
            CharBuf *field = Seg_Field_Name(ivars->segment, i);
            SortFieldWriter_Flip(field_writer);
            int32_t count = SortFieldWriter_Finish(field_writer);
            Hash_Store(ivars->counts, (Obj*)field,
                       (Obj*)CB_newf("%i32", count));
            // A null_ord of -1 is the "no null ordinal" sentinel; only real
            // values get recorded.
            int32_t null_ord = SortFieldWriter_Get_Null_Ord(field_writer);
            if (null_ord != -1) {
                Hash_Store(ivars->null_ords, (Obj*)field,
                           (Obj*)CB_newf("%i32", null_ord));
            }
            int32_t ord_width = SortFieldWriter_Get_Ord_Width(field_writer);
            Hash_Store(ivars->ord_widths, (Obj*)field,
                       (Obj*)CB_newf("%i32", ord_width));
        }
        // Runs unconditionally -- DECREF on NULL is assumed to be a no-op.
        DECREF(field_writer);
    }
    VA_Clear(field_writers);

    // Store metadata.
    Seg_Store_Metadata_Str(ivars->segment, "sort", 4,
                           (Obj*)SortWriter_Metadata(self));

    // Clean up: remove the temporary sort files, reusing one CharBuf for
    // all three paths.
    Folder *folder = ivars->folder;
    CharBuf *seg_name = Seg_Get_Name(ivars->segment);
    CharBuf *path = CB_newf("%o/sort_ord_temp", seg_name);
    Folder_Delete(folder, path);
    CB_setf(path, "%o/sort_ix_temp", seg_name);
    Folder_Delete(folder, path);
    CB_setf(path, "%o/sort_dat_temp", seg_name);
    Folder_Delete(folder, path);
    DECREF(path);
}