/* Finalize the Inversion: assign a position to each token, then sort the
 * tokens lexically and count clusters of identical terms.  After this call
 * the Inversion is frozen -- further appends throw.
 */
void Inversion_Invert_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    Token **tokens = ivars->tokens;
    Token **limit = tokens + ivars->size;
    int32_t token_pos = 0;

    // Thwart future attempts to append.
    if (ivars->inverted) {
        THROW(ERR, "Inversion has already been inverted");
    }
    ivars->inverted = true;

    // Assign token positions.
    for (; tokens < limit; tokens++) {
        TokenIVARS *const cur_token_ivars = Token_IVARS(*tokens);
        cur_token_ivars->pos = token_pos;
        // Add `pos_inc` in unsigned arithmetic so that a huge increment
        // wraps (defined behavior) instead of overflowing a signed int32_t
        // (undefined behavior).  The wrap is then detected below by the
        // signed comparison against the position just assigned.
        token_pos = (int32_t)((uint32_t)token_pos
                              + (uint32_t)cur_token_ivars->pos_inc);
        if (token_pos < cur_token_ivars->pos) {
            THROW(ERR, "Token positions out of order: %i32 %i32",
                  cur_token_ivars->pos, token_pos);
        }
    }

    // Sort the tokens lexically, and hand off to cluster counting routine.
    qsort(ivars->tokens, ivars->size, sizeof(Token*), Token_compare);
    S_count_clusters(self, ivars);
}
/* Return the next token in iteration order, or NULL when the iterator is
 * exhausted.  Does not transfer a reference to the caller.
 */
Token* Inversion_Next_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);

    // Iteration ends once the cursor reaches the end of the array.
    if (ivars->cur == ivars->size) {
        return NULL;
    }

    Token *next_token = ivars->tokens[ivars->cur];
    ivars->cur++;
    return next_token;
}
/* Append a token to the Inversion, taking ownership of the supplied
 * reference.  Throws if the Inversion has already been inverted.
 */
void Inversion_Append_IMP(Inversion *self, Token *token) {
    InversionIVARS *const ivars = Inversion_IVARS(self);

    // Appends are illegal once positions have been assigned.
    if (ivars->inverted) {
        THROW(ERR, "Can't append tokens after inversion");
    }

    // Make room if the array is full.
    if (ivars->size >= ivars->cap) {
        size_t new_capacity = Memory_oversize(ivars->size + 1, sizeof(Token*));
        S_grow(self, new_capacity);
    }

    ivars->tokens[ivars->size++] = token;
}
/* Ensure the token array can hold at least `size` elements, zeroing any
 * newly allocated slots.  No-op if the current capacity already suffices.
 *
 * NOTE(review): a second, conflicting definition of S_grow appears later in
 * this file; a static function may only be defined once, so one of the two
 * must be removed before this translation unit will compile.
 */
static void S_grow(Inversion *self, size_t size) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    if (size > ivars->cap) {
        // Widen BEFORE multiplying: computing `size * sizeof(Token*)` in
        // size_t can wrap on 64-bit platforms, defeating the checks below.
        uint64_t amount = (uint64_t)size * sizeof(Token*);

        // Clip rather than wrap.  `amount > SIZE_MAX` catches overflow when
        // size_t is narrower than 64 bits; `amount < size` catches a wrap of
        // the 64-bit multiply itself.
        if (amount > SIZE_MAX || amount < size) {
            amount = SIZE_MAX;
        }
        ivars->tokens = (Token**)REALLOCATE(ivars->tokens, (size_t)amount);
        ivars->cap = size;
        memset(ivars->tokens + ivars->size, 0,
               (size - ivars->size) * sizeof(Token*));
    }
}
/* Release all tokens held by the Inversion, free the backing storage, and
 * chain up to the superclass destructor.
 */
void Inversion_Destroy_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    if (ivars->tokens != NULL) {
        // Drop our reference to each stored token.
        for (uint32_t i = 0; i < ivars->size; i++) {
            DECREF(ivars->tokens[i]);
        }
        FREEMEM(ivars->tokens);
    }
    FREEMEM(ivars->cluster_counts);
    SUPER_DESTROY(self, INVERSION);
}
/* Ensure the token array can hold at least `size` elements, zeroing any
 * newly allocated slots.  Throws rather than overflowing if the request
 * exceeds what the allocator or the uint32_t capacity field can express.
 *
 * NOTE(review): this file contains another definition of S_grow earlier on;
 * a static function may only be defined once -- the duplicate needs to go.
 */
static void S_grow(Inversion *self, size_t size) {
    InversionIVARS *const ivars = Inversion_IVARS(self);

    // Nothing to do if we already have room.
    if (size <= ivars->cap) {
        return;
    }

    // Refuse requests whose byte count would overflow size_t, or whose
    // element count won't fit in the uint32_t `cap` field.
    if (size > SIZE_MAX / sizeof(Token*) || size > UINT32_MAX) {
        THROW(ERR, "Can't grow Inversion to hold %u64 elements",
              (uint64_t)size);
    }

    size_t num_bytes = size * sizeof(Token*);
    ivars->tokens = (Token**)REALLOCATE(ivars->tokens, num_bytes);
    ivars->cap = (uint32_t)size;

    // Zero the freshly added slots.
    size_t num_new_slots = size - ivars->size;
    memset(ivars->tokens + ivars->size, 0, num_new_slots * sizeof(Token*));
}
/* Construct a new Inversion.  If `seed_token` is non-NULL, an extra
 * reference to it is taken and it becomes the first appended token.
 */
Inversion* Inversion_new(Token *seed_token) {
    Inversion *self = (Inversion*)Class_Make_Obj(INVERSION);
    InversionIVARS *const ivars = Inversion_IVARS(self);

    // Start with a small zeroed token array and a rewound iterator.
    ivars->cap                 = 16;
    ivars->size                = 0;
    ivars->tokens              = (Token**)CALLOCATE(ivars->cap, sizeof(Token*));
    ivars->cur                 = 0;
    ivars->inverted            = false;
    ivars->cluster_counts      = NULL;
    ivars->cluster_counts_size = 0;

    // An optional seed token primes the collection.
    if (seed_token != NULL) {
        Inversion_Append(self, (Token*)INCREF(seed_token));
    }

    return self;
}
/* Rewind the iterator so the next call to Next starts from the first token. */
void Inversion_Reset_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    ivars->cur = 0;
}
/* Return the number of tokens currently held by the Inversion. */
uint32_t Inversion_Get_Size_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    return ivars->size;
}