/*
 * Small exercise driver for the element API: builds a few elements from
 * overlapping slices of one coordinate array, prints each, and frees it.
 * Returns 0 on success; asserts if any allocation fails.
 */
int main (int argc, char* argv[])
{
    coordinate_t pts[4];
    element_t* el;

    pts[0].x = 1;  pts[0].y = 0;
    pts[1].x = 0;  pts[1].y = 1;
    pts[2].x = 0;  pts[2].y = 0;
    pts[3].x = -2; pts[3].y = -2;

    /* Triangle from the first three points */
    el = element_alloc(pts, 3);
    assert(el);
    printElement(el);
    element_free(el);

    /* Edge from the first two points */
    el = element_alloc(pts, 2);
    assert(el);
    printElement(el);
    element_free(el);

    /* Triangle from points 1..3 (includes the negative coordinate) */
    el = element_alloc(pts + 1, 3);
    assert(el);
    printElement(el);
    element_free(el);

    return 0;
}
/* 10.3.4 */ static int gsm_plmnsel_decode(struct osim_decoded_data *dd, const struct osim_file_desc *desc, int len, uint8_t *data) { int i, n_plmn = len / 3; if (n_plmn < 1) return -EINVAL; for (i = 0; i < n_plmn; i++) { uint8_t *cur = data + 3*i; struct osim_decoded_element *elem, *mcc, *mnc; uint8_t ra_buf[6]; struct gprs_ra_id ra_id; memset(ra_buf, 0, sizeof(ra_buf)); memcpy(ra_buf, cur, 3); gsm48_parse_ra(&ra_id, ra_buf); elem = element_alloc(dd, "PLMN", ELEM_T_GROUP, ELEM_REPR_NONE); mcc = element_alloc_sub(elem, "MCC", ELEM_T_UINT16, ELEM_REPR_DEC); mcc->u.u16 = ra_id.mcc; mnc = element_alloc_sub(elem, "MNC", ELEM_T_UINT16, ELEM_REPR_DEC); mnc->u.u16 = ra_id.mnc; } return 0; }
/* 10.3.3 */
static int gsm_kc_decode(struct osim_decoded_data *dd,
			 const struct osim_file_desc *desc,
			 int len, uint8_t *data)
{
	struct osim_decoded_element *e;

	/* need 8 bytes of Kc plus one CKSN byte */
	if (len < 9)
		return -EINVAL;

	/* Bytes 0..7: ciphering key Kc (talloc'd copy owned by the element) */
	e = element_alloc(dd, "Kc", ELEM_T_BYTES, ELEM_REPR_HEX);
	e->u.buf = talloc_memdup(e, data, 8);

	/* Byte 8: ciphering key sequence number */
	e = element_alloc(dd, "CKSN", ELEM_T_UINT8, ELEM_REPR_DEC);
	e->u.u8 = data[8];

	return 0;
}
/*
 * Replace the element stored at `index` with a freshly allocated copy of
 * `data` (`size` bytes), freeing the element previously held there.
 * Out-of-range indices are silently ignored.
 *
 * Fix: the original bounds check used `index > vector->length`, which
 * accepted index == length.  Valid occupied slots are 0..length-1 (see
 * vector_add, which stores at elements[length] and then increments), so
 * index == length would element_dealloc() a slot that was never populated.
 */
void vector_insert(struct vector *vector, int index, void *data, size_t size)
{
	if (index < 0 || index >= vector->length)
		return;
	element_dealloc(vector->elements[index]);
	vector->elements[index] = element_alloc(data, size);
}
/*
 * Append `data` to the linked-list backed queue.
 * Returns E_OS_OK on success; panics (and returns E_OS_ERR_OVERFLOW if the
 * panic handler returns) when the queue is full or allocation fails.
 */
static OS_ERR_TYPE add_data(queue_impl_t * list, void * data)
{
    list_element * node;

    /* Guard: refuse to grow past the configured maximum */
    if (list->current_size >= list->max_size) {
        panic(E_OS_ERR_OVERFLOW); /* Panic if max size reached */
        return E_OS_ERR_OVERFLOW;
    }

    node = element_alloc();
    if (node == NULL) {
        panic(E_OS_ERR_NO_MEMORY); /* Panic if no memory available */
        return E_OS_ERR_OVERFLOW;
    }

    node->data = data;
    list_add(&(list->_list), (list_t*)node);
    list->current_size++;
    return E_OS_OK;
}
/* 10.3.5 */
int gsm_hpplmn_decode(struct osim_decoded_data *dd,
		      const struct osim_file_desc *desc,
		      int len, uint8_t *data)
{
	struct osim_decoded_element *elem;

	/*
	 * Fix: validate the file length before dereferencing data, matching
	 * the sibling decoders (gsm_plmnsel_decode, gsm_imsi_decode) which
	 * reject short input with -EINVAL.
	 */
	if (len < 1)
		return -EINVAL;

	/* Single byte: HPLMN search period timer value */
	elem = element_alloc(dd, "Time interval", ELEM_T_UINT8, ELEM_REPR_DEC);
	elem->u.u8 = *data;

	return 0;
}
/*
 * Decode the ICCID file: the whole body is one BCD-encoded identifier,
 * copied verbatim into a talloc'd buffer owned by the element.
 */
static int iccid_decode(struct osim_decoded_data *dd,
			const struct osim_file_desc *desc,
			int len, uint8_t *data)
{
	struct osim_decoded_element *iccid;

	iccid = element_alloc(dd, "ICCID", ELEM_T_BCD, ELEM_REPR_DEC);
	iccid->length = len;
	iccid->u.buf = talloc_memdup(iccid, data, len);

	return 0;
}
/* 10.3.1 */
int gsm_lp_decode(struct osim_decoded_data *dd,
		  const struct osim_file_desc *desc,
		  int len, uint8_t *data)
{
	int pos = 0;

	/* one language-code element per input byte */
	while (pos < len) {
		struct osim_decoded_element *lang;

		lang = element_alloc(dd, "Language Code", ELEM_T_UINT8, ELEM_REPR_DEC);
		lang->u.u8 = data[pos];
		pos++;
	}
	return 0;
}
/* 10.3.2 */
int gsm_imsi_decode(struct osim_decoded_data *dd,
		    const struct osim_file_desc *desc,
		    int len, uint8_t *data)
{
	struct osim_decoded_element *imsi;

	/* need the length octet plus at least one BCD octet */
	if (len < 2)
		return -EINVAL;

	imsi = element_alloc(dd, "IMSI", ELEM_T_BCD, ELEM_REPR_DEC);
	/* byte 0 holds the IMSI length; the BCD digits follow */
	imsi->length = data[0];
	imsi->u.buf = talloc_memdup(imsi, &data[1], len - 1);

	return 0;
}
/*
 * Decode the Extended Language Preference file: each 2-character ASCII
 * language code becomes its own string element (talloc'd, NUL-terminated).
 */
static int elp_decode(struct osim_decoded_data *dd,
		      const struct osim_file_desc *desc,
		      int len, uint8_t *data)
{
	uint8_t *cur;
	uint8_t *end = data + (len / 2) * 2;	/* ignore a trailing odd byte */

	for (cur = data; cur < end; cur += 2) {
		struct osim_decoded_element *lang;

		lang = element_alloc(dd, "Language Code", ELEM_T_STRING, ELEM_REPR_NONE);
		lang->u.buf = (uint8_t *) talloc_strndup(lang, (const char *) cur, 2);
	}
	return 0;
}
/*
 * create a new entry if possible
 *
 * Pops one element off the table's freelist, growing the freelist via
 * element_alloc() when it is empty.  Returns NULL only on out-of-memory.
 * For partitioned (shared) tables the header mutex protects nentries and
 * freeList; note the loop is arranged so that on `break` the mutex (if any)
 * is still held, and is released only after the freelist/nentries update.
 */
static HASHBUCKET
get_hash_entry(HTAB *hashp)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile HASHHDR *hctlv = hashp->hctl;
	HASHBUCKET	newElement;

	for (;;)
	{
		/* if partitioned, must lock to touch nentries and freeList */
		if (IS_PARTITIONED(hctlv))
			SpinLockAcquire(&hctlv->mutex);

		/* try to get an entry from the freelist */
		newElement = hctlv->freeList;
		if (newElement != NULL)
			break;				/* exits with the mutex still held */

		/* no free elements. allocate another chunk of buckets */
		if (IS_PARTITIONED(hctlv))
			SpinLockRelease(&hctlv->mutex);
		/* element_alloc refills the freelist; retry the loop afterwards */
		if (!element_alloc(hashp, hctlv->nelem_alloc))
		{
			/* out of memory */
			return NULL;
		}
	}

	/* remove entry from freelist, bump nentries */
	hctlv->freeList = newElement->link;
	hctlv->nentries++;
	if (IS_PARTITIONED(hctlv))
		SpinLockRelease(&hctlv->mutex);

	return newElement;
}
/*
 * hash_create -- create a new dynamic hash table
 *
 * tabname: a name for the table (for debugging purposes)
 * nelem: maximum number of elements expected
 * *info: additional table parameters, as indicated by flags
 * flags: bitmask indicating which parameters to take from *info
 *
 * Note: for a shared-memory hashtable, nelem needs to be a pretty good
 * estimate, since we can't expand the table on the fly.  But an unshared
 * hashtable can be expanded on-the-fly, so it's better for nelem to be
 * on the small side and let the table grow if it's exceeded.  An overly
 * large nelem will penalize hash_seq_search speed without buying much.
 */
HTAB *
hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
	HTAB	   *hashp;
	HASHHDR    *hctl;

	/*
	 * For shared hash tables, we have a local hash header (HTAB struct) that
	 * we allocate in TopMemoryContext; all else is in shared memory.
	 *
	 * For non-shared hash tables, everything including the hash header is in
	 * a memory context created specially for the hash table --- this makes
	 * hash_destroy very simple. The memory context is made a child of either
	 * a context specified by the caller, or TopMemoryContext if nothing is
	 * specified.
	 */
	if (flags & HASH_SHARED_MEM)
	{
		/* Set up to allocate the hash header */
		CurrentDynaHashCxt = TopMemoryContext;
	}
	else
	{
		/* Create the hash table's private memory context */
		if (flags & HASH_CONTEXT)
			CurrentDynaHashCxt = info->hcxt;
		else
			CurrentDynaHashCxt = TopMemoryContext;
		CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
												   tabname,
												   ALLOCSET_DEFAULT_MINSIZE,
												   ALLOCSET_DEFAULT_INITSIZE,
												   ALLOCSET_DEFAULT_MAXSIZE);
	}

	/* Initialize the hash header, plus a copy of the table name */
	/* tabname is stored immediately after the HTAB struct itself */
	hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
	MemSet(hashp, 0, sizeof(HTAB));

	hashp->tabname = (char *) (hashp + 1);
	strcpy(hashp->tabname, tabname);

	if (flags & HASH_FUNCTION)
		hashp->hash = info->hash;
	else
		hashp->hash = string_hash;	/* default hash function */

	/*
	 * If you don't specify a match function, it defaults to string_compare if
	 * you used string_hash (either explicitly or by default) and to memcmp
	 * otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.)
	 */
	if (flags & HASH_COMPARE)
		hashp->match = info->match;
	else if (hashp->hash == string_hash)
		hashp->match = (HashCompareFunc) string_compare;
	else
		hashp->match = memcmp;

	/*
	 * Similarly, the key-copying function defaults to strlcpy or memcpy.
	 */
	if (flags & HASH_KEYCOPY)
		hashp->keycopy = info->keycopy;
	else if (hashp->hash == string_hash)
		hashp->keycopy = (HashCopyFunc) strlcpy;
	else
		hashp->keycopy = memcpy;

	/* allocator for table internals; defaults to the dynahash context */
	if (flags & HASH_ALLOC)
		hashp->alloc = info->alloc;
	else
		hashp->alloc = DynaHashAlloc;

	if (flags & HASH_SHARED_MEM)
	{
		/*
		 * ctl structure and directory are preallocated for shared memory
		 * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
		 * well.
		 */
		hashp->hctl = info->hctl;
		hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
		hashp->hcxt = NULL;
		hashp->isshared = true;

		/* hash table already exists, we're just attaching to it */
		if (flags & HASH_ATTACH)
		{
			/* make local copies of some heavily-used values */
			hctl = hashp->hctl;
			hashp->keysize = hctl->keysize;
			hashp->ssize = hctl->ssize;
			hashp->sshift = hctl->sshift;

			return hashp;
		}
	}
	else
	{
		/* setup hash table defaults */
		hashp->hctl = NULL;
		hashp->dir = NULL;
		hashp->hcxt = CurrentDynaHashCxt;
		hashp->isshared = false;
	}

	/* non-attached tables still need a header allocated here */
	if (!hashp->hctl)
	{
		hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
		if (!hashp->hctl)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory")));
	}

	hashp->frozen = false;

	/* fill header with defaults before applying caller overrides below */
	hdefault(hashp);

	hctl = hashp->hctl;

	if (flags & HASH_PARTITION)
	{
		/* Doesn't make sense to partition a local hash table */
		Assert(flags & HASH_SHARED_MEM);

		/*
		 * The number of partitions had better be a power of 2. Also, it must
		 * be less than INT_MAX (see init_htab()), so call the int version of
		 * next_pow2.
		 */
		Assert(info->num_partitions == next_pow2_int(info->num_partitions));

		hctl->num_partitions = info->num_partitions;
	}

	if (flags & HASH_SEGMENT)
	{
		hctl->ssize = info->ssize;
		hctl->sshift = my_log2(info->ssize);
		/* ssize had better be a power of 2 */
		Assert(hctl->ssize == (1L << hctl->sshift));
	}
	if (flags & HASH_FFACTOR)
		hctl->ffactor = info->ffactor;

	/*
	 * SHM hash tables have fixed directory size passed by the caller.
	 */
	if (flags & HASH_DIRSIZE)
	{
		hctl->max_dsize = info->max_dsize;
		hctl->dsize = info->dsize;
	}

	/*
	 * hash table now allocates space for key and data but you have to say how
	 * much space to allocate
	 */
	if (flags & HASH_ELEM)
	{
		Assert(info->entrysize >= info->keysize);
		hctl->keysize = info->keysize;
		hctl->entrysize = info->entrysize;
	}

	/* make local copies of heavily-used constant fields */
	hashp->keysize = hctl->keysize;
	hashp->ssize = hctl->ssize;
	hashp->sshift = hctl->sshift;

	/* Build the hash directory structure */
	if (!init_htab(hashp, nelem))
		elog(ERROR, "failed to initialize hash table \"%s\"",
			 hashp->tabname);

	/*
	 * For a shared hash table, preallocate the requested number of elements.
	 * This reduces problems with run-time out-of-shared-memory conditions.
	 *
	 * For a non-shared hash table, preallocate the requested number of
	 * elements if it's less than our chosen nelem_alloc. This avoids wasting
	 * space if the caller correctly estimates a small table size.
	 */
	if ((flags & HASH_SHARED_MEM) ||
		nelem < hctl->nelem_alloc)
	{
		if (!element_alloc(hashp, (int) nelem))
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory")));
	}

	if (flags & HASH_FIXED_SIZE)
		hashp->isfixed = true;
	return hashp;
}
/*----------
 * hash_search -- look up key in table and perform action
 *
 * action is one of:
 *		HASH_FIND: look up key in table
 *		HASH_ENTER: look up key in table, creating entry if not present
 *		HASH_REMOVE: look up key in table, remove entry if present
 *		HASH_FIND_SAVE: look up key in table, also save in static var
 *		HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE
 *
 * Return value is a pointer to the element found/entered/removed if any,
 * or NULL if no match was found. (NB: in the case of the REMOVE actions,
 * the result is a dangling pointer that shouldn't be dereferenced!)
 * A NULL result for HASH_ENTER implies we ran out of memory.
 *
 * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
 * existing entry in the table, FALSE otherwise. This is needed in the
 * HASH_ENTER case, but is redundant with the return value otherwise.
 *
 * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one
 * table lookup in a find/process/remove scenario. Note that no other
 * addition or removal in the table can safely happen in between.
 *----------
 */
void *
hash_search(HTAB *hashp,
			const void *keyPtr,
			HASHACTION action,
			bool *foundPtr)
{
	HASHHDR    *hctl = hashp->hctl;
	uint32		hashvalue = 0;
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;

	/* holds the lookup result between HASH_FIND_SAVE and HASH_REMOVE_SAVED;
	 * being function-static, this is not safe for concurrent callers */
	static struct State
	{
		HASHBUCKET	currBucket;
		HASHBUCKET *prevBucketPtr;
	}			saveState;

#if HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/*
	 * Do the initial lookup (or recall result of prior lookup)
	 */
	if (action == HASH_REMOVE_SAVED)
	{
		currBucket = saveState.currBucket;
		prevBucketPtr = saveState.prevBucketPtr;

		/*
		 * Try to catch subsequent errors
		 */
		Assert(currBucket);
		saveState.currBucket = NULL;
	}
	else
	{
		HashCompareFunc match;
		Size		keysize = hctl->keysize;

		hashvalue = hashp->hash(keyPtr, keysize);
		bucket = calc_bucket(hctl, hashvalue);

		segment_num = bucket >> hctl->sshift;
		segment_ndx = MOD(bucket, hctl->ssize);

		segp = hashp->dir[segment_num];

		if (segp == NULL)
			hash_corrupted(hashp);

		/* prevBucketPtr tracks the link slot pointing at currBucket, so a
		 * removal can unlink without rescanning the chain */
		prevBucketPtr = &segp[segment_ndx];
		currBucket = *prevBucketPtr;

		/*
		 * Follow collision chain looking for matching key
		 */
		match = hashp->match;	/* save one fetch in inner loop */
		while (currBucket != NULL)
		{
			if (currBucket->hashvalue == hashvalue &&
				match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
				break;
			prevBucketPtr = &(currBucket->link);
			currBucket = *prevBucketPtr;
#if HASH_STATISTICS
			hash_collisions++;
			hctl->collisions++;
#endif
		}
	}

	if (foundPtr)
		*foundPtr = (bool) (currBucket != NULL);

	/*
	 * OK, now what?
	 */
	switch (action)
	{
		case HASH_FIND:
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);
			return NULL;

		case HASH_FIND_SAVE:
			if (currBucket != NULL)
			{
				saveState.currBucket = currBucket;
				saveState.prevBucketPtr = prevBucketPtr;
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_REMOVE:
		case HASH_REMOVE_SAVED:
			if (currBucket != NULL)
			{
				Assert(hctl->nentries > 0);
				hctl->nentries--;

				/* remove record from hash bucket's chain. */
				*prevBucketPtr = currBucket->link;

				/* add the record to the freelist for this table. */
				currBucket->link = hctl->freeList;
				hctl->freeList = currBucket;

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the
				 * next time something is added to the table
				 */
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_ENTER:
			/* Return existing element if found, else create one */
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);

			/* get the next free element */
			currBucket = hctl->freeList;
			if (currBucket == NULL)
			{
				/* no free elements. allocate another chunk of buckets */
				/* NOTE(review): element_alloc() is called here with one
				 * argument, but with (hashp, count) elsewhere in this file
				 * (hash_create, get_hash_entry) -- confirm which prototype
				 * this translation unit actually declares. */
				if (!element_alloc(hashp))
					return NULL;	/* out of memory */
				currBucket = hctl->freeList;
				Assert(currBucket != NULL);
			}

			hctl->freeList = currBucket->link;

			/* link into hashbucket chain */
			*prevBucketPtr = currBucket;
			currBucket->link = NULL;

			/* copy key into record */
			currBucket->hashvalue = hashvalue;
			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);

			/* caller is expected to fill the data field on return */

			/* Check if it is time to split the segment */
			if (++hctl->nentries / (long) (hctl->max_bucket + 1) > hctl->ffactor)
			{
				/*
				 * NOTE: failure to expand table is not a fatal error, it
				 * just means we have to run at higher fill factor than we
				 * wanted.
				 */
				expand_table(hashp);
			}

			return (void *) ELEMENTKEY(currBucket);
	}

	/* unreachable for valid actions; all cases above return directly */
	elog(ERROR, "unrecognized hash action code: %d", (int) action);

	return NULL;				/* keep compiler quiet */
}
/*
 * Append a new element (a copy of `size` bytes of `data`) to the end of
 * the vector, growing the backing array first if necessary.
 */
void vector_add(struct vector *vector, void *data, size_t size)
{
	/* make sure elements[] has room for one more slot */
	vector_ensure_capacity(vector);

	/* store the new element at the first unused slot, then grow length */
	vector->elements[vector->length++] = element_alloc(data, size);
}