/** Log a message at severity LOG_DEBUG in domain <b>domain</b>, formatting
 * <b>format</b> printf-style with the trailing arguments. */
void _log_debug(log_domain_mask_t domain, const char *format, ...)
{
  va_list args;

  /* Fast path: debug logging is usually disabled.  (For GCC this same
   * check is done in the calling macro before we ever get here.) */
  if (PREDICT_LIKELY(LOG_DEBUG > _log_global_min_severity))
    return;

  va_start(args, format);
  logv(LOG_DEBUG, domain, _log_fn_function_name, format, args);
  va_end(args);

  /* Consume the one-shot caller-name slot that the logging macro filled in
   * before invoking us. */
  _log_fn_function_name = NULL;
}
/** Compute the curve25519 scalar multiplication of <b>secret</b> with the
 * standard basepoint, writing the 32-byte result to <b>output</b>.
 * Returns 0 on success, or the underlying implementation's error code. */
STATIC int curve25519_basepoint_impl(uint8_t *output, const uint8_t *secret)
{
  /* The curve25519 basepoint: u = 9, little-endian. */
  static const uint8_t basepoint[32] = {9};

  /* Lazily decide which implementation to use on first call. */
  if (PREDICT_UNLIKELY(curve25519_use_ed == -1))
    pick_curve25519_basepoint_impl();

  /* TODO: Someone should benchmark curved25519_scalarmult_basepoint versus
   * an optimized NaCl build to see which should be used when compiled with
   * NaCl available.  The ed25519-based optimization is suspected to always
   * win. */
  if (PREDICT_LIKELY(curve25519_use_ed == 1)) {
    curved25519_scalarmult_basepoint_donna(output, secret);
    return 0;
  }

  return curve25519_impl(output, secret, basepoint);
}
/** Return a newly allocated item from <b>pool</b>.
 *
 * Allocation strategy: take the front chunk of the 'used' list (partially
 * full) if there is one; otherwise recycle an 'empty' chunk; otherwise
 * allocate a brand-new chunk.  Within the chosen chunk, prefer its internal
 * freelist; otherwise carve fresh space off the chunk's unused tail.
 * A chunk that becomes completely full is moved to the 'full' list. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (PREDICT_LIKELY(pool->used_chunks != NULL)) {
    /* Common case: there is some chunk that is neither full nor empty.  Use
     * that one.  (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones. */
    chunk = pool->used_chunks;
  } else if (pool->empty_chunks) {
    /* We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that.  (We pull from the front of the list, which should
     * get us the most recently emptied chunk.) */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
    ASSERT(!chunk->prev);

    /* Track the low-water mark of empty chunks, used by the lazy-free
     * heuristics elsewhere in the pool. */
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);
    CHECK_ALLOC(chunk);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  /* Whatever chunk we picked must have room for at least one more item. */
  ASSERT(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    ASSERT(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    ASSERT(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did.  Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void*)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
    /* This chunk just became full.  It must currently be at the head of the
     * used list (that's the only place we allocate from). */
    ASSERT(chunk == pool->used_chunks);
    ASSERT(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }

  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}
/** Helper: Deal with confused or out-of-bounds values from localtime_r and
 * friends.  (On some platforms, they can give out-of-bounds values or can
 * return NULL.)  If <b>islocal</b>, this is a localtime result; otherwise
 * it's from gmtime.  The function returned <b>r</b>, when given <b>timep</b>
 * as its input.  If we need to store new results, store them in
 * <b>resultbuf</b>.
 *
 * Returns a struct tm that is always safe to pass to strftime(): either the
 * (possibly clamped) <b>r</b>, or <b>resultbuf</b> filled with a sentinel
 * date.
 *
 * Note: tm_yday is 0-based (Jan 1 == 0) and tm_wday is 0-based from Sunday,
 * per the C/POSIX struct tm contract; the values below respect that. */
struct tm *correct_tm(int islocal, const time_t *timep, struct tm *resultbuf,
                      struct tm *r)
{
  const char *outcome;

  if (PREDICT_LIKELY(r)) {
    if (r->tm_year > 8099) { /* We can't strftime dates after 9999 CE. */
      /* Clamp to the last second of 9999-12-31. */
      r->tm_year = 8099;
      r->tm_mon = 11;
      r->tm_mday = 31;
      r->tm_yday = 364; /* BUGFIX: 0-based; Dec 31 of non-leap 9999 is day
                         * 364, not 365. */
      r->tm_wday = 5;   /* 9999-12-31 is a Friday; the old wday was stale
                         * after clamping the other fields. */
      r->tm_hour = 23;
      r->tm_min = 59;
      r->tm_sec = 59;
    }
    return r;
  }

  /* If we get here, gmtime or localtime returned NULL.  It might have done
   * this because of overrun or underrun, or it might have done it because of
   * some other weird issue. */
  if (timep) {
    if (*timep < 0) {
      r = resultbuf;
      r->tm_year = 70; /* 1970 CE */
      r->tm_mon = 0;
      r->tm_mday = 1;
      r->tm_yday = 0; /* BUGFIX: 0-based; Jan 1 is day 0, not 1. */
      r->tm_wday = 4; /* 1970-01-01 was a Thursday; previously left
                       * uninitialized in resultbuf. */
      r->tm_hour = 0;
      r->tm_min = 0;
      r->tm_sec = 0;
      outcome = "Rounding up to 1970";
      goto done;
    } else if (*timep >= INT32_MAX) {
      /* Rounding down to INT32_MAX isn't so great, but keep in mind that we
       * only do it if gmtime/localtime tells us NULL. */
      r = resultbuf;
      r->tm_year = 137; /* 2037 CE */
      r->tm_mon = 11;
      r->tm_mday = 31;
      r->tm_yday = 364; /* BUGFIX: 0-based; Dec 31 of non-leap 2037 is day
                         * 364, not 365. */
      r->tm_wday = 4;   /* 2037-12-31 is a Thursday; previously left
                         * uninitialized in resultbuf. */
      r->tm_hour = 23;
      r->tm_min = 59;
      r->tm_sec = 59;
      outcome = "Rounding down to 2037";
      goto done;
    }
  }

  /* If we get here, then gmtime/localtime failed without getting an extreme
   * value for *timep.  Zero the whole buffer so every field (including
   * tm_wday/tm_yday) is at least initialized. */
  // tor_fragile_assert();
  r = resultbuf;
  sgx_memset(resultbuf, 0, sizeof(struct tm));
  outcome = "can't recover";

 done:
  // err(1, "%s("I64_FORMAT") failed with error %s: %s",
  //         islocal?"localtime":"gmtime",
  //         timep?I64_PRINTF_ARG(*timep):0,
  //         strerror(errno),
  //         outcome);
  sgx_puts("failed with error in correct_tm");
  return r;
}