// caller should own dup_lock static void init(void) { static bool inited; if (inited && bucket_width == last_bucket_width) return; inited = true; last_bucket_width = bucket_width; if (bucket_width == 0) { unsigned columns; get_window_size(&columns, NULL); if (0 == columns) { columns = 80; // good old defaults never fail us } bucket_width = CEIL_DIV(max_dup_delay, columns); if (0 == bucket_width) { // It's ok to use the console as at this point we are likely // in the debugger already fprintf(stderr, "Cannot compute bucket width"); bucket_width = 1; } } nb_buckets = CEIL_DIV(max_dup_delay, bucket_width); if (dups) free(dups); dups = malloc(nb_buckets * sizeof(*dups)); assert(dups); dup_reset_locked(); }
/*
 * Program the UTMIP PLL (USB PHY clock) to produce 960 MHz from the
 * crystal input, with lock/stabilization counts scaled to the actual
 * input clock frequency. The crystal clock is gated off around the
 * reconfiguration and re-enabled at the end.
 */
static void init_utmip_pll(void)
{
    int khz = clock_get_pll_input_khz();

    /* Shut off PLL crystal clock while we mess with it */
    clrbits_le32(&clk_rst->utmip_pll_cfg2, 1 << 30); /* PHY_XTAL_CLKEN */
    udelay(1);

    write32(&clk_rst->utmip_pll_cfg0,	/* 960MHz * 1 / 80 == 12 MHz */
            80 << 16 |			/* (rst) phy_divn */
             1 <<  8);			/* (rst) phy_divm */

    write32(&clk_rst->utmip_pll_cfg1,
            CEIL_DIV(khz, 8000) << 27 |	/* pllu_enbl_cnt / 8 (1us) */
                              0 << 16 |	/* PLLU pwrdn */
                              0 << 14 |	/* pll_enable pwrdn */
                              0 << 12 |	/* pll_active pwrdn */
             CEIL_DIV(khz, 102) <<  0);	/* phy_stbl_cnt / 256 (2.5ms) */

    /* TODO: TRM can't decide if actv is 5us or 10us, keep an eye on it */
    write32(&clk_rst->utmip_pll_cfg2,
                              0 << 24 |	/* SAMP_D/XDEV pwrdn */
            CEIL_DIV(khz, 3200) << 18 |	/* phy_actv_cnt / 16 (5us) */
             CEIL_DIV(khz, 256) <<  6 |	/* pllu_stbl_cnt / 256 (1ms) */
                              0 <<  4 |	/* SAMP_C/USB3 pwrdn */
                              0 <<  2 |	/* SAMP_B/XHOST pwrdn */
                              0 <<  0);	/* SAMP_A/USBD pwrdn */

    /* Re-enable the PHY crystal clock now that the PLL is configured. */
    setbits_le32(&clk_rst->utmip_pll_cfg2, 1 << 30); /* PHY_XTAL_CLKEN */
}
/**
 * Convert a time value to RTC ticks, rounding up so an alarm never fires
 * early, and masked to the width of the RTC compare register.
 *
 * @param aTime  Time in ms (kMsTimer) or us (other indices).
 * @param aIndex Which alarm timer the value is for.
 * @return Tick count, truncated to RTC_CC_COMPARE_Msk bits.
 *
 * The multiplication is done in 64 bits: aTime * US_PER_MS * RTC_FREQUENCY
 * overflows uint32_t for large aTime values, silently corrupting the result.
 */
static inline uint32_t TimeToTicks(uint32_t aTime, AlarmIndex aIndex)
{
    uint64_t ticks;

    if (aIndex == kMsTimer)
    {
        ticks = CEIL_DIV((uint64_t)aTime * US_PER_MS * RTC_FREQUENCY, US_PER_S);
    }
    else
    {
        ticks = CEIL_DIV((uint64_t)aTime * RTC_FREQUENCY, US_PER_S);
    }

    return (uint32_t)ticks & RTC_CC_COMPARE_Msk;
}
/**
 * Initialize a fixed-unit memory pool over a caller-supplied backing buffer.
 *
 * @param pPool             Pool control structure to initialize.
 * @param pBackingBuffer    Storage the pool hands out in units.
 * @param backingBufferSize Size of the backing buffer in bytes; must divide
 *                          evenly into numUnits.
 * @param numUnits          Number of allocatable units; must be non-zero.
 * @param pFreeBits         Caller-supplied bitmap (1 bit per unit, 1 = free).
 * @return true on success, false on bad arguments or mutex failure.
 */
bool PoolCreate( MemPool* pPool, uint8_t* pBackingBuffer, uint32_t backingBufferSize, uint32_t numUnits, uint32_t* pFreeBits )
{
    bool retval = false;

    /* numUnits == 0 would divide by zero below; reject it up front. */
    if ( pPool && pBackingBuffer && pFreeBits && numUnits != 0 )
    {
        if ( backingBufferSize % numUnits == 0 )
        {
            uint32_t i = 0;
            pPool->pBackingStore = pBackingBuffer;
            pPool->numOfUnits    = numUnits;
            pPool->pFreeBits     = pFreeBits;

            /* Mark every unit free (all bits set), one 32-bit word at a time. */
            for ( i = 0; i < CEIL_DIV( numUnits, CHAR_BIT * sizeof( uint32_t ) ); i++ )
            {
                *( pFreeBits + i ) = (uint32_t)-1;
            }

            if ( KMutexCreate( &pPool->mutex, "PoolMutex" ) )
            {
                retval = true;
            }
            else
            {
                LOG( "Couldn't Initialize Mutex" );
            }
        }
        else
        {
            LOG( "Can't cleanly allocate %d units from %d bytes", numUnits, backingBufferSize );
        }
    }
    return retval;
}
static unsigned ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info) { if (info->format == IA_CSS_FRAME_FORMAT_RGB565) return 2; /* bytes per pixel */ if (info->format == IA_CSS_FRAME_FORMAT_YUV420_16) return 2; /* bytes per pixel */ if (info->format == IA_CSS_FRAME_FORMAT_YUV422_16) return 2; /* bytes per pixel */ if (info->format == IA_CSS_FRAME_FORMAT_RAW || (info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)) { if (info->raw_bit_depth) return CEIL_DIV(info->raw_bit_depth,8); else return 2; /* bytes per pixel */ } if (info->format == IA_CSS_FRAME_FORMAT_PLANAR_RGB888) return 3; /* bytes per pixel */ if (info->format == IA_CSS_FRAME_FORMAT_RGBA888) return 4; /* bytes per pixel */ if (info->format == IA_CSS_FRAME_FORMAT_QPLANE6) return 2; /* bytes per pixel */ return 1; /* Default is 1 byte per pixel */ }
/*
 * Measure the TSC frequency by timing a countdown of the 8254 PIT
 * channel 2 against rdtsc. Returns TSC ticks scaled by
 * CALIBRATE_DIVISOR (rounded up), or 0 if the CTC was unusable.
 */
static unsigned long long calibrate_tsc(void)
{
    /* Set the Gate high, disable speaker */
    outb((inb(0x61) & ~0x02) | 0x01, 0x61);

    /*
     * Now let's take care of CTC channel 2
     *
     * Set the Gate high, program CTC channel 2 for mode 0,
     * (interrupt on terminal count mode), binary count,
     * load 5 * LATCH count, (LSB and MSB) to begin countdown.
     */
    outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
    outb(CALIBRATE_INTERVAL & 0xff, 0x42);	/* LSB of count */
    outb(CALIBRATE_INTERVAL >> 8, 0x42);	/* MSB of count */

    {
        tsc_t start;
        tsc_t end;
        unsigned long count;

        start = rdtsc();
        count = 0;
        /* Spin until the CTC output (port 0x61 bit 5) goes high. */
        do {
            count++;
        } while ((inb(0x61) & 0x20) == 0);
        end = rdtsc();

        /* Error: ECTCNEVERSET */
        if (count <= 1)
            goto bad_ctc;

        /* 64-bit subtract - gcc just messes up with long longs */
        __asm__("subl %2,%0\n\t"
                "sbbl %3,%1"
                :"=a" (end.lo), "=d" (end.hi)
                :"g" (start.lo), "g" (start.hi),
                 "0" (end.lo), "1" (end.hi));

        /* Error: ECPUTOOFAST */
        if (end.hi)
            goto bad_ctc;

        /* Error: ECPUTOOSLOW */
        if (end.lo <= CALIBRATE_DIVISOR)
            goto bad_ctc;

        return CEIL_DIV(end.lo, CALIBRATE_DIVISOR);
    }

    /*
     * The CTC wasn't reliable: we got a hit on the very first read,
     * or the CPU was so fast/slow that the quotient wouldn't fit in
     * 32 bits..
     */
bad_ctc:
    printk(BIOS_ERR, "bad_ctc\n");
    return 0;
}
/*
   Helper function for ref_zn_array_unpack().

   Inverse operation of ref_zn_array_pack_helper(); each output coefficient
   occupies ceil(b / ULONG_BITS) ulongs.

   Running time is soft-linear in output length.

   res: output array of n coefficients, w ulongs each
   op:  packed integer; the low k bits are padding to be skipped
   n:   number of coefficients to extract
   b:   bits per coefficient
   k:   number of low-order padding bits in op
*/
void
ref_zn_array_unpack_helper (ulong* res, const mpz_t op, size_t n, unsigned b,
                            unsigned k)
{
   ZNP_ASSERT (n >= 1);
   ZNP_ASSERT (mpz_sizeinbase (op, 2) <= n * b + k);

   /* ulongs needed to hold one b-bit coefficient */
   unsigned w = CEIL_DIV (b, ULONG_BITS);

   mpz_t y;
   mpz_init (y);

   if (n == 1)
   {
      // base case: strip the k padding bits, then peel off the single
      // coefficient one ulong at a time, least-significant first
      unsigned i;
      mpz_tdiv_q_2exp (y, op, k);
      for (i = 0; i < w; i++)
      {
         res[i] = mpz_get_ui (y);
         mpz_tdiv_q_2exp (y, y, ULONG_BITS);
      }
   }
   else
   {
      // recursively split into top and bottom halves
      // top half: coefficients n/2 .. n-1, already free of padding
      mpz_tdiv_q_2exp (y, op, (n / 2) * b + k);
      ref_zn_array_unpack_helper (res + w * (n / 2), y, n - n / 2, b, 0);
      // bottom half: coefficients 0 .. n/2-1, still carrying the k pad bits
      mpz_tdiv_r_2exp (y, op, (n / 2) * b + k);
      ref_zn_array_unpack_helper (res, y, n / 2, b, k);
   }

   mpz_clear (y);
}
/** Create new cross-certification object to certify <b>ed_key</b> as the
 * master ed25519 identity key for the RSA identity key <b>rsa_key</b>.
 * Allocates and stores the encoded certificate in *<b>cert</b>, and returns
 * the number of bytes stored. Returns negative on error.*/
ssize_t
tor_make_rsa_ed25519_crosscert(const ed25519_public_key_t *ed_key,
                               const crypto_pk_t *rsa_key,
                               time_t expires,
                               uint8_t **cert)
{
  uint8_t *res;

  rsa_ed_crosscert_t *cc = rsa_ed_crosscert_new();
  memcpy(cc->ed_key, ed_key->pubkey, ED25519_PUBKEY_LEN);
  /* Expiration is stored in hours, rounded up so the cert never expires
   * earlier than requested. */
  cc->expiration = (uint32_t) CEIL_DIV(expires, 3600);
  /* Reserve space for a full-size RSA signature before the first encode,
   * so the encoded length accounts for it. */
  cc->sig_len = crypto_pk_keysize(rsa_key);
  rsa_ed_crosscert_setlen_sig(cc, crypto_pk_keysize(rsa_key));

  ssize_t alloc_sz = rsa_ed_crosscert_encoded_len(cc);
  tor_assert(alloc_sz > 0);
  res = tor_malloc_zero(alloc_sz);
  /* First encode pass: produces the bytes to be signed. */
  ssize_t sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  tor_assert(sz > 0 && sz <= alloc_sz);

  /* Sign only the ed key (32 bytes) + expiration (4 bytes) prefix of the
   * encoding; the signature field itself is excluded. */
  const int signed_part_len = 32 + 4;
  int siglen = crypto_pk_private_sign(rsa_key,
                                      (char*)rsa_ed_crosscert_getarray_sig(cc),
                                      rsa_ed_crosscert_getlen_sig(cc),
                                      (char*)res, signed_part_len);
  tor_assert(siglen > 0 && siglen <= (int)crypto_pk_keysize(rsa_key));
  tor_assert(siglen <= UINT8_MAX);
  cc->sig_len = siglen;
  rsa_ed_crosscert_setlen_sig(cc, siglen);

  /* Second encode pass: now with the real (possibly shorter) signature. */
  sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  rsa_ed_crosscert_free(cc);
  *cert = res;
  return sz;
}
/* See also: ia_css_dma_configure_from_info() */
/*
 * Fill in a DMA port configuration (stride, elements per DDR word,
 * cropping, width in DDR words) from either the metadata resolution or
 * the input-port resolution of <isys_cfg>. Always returns true.
 */
static bool calculate_isys2401_dma_port_cfg(
    const input_system_cfg_t	*isys_cfg,
    bool			raw_packed,
    bool			metadata,
    isys2401_dma_port_cfg_t	*cfg)
{
    int32_t bits_per_pixel;
    int32_t pixels_per_line;
    int32_t align_req_in_bytes;

    /* TODO: Move metadata away from isys_cfg to application layer */
    if (metadata) {
        bits_per_pixel = isys_cfg->metadata.bits_per_pixel;
        pixels_per_line = isys_cfg->metadata.pixels_per_line;
        align_req_in_bytes = isys_cfg->metadata.align_req_in_bytes;
    } else {
        bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
        pixels_per_line = isys_cfg->input_port_resolution.pixels_per_line;
        align_req_in_bytes = isys_cfg->input_port_resolution.align_req_in_bytes;
    }

    cfg->stride = calculate_stride(bits_per_pixel, pixels_per_line, raw_packed,
                                   align_req_in_bytes);

    /* Unpacked data is stored byte-aligned, so round the pixel width up
     * to a whole number of bytes before computing elements per word. */
    if (!raw_packed)
        bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);

    cfg->elements = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
    cfg->cropping = 0;
    /* Line width expressed in whole DDR words. */
    cfg->width = CEIL_DIV(cfg->stride, HIVE_ISP_DDR_WORD_BYTES);

    return true;
}
/*
 * Return the number of bytes per pixel element for a frame format.
 * RGB565 is fixed at 2 bytes; everything else is sized by raw_bit_depth.
 */
static unsigned
ia_css_elems_bytes_from_info (const struct ia_css_frame_info *info)
{
    if (info->format == IA_CSS_FRAME_FORMAT_RGB565)
        return 2; /* 2 bytes per pixel */

    /* raw_bit_depth == 0 would make CEIL_DIV return 0 bytes per pixel,
     * which propagates a division-by-zero hazard into stride/element
     * computations downstream; fall back to 1 byte per pixel instead. */
    if (info->raw_bit_depth == 0)
        return 1;

    return CEIL_DIV(info->raw_bit_depth, 8);
}
/*
 * Populate the choice window (pci) with phrase candidates from the table,
 * and reset the availability info (pai) to a single 1-character span.
 * Returns 0.
 */
int faft_util_produce_phrase_input(ChoiceInfo *pci, AvailInfo *pai)
{
    int i;
    int len = MAX_PHRASE_LEN * MAX_UTF8_SIZE + 1;
    char temp_s[MAX_PHRASE_LEN * MAX_UTF8_SIZE + 1];

    faft_table_get_phrase_again_strn(temp_s, len);

    /* Collect candidates until the table runs out or the window is full. */
    pci->nTotalChoice = 0;
    for (i = 0; i < MAX_CHOICE; i++) {
        strcpy(pci->totalChoiceStr[pci->nTotalChoice], temp_s);
        pci->nTotalChoice++;
        if (faft_table_get_phrase_next_strn(temp_s, len) == 0)
            break;
    }

    pai->avail[0].len = 1;
    pai->avail[0].id = -1;
    pai->nAvail = 1;
    pai->currentAvail = 0;

    /* Fixed page size; the old ">10 clamp" after assigning 10 was dead code. */
    pci->nChoicePerPage = 10;
    pci->nPage = CEIL_DIV(pci->nTotalChoice, pci->nChoicePerPage);
    pci->pageNo = 0;
    /* To prevent cursor from changing by ChoiceEndChoice */
    pci->isSymbol = 2;

    /* The function is declared int but never returned a value — falling off
     * the end is undefined behavior if the caller uses the result. */
    return 0;
}
/*
 * Split `count` work items into (items per block, number of blocks).
 * Starts from one block per compute unit and multiplies the block count
 * until each block's share fits under maxThreadsPerBlock, then rounds the
 * per-block size up to a multiple of 32 (warp size).
 */
std::pair<size_t,size_t>
Backend::computeDivision(size_t count)
{
    size_t i;
    std::pair<size_t,size_t> result;

    result.first = CEIL_DIV(count, numComputeUnits);
    for (i = 1; result.first > maxThreadsPerBlock; i++) {
        result.first = CEIL_DIV(count, i * numComputeUnits);
    }

    /* Round per-block count up to a multiple of 32. */
    size_t adjust = result.first % 32;
    result.first += adjust ? 32 - adjust : 0;

    /* NOTE(review): `i` here is one past the last multiplier actually used
     * by the loop (the final i++ runs after the condition is satisfied), so
     * i * numComputeUnits may overstate the block count by one multiple.
     * The min() with CEIL_DIV(count, result.first) bounds it, but confirm
     * whether i or i-1 was intended. */
    result.second = std::min(i * numComputeUnits,
                             CEIL_DIV(count, result.first));

    return result;
}
/*
   Reference implementation of zn_array_pack().

   (doesn't take into account the s or r parameters)

   Packs n coefficients of b bits each (shifted left by k padding bits)
   from op into the limb array res via an intermediate mpz.
*/
void
ref_zn_array_pack (mp_limb_t* res, const ulong* op, size_t n, unsigned b,
                   unsigned k)
{
   mpz_t x;
   mpz_init (x);
   ref_zn_array_pack_helper (x, op, n, b, k);
   /* n*b+k total bits, rounded up to whole limbs */
   mpz_to_mpn (res, CEIL_DIV (n * b + k, GMP_NUMB_BITS), x);
   mpz_clear (x);
}
/*
   Reference implementation of zn_array_unpack().

   Unpacks n coefficients of b bits each (skipping k low padding bits)
   from the limb array op into res via an intermediate mpz.
*/
void
ref_zn_array_unpack (ulong* res, const mp_limb_t* op, size_t n, unsigned b,
                     unsigned k)
{
   mpz_t x;
   mpz_init (x);
   /* n*b+k total bits, rounded up to whole limbs */
   mpn_to_mpz (x, op, CEIL_DIV (n * b + k, GMP_NUMB_BITS));
   ref_zn_array_unpack_helper (res, x, n, b, k);
   mpz_clear (x);
}
/*
 * Configure the fixed-pattern-noise (FPN) table for a binary. The FPN
 * frame packs two pixels per element, so width and padded width are
 * halved relative to the input frame info.
 */
void
ia_css_fpn_configure(
    const struct ia_css_binary     *binary,
    const struct ia_css_frame_info *info)
{
    struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
    const struct ia_css_fpn_configuration config = {
        &my_info
    };

    my_info.res.width       = CEIL_DIV(info->res.width, 2);  /* Packed by 2x */
    my_info.res.height      = info->res.height;
    my_info.padded_width    = CEIL_DIV(info->padded_width, 2);  /* Packed by 2x */
    my_info.format          = info->format;
    my_info.raw_bit_depth   = FPN_BITS_PER_PIXEL;
    my_info.raw_bayer_order = info->raw_bayer_order;
    my_info.crop_info       = info->crop_info;

    ia_css_configure_fpn(binary, &config);
}
/*
   tests zn_array_unpack() once for given n, b, k

   Packs n*b random bits (shifted by k) into a limb buffer, unpacks it with
   both the target and reference implementations, and compares the results.
   Sentry words around both output buffers catch overwrites.
   Returns 1 on success, 0 on failure.
*/
int
testcase_zn_array_unpack (size_t n, unsigned b, unsigned k)
{
   size_t buf_size = CEIL_DIV (n * b + k, GMP_NUMB_BITS);
   size_t size = n * CEIL_DIV (b, ULONG_BITS);

   mp_limb_t* buf = (mp_limb_t*) malloc (sizeof (mp_limb_t) * buf_size);
   ulong* res = (ulong*) malloc (sizeof (ulong) * (size + 2));
   ulong* ref = (ulong*) malloc (sizeof (ulong) * (size + 2));

   // sentries to check buffer overflow
   res[0] = res[size + 1] = ref[0] = ref[size + 1] = 0x1234;

   // generate random data
   mpz_t x;
   mpz_init (x);
   mpz_urandomb (x, randstate, n * b);
   mpz_mul_2exp (x, x, k);
   mpz_to_mpn (buf, buf_size, x);
   mpz_clear (x);

   // run target and reference implementation
   zn_array_unpack (res + 1, buf, n, b, k);
   ref_zn_array_unpack (ref + 1, buf, n, b, k);

   int success = 1;

   // check sentries
   success = success && (res[0] == 0x1234);
   success = success && (ref[0] == 0x1234);
   success = success && (res[size + 1] == 0x1234);
   success = success && (ref[size + 1] == 0x1234);

   // check correct result
   success = success && (zn_array_cmp (res + 1, ref + 1, size) == 0);

   free (ref);
   free (res);
   free (buf);

   return success;
}
void nrf_log_frontend_hexdump(uint8_t severity, char const * const p_str, const void * const p_data, uint16_t length) { #if (NRF_LOG_DEFERRED == 0) uint32_t timestamp = 0; #if NRF_LOG_USES_TIMESTAMP timestamp = m_log_data.timestamp_func(); #else //NRF_LOG_USES_TIMESTAMP (void) timestamp; #endif //NRF_LOG_USES_TIMESTAMP uint32_t curr_offset = 0; do { curr_offset = m_log_data.hexdump_handler(severity, NRF_LOG_USES_TIMESTAMP ? ×tamp : NULL, p_str, curr_offset, p_data, length, NULL, 0); } while (curr_offset < length); #else //(NRF_LOG_DEFERRED == 0) uint32_t mask = m_log_data.mask; uint32_t wr_idx; if (buf_prealloc(CEIL_DIV(length, 4) + 1, &wr_idx)) { HEXDUMP_HEADER_DEF(header, severity, length); m_log_data.buffer[wr_idx++ & mask] = header.raw; #if NRF_LOG_USES_TIMESTAMP m_log_data.buffer[wr_idx++ & mask] = m_log_data.timestamp_func(); #endif //NRF_LOG_USES_TIMESTAMP m_log_data.buffer[wr_idx++ & mask] = (uint32_t)p_str; uint32_t space0 = sizeof(uint32_t) * (m_log_data.mask + 1 - (wr_idx & mask)); if (length <= space0) { memcpy(&m_log_data.buffer[wr_idx & mask], p_data, length); } else { memcpy(&m_log_data.buffer[wr_idx & mask], p_data, space0); length -= space0; memcpy(&m_log_data.buffer[0], &((uint8_t *)p_data)[space0], length); } } #endif //(NRF_LOG_DEFERRED == 0) }
/*
 * Decode a base16 (hex) chunk into a newly allocated chunk. Two input
 * characters produce one output byte. On decode failure the chunk is
 * freed.
 */
static chunk_t *
b16_dec(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(CEIL_DIV(inp->len, 2));
  int r = base16_decode((char *)ch->buf, ch->len,
                        (char *)inp->buf, inp->len);
  if (r >= 0) {
    ch->len = r;
  } else {
    /* NOTE(review): chunk_free() is presumably a tor_free-style macro that
     * NULLs its argument, making the return below NULL on failure —
     * otherwise this would return a dangling pointer. Confirm against the
     * macro's definition. */
    chunk_free(ch);
  }

  return ch;
}
/*
 * Configure the fixed-pattern-noise (FPN) table for a binary. The FPN
 * frame packs two pixels per element, so width and padded width are
 * halved relative to the input frame info.
 */
void
ia_css_fpn_configure(
    const struct ia_css_binary     *binary,
    const struct ia_css_frame_info *info)
{
    const struct ia_css_frame_info my_info = {
        {
            CEIL_DIV(info->res.width, 2),  /* Packed by 2x */
            info->res.height
        },
        CEIL_DIV(info->padded_width, 2),  /* Packed by 2x */
        info->format,
        FPN_BITS_PER_PIXEL,
        info->raw_bayer_order,
        {
            info->crop_info.start_column,
            info->crop_info.start_line
        }
    };
    const struct ia_css_fpn_configuration config = {
        &my_info
    };

    ia_css_configure_fpn(binary, &config);
}
/** Create new cross-certification object to certify <b>ed_key</b> as the
 * master ed25519 identity key for the RSA identity key <b>rsa_key</b>.
 * Allocates and stores the encoded certificate in *<b>cert</b>, and returns
 * the number of bytes stored. Returns negative on error.*/
ssize_t
tor_make_rsa_ed25519_crosscert(const ed25519_public_key_t *ed_key,
                               const crypto_pk_t *rsa_key,
                               time_t expires,
                               uint8_t **cert)
{
  // It is later than 1985, since otherwise there would be no C89
  // compilers. (Try to diagnose #22466.)
  tor_assert_nonfatal(expires >= 15 * 365 * 86400);

  uint8_t *res;

  rsa_ed_crosscert_t *cc = rsa_ed_crosscert_new();
  memcpy(cc->ed_key, ed_key->pubkey, ED25519_PUBKEY_LEN);
  /* Expiration is stored in hours, rounded up so the cert never expires
   * earlier than requested. */
  cc->expiration = (uint32_t) CEIL_DIV(expires, 3600);
  /* Reserve space for a full-size RSA signature before the first encode,
   * so the encoded length accounts for it. */
  cc->sig_len = crypto_pk_keysize(rsa_key);
  rsa_ed_crosscert_setlen_sig(cc, crypto_pk_keysize(rsa_key));

  ssize_t alloc_sz = rsa_ed_crosscert_encoded_len(cc);
  tor_assert(alloc_sz > 0);
  res = tor_malloc_zero(alloc_sz);
  /* First encode pass: produces the bytes to be signed. */
  ssize_t sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  tor_assert(sz > 0 && sz <= alloc_sz);

  /* The signature covers SHA256(prefix || ed key (32) || expiration (4)),
   * not the raw encoding. */
  crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
  crypto_digest_add_bytes(d, RSA_ED_CROSSCERT_PREFIX,
                          strlen(RSA_ED_CROSSCERT_PREFIX));

  const int signed_part_len = 32 + 4;
  crypto_digest_add_bytes(d, (char*)res, signed_part_len);
  uint8_t digest[DIGEST256_LEN];
  crypto_digest_get_digest(d, (char*)digest, sizeof(digest));
  crypto_digest_free(d);

  int siglen = crypto_pk_private_sign(rsa_key,
                                      (char*)rsa_ed_crosscert_getarray_sig(cc),
                                      rsa_ed_crosscert_getlen_sig(cc),
                                      (char*)digest, sizeof(digest));
  tor_assert(siglen > 0 && siglen <= (int)crypto_pk_keysize(rsa_key));
  tor_assert(siglen <= UINT8_MAX);
  cc->sig_len = siglen;
  rsa_ed_crosscert_setlen_sig(cc, siglen);

  /* Second encode pass: now with the real (possibly shorter) signature. */
  sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  rsa_ed_crosscert_free(cc);
  *cert = res;
  return sz;
}
/**
 * Find the index of the first free unit in the pool's free-bit bitmap,
 * scanning one 32-bit word per recursion level.
 *
 * @param pPool     Pool whose bitmap to scan (1 bit = free unit).
 * @param levelDeep Word index to start scanning from (callers pass 0).
 * @return Index of a free unit, or a value >= numOfUnits if none is free.
 */
static uint32_t GetFreeIndex( MemPool* pPool, uint32_t levelDeep )
{
    if ( levelDeep < CEIL_DIV( pPool->numOfUnits, sizeof( uint32_t ) * CHAR_BIT ) )
    {
        uint32_t bitField = *( pPool->pFreeBits + levelDeep );

        /* __builtin_ctz() is undefined for a zero argument — the old code
         * relied on it returning >= 32 for an empty word, which is UB.
         * Test for emptiness explicitly instead. */
        if ( bitField != 0 )
        {
            uint32_t freeLocation = (uint32_t)__builtin_ctz( bitField );
            return ( levelDeep * ( CHAR_BIT * sizeof( uint32_t ) ) + freeLocation );
        }

        /* This word is fully allocated; try the next one. */
        return GetFreeIndex( pPool, levelDeep + 1 );
    }

    /* Bitmap exhausted: return an index past the last unit. */
    return levelDeep * ( CHAR_BIT * sizeof( uint32_t ) );
}
/*
 * Initialize a single-plane raw frame: compute the line stride as a whole
 * number of DDR words (rounded up), set the frame's total byte size, and
 * fill in the plane descriptor.
 */
static void frame_init_raw_single_plane(
    struct ia_css_frame *frame,
    struct ia_css_frame_plane *plane,
    unsigned int height,
    unsigned int subpixels_per_line,
    unsigned int bits_per_pixel)
{
    unsigned int stride;

    assert(frame != NULL);

    /* Round the line up to whole DDR words of packed pixels. */
    stride = HIVE_ISP_DDR_WORD_BYTES *
             CEIL_DIV(subpixels_per_line,
                      HIVE_ISP_DDR_WORD_BITS / bits_per_pixel);
    frame->data_bytes = stride * height;
    frame_init_plane(plane, subpixels_per_line, stride, height, 0);
    return;
}
/**
 * @brief API to clear data in blocks of persistent memory.
 *
 * Converts the byte count to whole flash pages (rounded up) and enqueues
 * a clear operation for asynchronous processing.
 *
 * @param p_dest Destination handle identifying the region to clear.
 * @param size   Number of bytes to clear.
 * @return NRF_SUCCESS or the enqueue error code.
 */
uint32_t pstorage_raw_clear(pstorage_handle_t * p_dest, uint32_t size)
{
    uint32_t retval;
    uint32_t pages;

    VERIFY_MODULE_INITIALIZED();
    NULL_PARAM_CHECK(p_dest);
    MODULE_RAW_ID_RANGE_CHECK(p_dest);

    retval = NRF_SUCCESS;

    /* Clearing works on whole pages; round the byte count up. */
    pages = CEIL_DIV(size, PSTORAGE_FLASH_PAGE_SIZE);

    retval = cmd_queue_enqueue(PSTORAGE_CLEAR_OP_CODE, p_dest, NULL , pages, 0);

    return retval;
}
/*
 * Derive a DMA port configuration (stride, elements per DDR word, width,
 * crop) from frame info. Raw-packed frames are sized by raw_bit_depth;
 * all other formats by their per-pixel byte count.
 */
void
ia_css_dma_configure_from_info(
    struct dma_port_config *config,
    const struct ia_css_frame_info *info)
{
    unsigned is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
    unsigned bits_per_pixel = is_raw_packed ? info->raw_bit_depth
                                            : ia_css_elems_bytes_from_info(info)*8;
    unsigned pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
    /* Whole DDR words needed to cover one padded line. */
    unsigned words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword);
    unsigned elems_b = pix_per_ddrword;

    config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
    config->elems  = elems_b;
    config->width  = info->res.width;
    config->crop   = 0;
    assert (config->width <= info->padded_width);
}
/**
 * Record an error (code, message, line number, and a stack snapshot) in the
 * in-RAM error log and persist it to the flash error-log page.
 *
 * @param err_code    Error code to record.
 * @param p_message   NUL-terminated message; truncated to fit the log buffer.
 * @param line_number Source line where the error occurred.
 * @return Result of the flash page write.
 */
uint32_t ble_error_log_write(uint32_t err_code, const uint8_t * p_message, uint16_t line_number)
{
    uint8_t error_log_size;

    /* Flash writes are word-granular: log size in 32-bit words, rounded up. */
    error_log_size = CEIL_DIV(sizeof(ble_error_log_data_t), sizeof(uint32_t));

    m_ble_error_log.failure     = true;
    m_ble_error_log.err_code    = err_code;
    m_ble_error_log.line_number = line_number;

    /* Bounded copy with explicit NUL termination. */
    strncpy((char *)m_ble_error_log.message,
            (const char *)p_message,
            ERROR_MESSAGE_LENGTH - 1);
    m_ble_error_log.message[ERROR_MESSAGE_LENGTH - 1] = '\0';

    fetch_stack(&m_ble_error_log);

    return ble_flash_page_write(FLASH_PAGE_ERROR_LOG,
                                (uint32_t *) &m_ble_error_log,
                                error_log_size);
}
/*
   tests zn_array_pack() once for given n, b, k

   Generates n random coefficients of at most b bits, packs them with both
   the target and reference implementations, and compares the limb outputs.
   Sentry words around both output buffers catch overwrites.
   Returns 1 on success, 0 on failure.
*/
int
testcase_zn_array_pack (size_t n, unsigned b, unsigned k)
{
   ZNP_ASSERT (b >= 1);
   ZNP_ASSERT (n >= 1);

   int success = 1;

   ulong* in = (ulong*) malloc (sizeof (ulong) * n);

   size_t size = CEIL_DIV (n * b + k, GMP_NUMB_BITS);
   mp_limb_t* res = (mp_limb_t*) malloc (sizeof (mp_limb_t) * (size + 2));
   mp_limb_t* ref = (mp_limb_t*) malloc (sizeof (mp_limb_t) * (size + 2));

   // sentries to check buffer overflow
   res[0] = res[size + 1] = ref[0] = ref[size + 1] = 0x1234;

   // generate random data: at most b bits per input coefficient, possibly less
   unsigned rand_bits = (b >= ULONG_BITS) ? ULONG_BITS : b;
   rand_bits = random_ulong (rand_bits) + 1;
   ulong max = (rand_bits == ULONG_BITS)
                  ? ((ulong)(-1)) : ((1UL << rand_bits) - 1);

   size_t i;
   for (i = 0; i < n; i++)
      in[i] = random_ulong (max);

   // run target and reference implementation
   zn_array_pack (res + 1, in, n, 1, b, k, 0);
   ref_zn_array_pack (ref + 1, in, n, b, k);

   // check sentries
   success = success && (res[0] == 0x1234);
   success = success && (ref[0] == 0x1234);
   success = success && (res[size + 1] == 0x1234);
   success = success && (ref[size + 1] == 0x1234);

   // check correct result
   success = success && (mpn_cmp (res + 1, ref + 1, size) == 0);

   free (ref);
   free (res);
   free (in);

   return success;
}
/*
 * Program the DRAM Trp (row precharge) timing. Takes the worst-case Trp
 * across all present DIMMs from SPD data, converts it to memory clocks
 * (rounded up), clamps to the controller's supported range, and writes
 * the encoded value to the memory controller. Also enables the DDR2
 * 8-bank device timing constraint.
 */
void SetTrp(DRAM_SYS_ATTR * DramAttr)
{
    u8 Data;
    u16 Max, Tmp;
    u8 Socket;

    /*get the max Trp value from SPD data
       SPD Byte27, Bit7:2->1ns~63ns, Bit1:0->0ns, 0.25ns, 0.50ns, 0.75ns */
    Max = 0;
    for (Socket = 0; Socket < MAX_SOCKETS; Socket++) {
        if (DramAttr->DimmInfo[Socket].bPresence) {
            Tmp = (u16) (DramAttr->
                         DimmInfo[Socket].SPDDataBuf[SPD_SDRAM_TRP]);
            if (Tmp > Max)
                Max = Tmp;
        }
        /*Calculate clock,this value should be 2T,3T,4T,5T */
    }
    /* SPD value is in units of 0.25 ns (Max * 100 / 4 = ns * 100);
       divide by the cycle time to get whole clocks, rounding up. */
    Tmp = (u16) CEIL_DIV(Max * 100, (DramAttr->DramCyc) << 2);
    PRINT_DEBUG_MEM("Trp = ");
    PRINT_DEBUG_MEM_HEX16(Tmp);
    PRINT_DEBUG_MEM("\r");

    /* Clamp to the controller's supported 2T..5T range. */
    if (Tmp > MAX_TRP)
        Tmp = MAX_TRP;
    else if (Tmp < MIN_TRP)
        Tmp = MIN_TRP;

    Tmp -= 2;		//00->2T, 01->3T, 10->4T, 11->5T
    Tmp <<= 1;		//bit1,2,3

    Data = pci_read_config8(MEMCTRL, 0x64);
    Data = (u8) ((Data & 0xf1) | (u8) Tmp);
    pci_write_config8(MEMCTRL, 0x64, Data);

    //enable DDR2 8-Bank Device Timing Constraint
    Data = pci_read_config8(MEMCTRL, 0x62);
    Data = (u8) ((Data & 0xf7) | 0x08);
    pci_write_config8(MEMCTRL, 0x62, Data);
}
/*
 * Arm an RTC compare interrupt for a 32-bit microsecond timestamp,
 * extending it to 64 bits against the current system uptime so counter
 * overflows are handled correctly.
 */
void common_rtc_set_interrupt(uint32_t us_timestamp, uint32_t cc_channel,
                              uint32_t int_mask)
{
    // The internal counter is clocked with a frequency that cannot be easily
    // multiplied to 1 MHz, therefore besides the translation of values
    // (microsecond <-> ticks) a special care of overflows handling must be
    // taken. Here the 32-bit timestamp value is complemented with information
    // about current the system up time of (ticks + number of overflows of tick
    // counter on upper bits, converted to microseconds), and such 64-bit value
    // is then translated to counter ticks. Finally, the lower 24 bits of thus
    // calculated value is written to the counter compare register to prepare
    // the interrupt generation.
    uint64_t current_time64 = common_rtc_64bit_us_get();
    // [add upper 32 bits from the current time to the timestamp value]
    uint64_t timestamp64 = us_timestamp +
        (current_time64 & ~(uint64_t)0xFFFFFFFF);
    // [if the original timestamp value happens to be after the 32 bit counter
    // of microsends overflows, correct the upper 32 bits accordingly]
    if (us_timestamp < (uint32_t)(current_time64 & 0xFFFFFFFF)) {
        timestamp64 += ((uint64_t)1 << 32);
    }
    // [microseconds -> ticks, always round the result up to avoid too early
    // interrupt generation]
    uint32_t compare_value =
        (uint32_t)CEIL_DIV((timestamp64) * RTC_INPUT_FREQ, 1000000);

    // The COMPARE event occurs when the value in compare register is N and
    // the counter value changes from N-1 to N. Therefore, the minimal safe
    // difference between the compare value to be set and the current counter
    // value is 2 ticks. This guarantees that the compare trigger is properly
    // setup before the compare condition occurs.
    uint32_t closest_safe_compare = common_rtc_32bit_ticks_get() + 2;
    // [signed comparison tolerates wrap-around of the tick counter]
    if ((int)(compare_value - closest_safe_compare) <= 0) {
        compare_value = closest_safe_compare;
    }

    nrf_rtc_cc_set(COMMON_RTC_INSTANCE, cc_channel, RTC_WRAP(compare_value));
    nrf_rtc_event_enable(COMMON_RTC_INSTANCE, int_mask);
}
/**
 * Copy a string into the deferred log ring buffer so the caller's buffer
 * need not outlive the log call. In non-deferred mode the pointer is
 * passed through unchanged.
 *
 * @param p_str NUL-terminated string to push.
 * @return Address of the stored copy (cast to uint32_t), or 0/NULL if the
 *         ring buffer could not reserve space.
 */
uint32_t nrf_log_push(char * const p_str)
{
#if (NRF_LOG_DEFERRED == 0)
    return (uint32_t)p_str;
#else //(NRF_LOG_DEFERRED == 0)
    uint32_t mask   = m_log_data.mask;
    /* String length including the terminator, in 32-bit buffer words. */
    uint32_t slen   = strlen(p_str) + 1;
    uint32_t buflen = CEIL_DIV(slen, 4);
    uint32_t offset = 0;
    uint32_t wr_idx;
    char * p_dst_str = (char *)cont_buf_prealloc(buflen, &offset, &wr_idx);
    if (p_dst_str)
    {
        PUSHED_HEADER_DEF(header, offset, buflen);
        m_log_data.buffer[wr_idx++ & mask] = header.raw;
        memcpy(p_dst_str, p_str, slen);
    }
    return (uint32_t)p_dst_str;
#endif //(NRF_LOG_DEFERRED == 0)
}
/**
 * Read the persisted error log from flash. If a failure is recorded, light
 * the log LED and busy-wait so a debugger can inspect (and clear) the log,
 * then erase the flash page.
 *
 * @param error_log Destination for the log contents.
 * @return NRF_SUCCESS if no log exists or nothing failed; otherwise the
 *         result of the flash read/erase.
 */
uint32_t ble_error_log_read(ble_error_log_data_t * error_log)
{
    /* Log size in 32-bit words, rounded up, for the flash read. */
    uint8_t  error_log_size = CEIL_DIV(sizeof(ble_error_log_data_t),
                                       sizeof(uint32_t));
    uint32_t err_code       = NRF_SUCCESS;

    err_code = ble_flash_page_read(FLASH_PAGE_ERROR_LOG,
                                   (uint32_t *) error_log,
                                   &error_log_size);

    // If nothing is in flash; then return NRF_SUCCESS.
    if (err_code == NRF_ERROR_NOT_FOUND)
    {
        return NRF_SUCCESS;
    }

    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }

    if (!error_log->failure)
    {
        return NRF_SUCCESS;
    }

    nrf_gpio_pin_set(LOG_LED_PIN_NO); // Notify that a message exists in the log.

    /* Deliberate busy-wait: intended to be exited from a debugger (set
     * failure=false) or by an application-defined clear mechanism. */
    while (error_log->failure && !m_ble_log_clear_flag)
    {
        // Put breakpoint, and read data, then log->failure=false; to continue in debug mode.
        // In application, define how to clear the error log,
        // e.g. read button 6, if pressed, then clear log and continue.
    }

    nrf_gpio_pin_clear(LOG_LED_PIN_NO);

    err_code = ble_flash_page_erase(FLASH_PAGE_ERROR_LOG);

    return err_code;
}