/**
 * \internal
 * \brief Out-of-line core of uart_baud_rate_is_valid() with fewer parameters
 *
 * Called through an inline wrapper so the public API is preserved while
 * avoiding some argument-setup overhead at the call sites.
 *
 * \param flags  USART control flags; only the CLK2X bit is examined here.
 * \param rate   Requested baud rate in Hz.
 * \return true if \a rate lies inside the achievable [min, max] window.
 */
bool uart_priv_baud_rate_is_valid(uint8_t flags, uint32_t rate)
{
	/* Fastest achievable rate: CPU clock divided by 8 samples per bit. */
	uint32_t max_rate = CONFIG_CPU_HZ / 8;
	/* Slowest achievable rate: largest divider (128 * 8 * 4096). */
	uint32_t min_rate = div_ceil(CONFIG_CPU_HZ, 128UL * 8 * 4096);

	/* Without double-speed (CLK2X) operation, both window edges shift. */
	if (!(flags & USART_BIT(CLK2X))) {
		max_rate /= 2;
		min_rate = div_ceil(min_rate, 2);
	}

	return (rate >= min_rate) && (rate <= max_rate);
}
/**
 * \brief Compute the peripheral-bus clock divider for a requested baud rate.
 *
 * The divider is rounded up so the resulting rate never exceeds \a baudrate.
 *
 * \param baudrate  Requested baud rate in Hz (must be non-zero).
 * \param pb_hz     Peripheral bus clock frequency in Hz.
 * \return Divider in [1, 255], or -1 when the rate is unreachable
 *         (divider out of range) or \a baudrate is zero.
 */
int16_t getBaudDiv(const uint32_t baudrate, uint32_t pb_hz)
{
	/* Fix: a zero baud rate would cause a division by zero below. */
	if (baudrate == 0) {
		return -1;
	}

	uint32_t baudDiv = div_ceil(pb_hz, baudrate);

	/* baudDiv is unsigned, so the only "non-positive" value is zero. */
	if (baudDiv == 0 || baudDiv > 255) {
		return -1;
	}
	return (int16_t)baudDiv;
}
/**
 * \brief Compute the peripheral-bus clock divider for a requested baud rate.
 *
 * \param baudrate  Requested baud rate in Hz (must be non-zero).
 * \param pb_hz     Peripheral bus clock frequency in Hz.
 * \return Divider in [1, 255], or -1 when out of range or \a baudrate is zero.
 */
int16_t getBaudDiv(const unsigned int baudrate, uint32_t pb_hz)
{
	/* Fix: a zero baud rate would cause a division by zero below. */
	if (baudrate == 0) {
		return -1;
	}

	/* NOTE(review): adding baudrate/2 (nearest-rounding) on top of
	 * div_ceil() (up-rounding) rounds up twice and can overshoot the
	 * divider by one; the sibling overload uses plain
	 * div_ceil(pb_hz, baudrate). Preserved as-is to keep behavior --
	 * confirm against callers before changing. */
	int baudDiv = div_ceil((pb_hz + baudrate / 2), baudrate);

	if (baudDiv <= 0 || baudDiv > 255) {
		return -1;
	}
	return baudDiv;
}
/* Construct a bitmask over `symbols` source symbols; initially every
 * symbol is a "hole" (not yet received/repaired) and every mask word is 0. */
Bitmask::Bitmask (const uint16_t symbols)
	: _max_nonrepair (symbols)
{
	/* All symbols start out missing. */
	holes = _max_nonrepair;
	/* NOTE(review): this divides by sizeof(size_t) (bytes), not by the
	 * number of bits in a size_t (8 * sizeof(size_t)). If `mask` is a
	 * one-bit-per-symbol mask this over-allocates ~8x (harmless but
	 * wasteful) -- confirm against the accessor code. */
	size_t max_element = static_cast<size_t> (div_ceil (_max_nonrepair,
													sizeof(size_t)));
	/* +1 guard word; vector is filled with zeroes below. */
	mask.reserve (max_element + 1);
	for (size_t i = 0; i <= max_element; ++i)
		mask.push_back (0);
}
/* Construct a packed hash-table array.
 *
 * _size           requested capacity (rounded up to a power of two)
 * _key_len        key length in bits
 * _val_len        value length in bits
 * _reprobe_limit  maximum reprobe count
 * _reprobes       externally owned reprobe-offset table (not copied)
 *
 * Raises ErrorAllocation if the backing memory block could not be mapped. */
array(size_t _size, uint_t _key_len, uint_t _val_len,
      uint_t _reprobe_limit, size_t *_reprobes) :
  lsize(ceilLog2(_size)),                 // log2 of capacity, rounded up
  size(((size_t)1) << lsize),             // actual capacity: power of two >= _size
  size_mask(size - 1),                    // mask for cheap modulo-size
  reprobe_limit(_reprobe_limit, _reprobes, size),
  key_len(_key_len),
  // Bits of the key not implied by the bucket index (0 when the index
  // already covers the whole key).
  key_mask(key_len <= lsize ? 0 : (((word)1) << (key_len - lsize)) - 1),
  key_off(key_len <= lsize ? 0 : key_len - lsize),
  offsets(key_off + bitsize(reprobe_limit.val() + 1), _val_len,
          reprobe_limit.val() + 1),
  // Backing storage sized in whole blocks of entries.
  mem_block(div_ceil(size, (size_t)offsets.get_block_len()) *
            offsets.get_block_word_len() * sizeof(word)),
  data((word *)mem_block.get_ptr()),
  reprobes(_reprobes),
  hash_matrix(key_len),
  // Random invertible hashing matrix plus its inverse for key recovery.
  hash_inverse_matrix(hash_matrix.init_random_inverse())
{
  if(!data)
    eraise(ErrorAllocation) << "Failed to allocate "
                            << (div_ceil(size, (size_t)offsets.get_block_len()) *
                                offsets.get_block_word_len() * sizeof(word))
                            << " bytes of memory";
}
/* Construct a packed hash-table array (simpler variant: no hash matrix,
 * tracks a zero-key counter instead).
 *
 * _size           requested capacity (rounded up to a power of two)
 * _key_len        key length in bits
 * _val_len        value length in bits
 * _reprobe_limit  maximum reprobe count
 * _reprobes       externally owned reprobe-offset table (not copied)
 *
 * NOTE(review): on allocation failure this only prints to stderr and leaves
 * `data` null (the TODO below acknowledges it should throw); callers must
 * not rely on a usable object in that case. */
array(size_t _size, uint_t _key_len, uint_t _val_len,
      uint_t _reprobe_limit, size_t *_reprobes) :
  size(((size_t)1) << ceilLog2(_size)),   // capacity: power of two >= _size
  size_mask(size - 1),                    // mask for cheap modulo-size
  reprobe_limit(_reprobe_limit),
  offsets(_key_len, _val_len, _reprobe_limit),
  // Backing storage sized in whole blocks of entries.
  mem_block(div_ceil(size, (size_t)offsets.get_block_len()) *
            offsets.get_block_word_len() * sizeof(word)),
  data((word *)mem_block.get_ptr()),
  zero_count(0),
  reprobes(_reprobes)
{
  if(!data) {
    // TODO: should throw an error
    std::cerr << "allocation failed";
  }
}
/**
 * Converts blktrace blocks to PV extents
 *
 * Consumes the portion of the IO that falls inside the first extent it
 * touches; when the IO spans several extents the caller re-invokes the
 * function until it returns 0.
 *
 * @param[in,out] offset block where the IO started; advanced to the next
 *                       extent boundary when the IO spans extents
 * @param[in,out] len    number of blocks in IO; reduced by the blocks
 *                       consumed from the current extent
 * @param[out] extent    extent number containing *offset
 * @param ssize sector size (in bytes)
 * @param esize extent size (in bytes)
 * @return 0 if whole IO fits into extent, 1 if function needs to be run again
 */
int trace_blocks_to_extents(int64_t *offset, int64_t *len, int64_t *extent,
    size_t ssize, size_t esize)
{
	assert(offset);
	assert(*offset >= 0);
	assert(len);
	assert(*len > 0);
	assert(extent);
	assert(ssize > 0);
	assert(esize > 0);

	/* sectors (blocks) per extent */
	int64_t s_in_e = div_ceil(esize, ssize);

	*extent = *offset / s_in_e;

	/* Whole IO inside a single extent: nothing left to split. */
	if ((*offset) / s_in_e == (*offset + *len - 1) / s_in_e) {
		return 0;
	}

	/* Blocks from *offset up to (exclusive) the next extent boundary.
	 * Fix: the original subtracted one block too few from *len
	 * ("consumed - 1"), leaving a phantom block appended to the IO on
	 * every boundary crossing (and a non-terminating loop when an
	 * extent is a single block). */
	int64_t consumed = (*extent + 1) * s_in_e - *offset;
	*len -= consumed;
	*offset += consumed;

	return 1;
}
/*
 * FUSE write handler: erasure-codes the buffer into M chunks of
 * ceil(size / K) bytes each and sends one chunk to each slave server.
 *
 * Returns `size` (bytes accepted); per-chunk RPC status is currently
 * not propagated (see TODO below).
 */
static int ne_write(const char *path, const char *buf, size_t size,
		    off_t offset, struct fuse_file_info *fi)
{
	static ne_write_res res;
	ne_write_arg arg;
	int stat, i, chunksize;
	char *output;
	(void) fi;

	memset((char *)&res, 0, sizeof(res));
	arg.path = strdup(path);
	// arg.size = size;
	arg.offset = offset;
	// arg.buf = strdup(buf);

	/* Each of the K data chunks holds ceil(size / K) bytes. */
	chunksize = div_ceil(size, K);
	arg.size = chunksize;
	output = encode(buf, K, M, chunksize);

	for (i = 0; i < M; i++) {
		arg.buf = strndup(output + (i * chunksize), chunksize);
		stat = cs_write(arg, &res, slave[i]);
		/* Fix: the strndup'd chunk was leaked on every iteration. */
		free(arg.buf);
	}
	//TODO:
	//staterr&xdrfree
	// size = res.res;
	//test = size;

	/* Fix: the strdup'd path was never freed. */
	free(arg.path);
	free(output);
	dirty = 1;
	return size;
}
/*
 * Report how many extents (contiguous block runs) `filename` occupies.
 * Walks every logical block with the FIBMAP ioctl and counts
 * discontinuities; on ext2-style filesystems it also accounts for
 * indirect blocks and prints the "perfect" extent count.
 */
static void frag_report(const char *filename)
{
	struct statfs	fsinfo;
#ifdef HAVE_FSTAT64
	struct stat64	fileinfo;
#else
	struct stat	fileinfo;
#endif
	int	bs;
	long	fd;
	unsigned long	block, last_block = 0, numblocks, i, count;
	long	bpib;	/* Blocks per indirect block */
	long	cylgroups;
	int	discont = 0, expected;
	int	is_ext2 = 0;
	unsigned int	flags;

	if (statfs(filename, &fsinfo) < 0) {
		perror("statfs");
		return;
	}
#ifdef HAVE_FSTAT64
	if (stat64(filename, &fileinfo) < 0) {
#else
	if (stat(filename, &fileinfo) < 0) {
#endif
		perror("stat");
		return;
	}
	if (!S_ISREG(fileinfo.st_mode)) {
		printf("%s: Not a regular file\n", filename);
		return;
	}
	/* ext2/ext3/ext4 superblock magic values. */
	if ((fsinfo.f_type == 0xef51) || (fsinfo.f_type == 0xef52) ||
	    (fsinfo.f_type == 0xef53))
		is_ext2++;
	if (verbose) {
		printf("Filesystem type is: %lx\n",
		       (unsigned long) fsinfo.f_type);
	}
	/* One cylinder group covers one block bitmap: f_bsize*8 blocks. */
	cylgroups = div_ceil(fsinfo.f_blocks, fsinfo.f_bsize*8);
	if (verbose) {
		printf("Filesystem cylinder groups is approximately %ld\n",
		       cylgroups);
	}
#ifdef HAVE_OPEN64
	fd = open64(filename, O_RDONLY);
#else
	fd = open(filename, O_RDONLY);
#endif
	if (fd < 0) {
		perror("open");
		return;
	}
	if (ioctl(fd, FIGETBSZ, &bs) < 0) { /* FIGETBSZ takes an int */
		perror("FIGETBSZ");
		close(fd);
		return;
	}
	if (ioctl(fd, EXT3_IOC_GETFLAGS, &flags) < 0)
		flags = 0;
	/* Extent-mapped files have no indirect blocks: disable the
	 * indirect-block accounting below. */
	if (flags & EXT4_EXTENTS_FL) {
		if (verbose)
			printf("File is stored in extents format\n");
		is_ext2 = 0;
	}
	if (verbose)
		printf("Blocksize of file %s is %d\n", filename, bs);
	/* Each indirect block holds bs/4 four-byte block pointers. */
	bpib = bs / 4;
	numblocks = (fileinfo.st_size + (bs-1)) / bs;
	if (verbose) {
		printf("File size of %s is %lld (%ld blocks)\n", filename,
		       (long long) fileinfo.st_size, numblocks);
		printf("First block: %lu\nLast block: %lu\n",
		       get_bmap(fd, 0), get_bmap(fd, numblocks - 1));
	}
	for (i=0, count=0; i < numblocks; i++) {
		/* Skip over the metadata (indirect) block that precedes
		 * each indirect/double/triple-indirect region. */
		if (is_ext2 && last_block) {
			if (((i-EXT2_DIRECT) % bpib) == 0)
				last_block++;
			if (((i-EXT2_DIRECT-bpib) % (bpib*bpib)) == 0)
				last_block++;
			if (((i-EXT2_DIRECT-bpib-bpib*bpib) % (bpib*bpib*bpib)) == 0)
				last_block++;
		}
		block = get_bmap(fd, i);
		/* Holes (unallocated blocks) map to 0 and are skipped. */
		if (block == 0)
			continue;
		count++;
		if (last_block && (block != last_block +1) ) {
			if (verbose)
				printf("Discontinuity: Block %ld is at %lu (was %lu)\n",
				       i, block, last_block);
			discont++;
		}
		last_block = block;
	}
	if (discont==0)
		printf("%s: 1 extent found", filename);
	else
		printf("%s: %d extents found", filename, discont+1);
	/* Ideal extent count: allocated blocks over usable blocks per
	 * group (group size minus inode-table overhead estimate). */
	expected = (count/((bs*8)-(fsinfo.f_files/8/cylgroups)-3))+1;
	if (is_ext2 && expected < discont+1)
		printf(", perfection would be %d extent%s\n", expected,
			(expected>1) ? "s" : "");
	else
		fputc('\n', stdout);
	close(fd);
}

/* Print command usage and terminate with a failure status. */
static void usage(const char *progname)
{
	fprintf(stderr, "Usage: %s [-v] file ...\n", progname);
	exit(1);
}
/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev:	DRBD device.
 * @bdev:	Block device to read form.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction *buffer;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	/* Number of on-disk transaction slots in the cyclic log. */
	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped.
	 */
	mutex_lock(&mdev->md_io_mutex);
	buffer = page_address(mdev->md_io_page);

	/* Find the valid transaction in the log: scan all slots and track
	 * the oldest ("from") and newest ("to") by transaction number,
	 * comparing via signed subtraction so wrap-around is handled. */
	for (i = 0; i <= mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		/* rv == 0: slot invalid (bad magic/checksum) -- skip. */
		if (rv == 0)
			continue;
		if (rv == -1) {
			/* IO error: abort the whole restore. */
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}
		cnr = be32_to_cpu(buffer->tr_number);

		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx+1);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		/* An empty log is not an IO error: report success. */
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
	i = from;
	while (1) {
		int j, pos;
		unsigned int extent_nr;
		unsigned int trn;

		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		ERR_IF(rv == 0) goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		/* NOTE(review): trn is read but not used in this loop --
		 * presumably kept for debugging; confirm before removing. */
		trn = be32_to_cpu(buffer->tr_number);

		spin_lock_irq(&mdev->al_lock);

		/* This loop runs backwards because in the cyclic
		   elements there might be an old version of the
		   updated element (in slot 0). So the element in slot 0
		   can overwrite old versions. */
		for (j = AL_EXTENTS_PT; j >= 0; j--) {
			pos = be32_to_cpu(buffer->updates[j].pos);
			extent_nr = be32_to_cpu(buffer->updates[j].extent);

			/* LC_FREE marks an unused update slot. */
			if (extent_nr == LC_FREE)
				continue;

			lc_set(mdev->act_log, extent_nr, pos);
			active_extents++;
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

cancel:
		if (i == to)
			break;
		/* Advance cyclically through the slots. */
		i++;
		if (i > mx)
			i = 0;
	}

	/* Next transaction number / slot position continue after "to". */
	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = to;
	if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
	     transactions, active_extents);

	return 1;
}
/*
 * Write one activity-log transaction to the on-disk metadata area.
 * Records the updated extent in slot 0 plus a cyclic window of
 * AL_EXTENTS_PT currently-active extents, protected by an xor checksum.
 * Always returns 1 (work item consumed); IO errors are reported through
 * drbd_chk_io_error().
 */
int w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct lc_element *updated = aw->al_ext;
	const unsigned int new_enr = aw->enr;
	const unsigned int evicted = aw->old_enr;
	struct al_transaction *buffer;
	sector_t sector;
	int i, n, mx;
	unsigned int extent_nr;
	u32 xor_sum = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "disk is %s, cannot start al transaction (-%d +%d)\n",
			drbd_disk_str(mdev->state.disk), evicted, new_enr);
		complete(&((struct update_al_work *)w)->event);
		return 1;
	}
	/* do we have to do a bitmap write, first?
	 * TODO reduce maximum latency:
	 * submit both bios, then wait for both,
	 * instead of doing two synchronous sector writes.
	 * For now, we must not write the transaction,
	 * if we cannot write out the bitmap of the evicted extent. */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));

	/* The bitmap write may have failed, causing a state change. */
	if (mdev->state.disk < D_INCONSISTENT) {
		dev_err(DEV,
			"disk is %s, cannot write al transaction (-%d +%d)\n",
			drbd_disk_str(mdev->state.disk), evicted, new_enr);
		complete(&((struct update_al_work *)w)->event);
		put_ldev(mdev);
		return 1;
	}

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
	buffer = (struct al_transaction *)page_address(mdev->md_io_page);

	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	n = lc_index_of(mdev->act_log, updated);

	/* Slot 0 always carries the freshly updated extent. */
	buffer->updates[0].pos = cpu_to_be32(n);
	buffer->updates[0].extent = cpu_to_be32(new_enr);

	xor_sum ^= new_enr;

	/* Fill the remaining slots with the next window of active extents,
	 * cycling through the whole activity log across transactions. */
	mx = min_t(int, AL_EXTENTS_PT,
	    mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->updates[i+1].pos = cpu_to_be32(idx);
		buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
		xor_sum ^= extent_nr;
	}
	/* Pad unused slots so the checksum stays well-defined. */
	for (; i < AL_EXTENTS_PT; i++) {
		buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
		buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
		xor_sum ^= LC_FREE;
	}
	mdev->al_tr_cycle += AL_EXTENTS_PT;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	buffer->xor_sum = cpu_to_be32(xor_sum);

	/* Target sector within the on-disk AL ring. */
	sector = mdev->ldev->md.md_offset
	      + mdev->ldev->md.al_offset + mdev->al_tr_pos;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, true);

	/* Advance the ring position, wrapping after the last slot. */
	if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
	mdev->al_tr_number++;

	mutex_unlock(&mdev->md_io_mutex);

	complete(&((struct update_al_work *)w)->event);
	put_ldev(mdev);

	return 1;
}
/*
 * Report how many extents `filename` occupies. Prefers the FIEMAP ioctl
 * (filefrag_fiemap); falls back to a per-block FIBMAP walk when FIEMAP
 * is unavailable or bmap output was forced. Filesystem-wide details are
 * printed only for the first file (`once`).
 */
static void frag_report(const char *filename)
{
	struct statfs	fsinfo;
#ifdef HAVE_FSTAT64
	struct stat64	fileinfo;
#else
	struct stat	fileinfo;
#endif
	int	bs;
	long	fd;
	unsigned long	block, last_block = 0, numblocks, i, count;
	long	bpib;	/* Blocks per indirect block */
	long	cylgroups;
	int	num_extents = 0, expected;
	int	is_ext2 = 0;
	static int	once = 1;
	unsigned int	flags;
	int rc;

#ifdef HAVE_OPEN64
	fd = open64(filename, O_RDONLY);
#else
	fd = open(filename, O_RDONLY);
#endif
	if (fd < 0) {
		perror("open");
		return;
	}

	/* NOTE(review): the statfs/stat error paths below return without
	 * closing fd -- confirm whether the descriptor leak matters for a
	 * short-lived CLI tool. */
	if (statfs(filename, &fsinfo) < 0) {
		perror("statfs");
		return;
	}
#ifdef HAVE_FSTAT64
	if (stat64(filename, &fileinfo) < 0) {
#else
	if (stat(filename, &fileinfo) < 0) {
#endif
		perror("stat");
		return;
	}
	if (ioctl(fd, EXT3_IOC_GETFLAGS, &flags) < 0)
		flags = 0;
	/* Indirect-block accounting only applies to non-extent files on
	 * ext2/ext3/ext4 (magic values below). */
	if (!(flags & EXT4_EXTENTS_FL) &&
	    ((fsinfo.f_type == 0xef51) || (fsinfo.f_type == 0xef52) ||
	     (fsinfo.f_type == 0xef53)))
		is_ext2++;
	if (verbose && once)
		printf("Filesystem type is: %lx\n",
		       (unsigned long) fsinfo.f_type);

	/* One cylinder group covers one block bitmap: f_bsize*8 blocks. */
	cylgroups = div_ceil(fsinfo.f_blocks, fsinfo.f_bsize*8);
	if (verbose && is_ext2 && once)
		printf("Filesystem cylinder groups is approximately %ld\n",
		       cylgroups);

	/* Column width for physical block numbers in verbose output. */
	physical_width = int_log10(fsinfo.f_blocks);
	if (physical_width < 8)
		physical_width = 8;

	if (ioctl(fd, FIGETBSZ, &bs) < 0) { /* FIGETBSZ takes an int */
		perror("FIGETBSZ");
		close(fd);
		return;
	}

	if (no_bs)
		bs = 1024;

	/* Each indirect block holds bs/4 four-byte block pointers. */
	bpib = bs / 4;
	numblocks = (fileinfo.st_size + (bs-1)) / bs;
	logical_width = int_log10(numblocks);
	if (logical_width < 7)
		logical_width = 7;
	filesize = (long long)fileinfo.st_size;
	if (verbose)
		printf("File size of %s is %lld (%ld block%s, blocksize %d)\n",
		       filename, (long long) fileinfo.st_size, numblocks,
		       numblocks == 1 ? "" : "s", bs);
	/* FIEMAP first; fall back to the per-block FIBMAP walk. */
	if (force_bmap ||
	    filefrag_fiemap(fd, int_log2(bs), &num_extents) != 0) {
		for (i = 0, count = 0; i < numblocks; i++) {
			/* Skip the metadata (indirect) block preceding each
			 * indirect/double/triple-indirect region. */
			if (is_ext2 && last_block) {
				if (((i-EXT2_DIRECT) % bpib) == 0)
					last_block++;
				if (((i-EXT2_DIRECT-bpib) % (bpib*bpib)) == 0)
					last_block++;
				if (((i-EXT2_DIRECT-bpib-bpib*bpib) %
				     (bpib*bpib*bpib)) == 0)
					last_block++;
			}
			rc = get_bmap(fd, i, &block);
			/* Holes map to 0 and are skipped. */
			if (block == 0)
				continue;
			if (!num_extents)
				num_extents++;
			count++;
			if (last_block && (block != last_block+1) ) {
				if (verbose)
					printf("Discontinuity: Block %ld is at "
					       "%lu (was %lu)\n",
					       i, block, last_block+1);
				num_extents++;
			}
			last_block = block;
		}
	}
	if (num_extents == 1)
		printf("%s: 1 extent found", filename);
	else
		printf("%s: %d extents found", filename, num_extents);
	/* Ideal extent count: allocated blocks over usable blocks per
	 * group (group size minus inode-table overhead estimate). */
	expected = (count/((bs*8)-(fsinfo.f_files/8/cylgroups)-3))+1;
	if (is_ext2 && expected < num_extents)
		printf(", perfection would be %d extent%s\n", expected,
			(expected>1) ? "s" : "");
	else
		fputc('\n', stdout);
	close(fd);
	once = 0;
}

/* Print command usage and terminate with a failure status. */
static void usage(const char *progname)
{
	fprintf(stderr, "Usage: %s [-Bbvsx] file ...\n", progname);
	exit(1);
}
/*
 * Construct a provable prime p of `bits` bits suitable for RSA with
 * public exponent e (a Shawe-Taylor style construction: a smaller
 * provable prime p0 is generated first, then candidates of the form
 * p = 2*t*p0 + 1 are tested; randomness is expanded from `seed` via
 * repeated hashing).
 *
 * On success returns 1 and leaves the prime in p; the final seed state is
 * copied to prime_seed/prime_seed_length when requested. Returns 0 on
 * failure (allocation, seed overflow, or candidate budget of bits*5
 * exhausted).
 */
static int
rsa_provable_prime (mpz_t p,
		    unsigned *prime_seed_length, void *prime_seed,
		    unsigned bits,
		    unsigned seed_length, const void *seed,
		    mpz_t e,
		    void *progress_ctx, nettle_progress_func * progress)
{
  mpz_t x, t, s, r1, r2, p0, sq;
  int ret;
  unsigned pcounter = 0;
  unsigned iterations;
  unsigned storage_length = 0, i;
  uint8_t *storage = NULL;
  uint8_t pseed[MAX_PVP_SEED_SIZE+1];
  unsigned pseed_length = sizeof(pseed), tseed_length;
  /* Candidate budget: give up after bits*5 attempts. */
  unsigned max = bits*5;

  mpz_init(p0);
  mpz_init(sq);
  mpz_init(x);
  mpz_init(t);
  mpz_init(s);
  mpz_init(r1);
  mpz_init(r2);

  /* p1 = p2 = 1 */

  /* First stage: provable prime p0 of a little over bits/2 bits. */
  ret = st_provable_prime(p0, &pseed_length, pseed,
			  NULL, 1+div_ceil(bits,2), seed_length,
			  seed, progress_ctx, progress);
  if (ret == 0)
    {
      goto cleanup;
    }

  /* Number of hash outputs needed to cover `bits` bits of randomness. */
  iterations = div_ceil(bits, DIGEST_SIZE*8);
  mpz_set_ui(x, 0);

  if (iterations > 0)
    {
      storage_length = iterations * DIGEST_SIZE;
      storage = malloc(storage_length);
      if (storage == NULL)
	{
	  goto fail;
	}

      /* s: running seed counter, incremented once per hash. */
      nettle_mpz_set_str_256_u(s, pseed_length, pseed);

      for (i = 0; i < iterations; i++)
	{
	  tseed_length = mpz_seed_sizeinbase_256_u(s, pseed_length);
	  if (tseed_length > sizeof(pseed))
	    goto fail;
	  nettle_mpz_get_str_256(tseed_length, pseed, s);

	  /* Hash outputs are laid down back-to-front so the first hash
	   * ends up in the most significant position of x. */
	  hash(&storage[(iterations - i - 1) * DIGEST_SIZE],
	       tseed_length, pseed);
	  mpz_add_ui(s, s, 1);
	}

      nettle_mpz_set_str_256_u(x, storage_length, storage);
    }

  /* x = sqrt(2)*2^(bits-1) + (x mod 2^(bits) - sqrt(2)*2(bits-1)) */
  /* sq = sqrt(2)*2^(bits-1) */
  mpz_set_ui(r1, 1);
  mpz_mul_2exp(r1, r1, 2*bits-1);
  mpz_sqrt(sq, r1);

  /* r2 = 2^bits - sq */
  mpz_set_ui(r2, 1);
  mpz_mul_2exp(r2, r2, bits);
  mpz_sub(r2, r2, sq);

  /* x = sqrt(2)*2^(bits-1) + (x mod (2^L - sqrt(2)*2^(bits-1)) */
  mpz_mod(x, x, r2);
  mpz_add(x, x, sq);

  /* y = p2 = p1 = 1 */

  /* r1 = (2 y p0 p1) */
  mpz_mul_2exp(r1, p0, 1);

  /* r2 = 2 p0 p1 p2 (p2=y=1) */
  mpz_set(r2, r1);

  /* r1 = (2 y p0 p1) + x */
  mpz_add(r1, r1, x);

  /* t = ((2 y p0 p1) + x) / (2 p0 p1 p2) */
  mpz_cdiv_q(t, r1, r2);

 retry:
  /* p = t p2 - y = t - 1 */
  mpz_sub_ui(p, t, 1);

  /* p = 2(tp2-y)p0p1 */
  mpz_mul(p, p, p0);
  mpz_mul_2exp(p, p, 1);

  /* p = 2(tp2-y)p0p1 + 1*/
  mpz_add_ui(p, p, 1);

  mpz_set_ui(r2, 1);
  mpz_mul_2exp(r2, r2, bits);

  /* If the candidate overflowed `bits` bits, restart t at the bottom of
   * the valid interval. */
  if (mpz_cmp(p, r2) > 0)
    {
      /* t = (2 y p0 p1) + sqrt(2)*2^(bits-1) / (2p0p1p2) */
      mpz_set(r1, p0);
      /* r1 = (2 y p0 p1) */
      mpz_mul_2exp(r1, r1, 1);

      /* sq = sqrt(2)*2^(bits-1) */

      /* r1 = (2 y p0 p1) + sq */
      mpz_add(r1, r1, sq);

      /* r2 = 2 p0 p1 p2 */
      mpz_mul_2exp(r2, p0, 1);

      /* t = ((2 y p0 p1) + sq) / (2 p0 p1 p2) */
      mpz_cdiv_q(t, r1, r2);
    }

  pcounter++;

  /* r2 = p - 1 */
  mpz_sub_ui(r2, p, 1);

  /* r1 = GCD(p1, e) */
  mpz_gcd(r1, e, r2);

  /* Require gcd(e, p-1) == 1 so e is invertible mod p-1 (RSA). */
  if (mpz_cmp_ui(r1, 1) == 0)
    {
      mpz_set_ui(x, 0); /* a = 0 */

      /* Derive the witness a from the next hashes of the seed chain. */
      if (iterations > 0)
	{
	  for (i = 0; i < iterations; i++)
	    {
	      tseed_length = mpz_seed_sizeinbase_256_u(s, pseed_length);
	      if (tseed_length > sizeof(pseed))
		goto fail;
	      nettle_mpz_get_str_256(tseed_length, pseed, s);

	      hash(&storage[(iterations - i - 1) * DIGEST_SIZE],
		   tseed_length, pseed);
	      mpz_add_ui(s, s, 1);
	    }

	  nettle_mpz_set_str_256_u(x, storage_length, storage);
	}

      /* a = 2 + a mod p-3 */
      mpz_sub_ui(r1, p, 3);	/* p is too large to worry about negatives */
      mpz_mod(x, x, r1);
      mpz_add_ui(x, x, 2);

      /* z = a^(2(tp2-y)p1) mod p */

      /* r1 = (tp2-y) */
      mpz_sub_ui(r1, t, 1);
      /* r1 = 2(tp2-y)p1 */
      mpz_mul_2exp(r1, r1, 1);

      /* z = r2 = a^r1 mod p */
      mpz_powm(r2, x, r1, p);

      mpz_sub_ui(r1, r2, 1);

      /* Pocklington-style test: p is prime if gcd(z-1, p) == 1 and
       * z^p0 == 1 (mod p). */
      mpz_gcd(x, r1, p);

      if (mpz_cmp_ui(x, 1) == 0)
	{
	  mpz_powm(r1, r2, p0, p);
	  if (mpz_cmp_ui(r1, 1) == 0)
	    {
	      /* Success: optionally export the final seed state. */
	      if (prime_seed_length != NULL)
		{
		  tseed_length = mpz_seed_sizeinbase_256_u(s, pseed_length);
		  if (tseed_length > sizeof(pseed))
		    goto fail;

		  nettle_mpz_get_str_256(tseed_length, pseed, s);

		  /* Caller's buffer too small: report needed size, fail. */
		  if (*prime_seed_length < tseed_length)
		    {
		      *prime_seed_length = tseed_length;
		      goto fail;
		    }
		  *prime_seed_length = tseed_length;

		  if (prime_seed != NULL)
		    memcpy(prime_seed, pseed, tseed_length);
		}
	      ret = 1;
	      goto cleanup;
	    }
	}
    }

  if (pcounter >= max)
    {
      goto fail;
    }

  /* Next candidate. */
  mpz_add_ui(t, t, 1);
  goto retry;

 fail:
  ret = 0;

 cleanup:
  free(storage);
  mpz_clear(p0);
  mpz_clear(sq);
  mpz_clear(r1);
  mpz_clear(r2);
  mpz_clear(x);
  mpz_clear(t);
  mpz_clear(s);
  return ret;
}
/**
 * \internal Sets configurations to module
 *
 * \param[out] module  Pointer to software module structure
 * \param[in]  config  Configuration structure with configurations to set
 *
 * \return Status of setting configuration.
 * \retval STATUS_OK                        If module was configured correctly
 * \retval STATUS_ERR_ALREADY_INITIALIZED   If setting other GCLK generator than
 *                                          previously set
 * \retval STATUS_ERR_BAUDRATE_UNAVAILABLE  If given baudrate is not compatible
 *                                          with set GCLK frequency
 */
static enum status_code _i2c_master_set_config(
		struct i2c_master_module *const module,
		const struct i2c_master_config *const config)
{
	/* Sanity check arguments. */
	Assert(module);
	Assert(module->hw);
	Assert(config);

	/* Temporary variables. */
	uint32_t tmp_ctrla;
	int32_t tmp_baud;
	int32_t tmp_baud_hs;
	enum status_code tmp_status_code = STATUS_OK;

	SercomI2cm *const i2c_module = &(module->hw->I2CM);
	Sercom *const sercom_hw = module->hw;

	uint8_t sercom_index = _sercom_get_sercom_inst_index(sercom_hw);

	/* Pin configuration */
	struct system_pinmux_config pin_conf;
	system_pinmux_get_config_defaults(&pin_conf);

	/* Pinmux values pack mux position in the low 16 bits and the pin
	 * number in the high 16 bits. */
	uint32_t pad0 = config->pinmux_pad0;
	uint32_t pad1 = config->pinmux_pad1;

	/* SERCOM PAD0 - SDA */
	if (pad0 == PINMUX_DEFAULT) {
		pad0 = _sercom_get_default_pad(sercom_hw, 0);
	}
	pin_conf.mux_position = pad0 & 0xFFFF;
	pin_conf.direction = SYSTEM_PINMUX_PIN_DIR_OUTPUT_WITH_READBACK;
	system_pinmux_pin_set_config(pad0 >> 16, &pin_conf);

	/* SERCOM PAD1 - SCL */
	if (pad1 == PINMUX_DEFAULT) {
		pad1 = _sercom_get_default_pad(sercom_hw, 1);
	}
	pin_conf.mux_position = pad1 & 0xFFFF;
	pin_conf.direction = SYSTEM_PINMUX_PIN_DIR_OUTPUT_WITH_READBACK;
	system_pinmux_pin_set_config(pad1 >> 16, &pin_conf);

	/* Save timeout on unknown bus state in software module. */
	module->unknown_bus_state_timeout = config->unknown_bus_state_timeout;

	/* Save timeout on buffer write. */
	module->buffer_timeout = config->buffer_timeout;

	/* Set whether module should run in standby. */
	if (config->run_in_standby || system_is_debugger_present()) {
		tmp_ctrla = SERCOM_I2CM_CTRLA_RUNSTDBY;
	} else {
		tmp_ctrla = 0;
	}

	/* Check and set start data hold timeout. */
	if (config->start_hold_time != I2C_MASTER_START_HOLD_TIME_DISABLED) {
		tmp_ctrla |= config->start_hold_time;
	}

	/* Check and set transfer speed */
	tmp_ctrla |= config->transfer_speed;

	/* Check and set SCL low timeout. */
	if (config->scl_low_timeout) {
		tmp_ctrla |= SERCOM_I2CM_CTRLA_LOWTOUTEN;
	}

	/* Check and set inactive bus timeout. */
	if (config->inactive_timeout != I2C_MASTER_INACTIVE_TIMEOUT_DISABLED) {
		tmp_ctrla |= config->inactive_timeout;
	}

	/* Check and set SCL clock stretch mode. */
	if (config->scl_stretch_only_after_ack_bit) {
		tmp_ctrla |= SERCOM_I2CM_CTRLA_SCLSM;
	}

	/* Check and set slave SCL low extend timeout. */
	if (config->slave_scl_low_extend_timeout) {
		tmp_ctrla |= SERCOM_I2CM_CTRLA_SEXTTOEN;
	}

	/* Check and set master SCL low extend timeout. */
	if (config->master_scl_low_extend_timeout) {
		tmp_ctrla |= SERCOM_I2CM_CTRLA_MEXTTOEN;
	}

	/* Write config to register CTRLA. */
	i2c_module->CTRLA.reg |= tmp_ctrla;

	/* Set configurations in CTRLB. */
	i2c_module->CTRLB.reg = SERCOM_I2CM_CTRLB_SMEN;

	/* Find and set baudrate: BAUD = f_gclk / (2 * f_scl) - 5
	 * (config->baud_rate is in kHz, hence the factor 2000). */
	tmp_baud = (int32_t)(div_ceil(
			system_gclk_chan_get_hz(SERCOM0_GCLK_ID_CORE + sercom_index),
			(2000*(config->baud_rate))) - 5);

	/* Check that baudrate is supported at current speed. */
	if (tmp_baud > 255 || tmp_baud < 0) {
		/* Baud rate not supported. */
		tmp_status_code = STATUS_ERR_BAUDRATE_UNAVAILABLE;
	} else {
		/* Find baudrate for high speed */
		tmp_baud_hs = (int32_t)(div_ceil(
				system_gclk_chan_get_hz(SERCOM0_GCLK_ID_CORE + sercom_index),
				(2000*(config->baud_rate_high_speed))) - 1);

		/* Check that baudrate is supported at current speed. */
		if (tmp_baud_hs > 255 || tmp_baud_hs < 0) {
			/* Baud rate not supported. */
			tmp_status_code = STATUS_ERR_BAUDRATE_UNAVAILABLE;
		}
	}

	/* Only touch BAUD when both dividers validated; both tmp_baud and
	 * tmp_baud_hs are guaranteed initialized on this path. */
	if (tmp_status_code != STATUS_ERR_BAUDRATE_UNAVAILABLE) {
		/* Baud rate acceptable. */
		i2c_module->BAUD.reg = SERCOM_I2CM_BAUD_BAUD(tmp_baud) |
				SERCOM_I2CM_BAUD_HSBAUD(tmp_baud_hs);
	}

	return tmp_status_code;
}
/* Construct the circuit gadget: packs the three SHA-256 digests
 * (h1, h2, x) into as few field elements as possible for the verifier
 * input, and wires up two SHA-256 compression gadgets proving
 * h1 = H(r1 || padding) and h2 = H(r2 || padding). */
l_gadget(protoboard<FieldT> &pb) : gadget<FieldT>(pb, "l_gadget")
{
    // Allocate space for the verifier input.
    const size_t input_size_in_bits = sha256_digest_len * 3;
    {
        // We use a "multipacking" technique which allows us to constrain
        // the input bits in as few field elements as possible.
        const size_t input_size_in_field_elements =
            div_ceil(input_size_in_bits, FieldT::capacity());
        input_as_field_elements.allocate(pb, input_size_in_field_elements,
                                         "input_as_field_elements");
        this->pb.set_input_sizes(input_size_in_field_elements);
    }

    // Constant-zero variable, reused for the '0' bits of the padding.
    zero.allocate(this->pb, FMT(this->annotation_prefix, "zero"));

    // SHA256's length padding
    for (size_t i = 0; i < 256; i++) {
        if (sha256_padding[i])
            padding_var.emplace_back(ONE);
        else
            padding_var.emplace_back(zero);
    }

    // Verifier (and prover) inputs:
    h1_var.reset(new digest_variable<FieldT>(pb, sha256_digest_len, "h1"));
    h2_var.reset(new digest_variable<FieldT>(pb, sha256_digest_len, "h2"));
    x_var.reset(new digest_variable<FieldT>(pb, sha256_digest_len, "x"));

    // Concatenate h1 || h2 || x as the bit-level public input.
    input_as_bits.insert(input_as_bits.end(), h1_var->bits.begin(), h1_var->bits.end());
    input_as_bits.insert(input_as_bits.end(), h2_var->bits.begin(), h2_var->bits.end());
    input_as_bits.insert(input_as_bits.end(), x_var->bits.begin(), x_var->bits.end());

    // Multipacking
    assert(input_as_bits.size() == input_size_in_bits);
    unpack_inputs.reset(new multipacking_gadget<FieldT>(this->pb, input_as_bits,
        input_as_field_elements, FieldT::capacity(),
        FMT(this->annotation_prefix, " unpack_inputs")));

    // Prover inputs:
    r1_var.reset(new digest_variable<FieldT>(pb, sha256_digest_len, "r1"));
    r2_var.reset(new digest_variable<FieldT>(pb, sha256_digest_len, "r2"));

    // IV for SHA256
    pb_linear_combination_array<FieldT> IV = SHA256_default_IV(pb);

    // Initialize the block gadget for r1's hash
    h_r1_block.reset(new block_variable<FieldT>(pb, {
        r1_var->bits,
        padding_var
    }, "h_r1_block"));

    // Initialize the hash gadget for r1's hash
    h_r1.reset(new sha256_compression_function_gadget<FieldT>(pb,
                                                              IV,
                                                              h_r1_block->bits,
                                                              *h1_var,
                                                              "h_r1"));

    // Initialize the block gadget for r2's hash
    h_r2_block.reset(new block_variable<FieldT>(pb, {
        r2_var->bits,
        padding_var
    }, "h_r2_block"));

    // Initialize the hash gadget for r2's hash
    h_r2.reset(new sha256_compression_function_gadget<FieldT>(pb,
                                                              IV,
                                                              h_r2_block->bits,
                                                              *h2_var,
                                                              "h_r2"));
}
/**
 * \brief Main Application Routine
 *  -Initialize the system clocks                                   \n
 *  -Configure and Enable the PWMA Generic clock                    \n
 *  -Configure and Enable the PWMA Module                           \n
 *  -Load Duty cycle value using Interlinked multi-value mode       \n
 *  -Register and Enable the Interrupt                              \n
 *  -Enter into sleep mode                                          \n
 */
int main (void)
{
	uint32_t div;
	bool config_status = FAIL;
	bool set_value_status = FAIL;

	/*
	 * Duty cycle value to be loaded. As Two channels are used
	 * in this example, two values has been assigned. Initialize the unused
	 * channel duty cycle value to 0, as it may take some garbage values
	 * and will return FAIL while updating duty cycle as the value might
	 * exceed the limit NOTE : Maximum four values can be loaded at a time,
	 * as the channel limitation for Interlinked multi-value mode is four.
	 */
	uint16_t duty_cycle[] = {11,5,0,0};

	/* PWMA Pin and Function Map */
	static const gpio_map_t PWMA_GPIO_MAP = {
		{EXAMPLE_PWMA_PIN1, EXAMPLE_PWMA_FUNCTION1},
		{EXAMPLE_PWMA_PIN2, EXAMPLE_PWMA_FUNCTION2},
	};

	/* Enable the PWMA Pins */
	gpio_enable_module(PWMA_GPIO_MAP,
			sizeof(PWMA_GPIO_MAP) / sizeof(PWMA_GPIO_MAP[0]));

	/*
	 * Calculate the division factor value to be loaded and
	 * Enable the Generic clock
	 *
	 * NOTE(review): sysclk_get_pba_hz() is queried here, before
	 * sysclk_init() below configures the clock tree -- confirm the PBA
	 * frequency read at this point matches the post-init frequency.
	 */
	div = div_ceil((sysclk_get_pba_hz()) , EXAMPLE_PWMA_GCLK_FREQUENCY);
	genclk_enable_config(EXAMPLE_PWMA_GCLK_ID,EXAMPLE_PWMA_GCLK_SOURCE,div);

	/*
	 * Initialize the System clock
	 * Note: Clock should be configured in conf_clock.h
	 */
	sysclk_init();

	/* Initialize the delay routines */
	delay_init(sysclk_get_cpu_hz());

	/*
	 * Configure and Enable the PWMA Module. The LED will turned if
	 * configuration fails because of invalid argument.
	 */
	config_status = pwma_config_enable(pwma, EXAMPLE_PWMA_OUTPUT_FREQUENCY,
			EXAMPLE_PWMA_GCLK_FREQUENCY, PWMA_SPREAD);

	/* Error in configuring the PWMA module */
	if (config_status == FAIL){
		while (1) {
			delay_ms(10);
			gpio_tgl_gpio_pin(ERROR_LED);
		}
	}

	/* Load the duty cycle values using Interlinked multi-value mode */
	set_value_status = pwma_set_multiple_values(pwma,
			((EXAMPLE_PWMA_CHANNEL_ID1<<0) |
			 (EXAMPLE_PWMA_CHANNEL_ID2<<8)),
			(uint16_t*)&duty_cycle);

	/* Error in loading the duty cycle value */
	if (set_value_status == FAIL){
		while (1) {
			delay_ms(10);
			gpio_tgl_gpio_pin(ERROR_LED);
		}
	}

	/* Disable global interrupts */
	cpu_irq_disable();

	/*
	 * Initialize the interrupt vectors
	 * Note: This function adds nothing for IAR as the interrupts are
	 * handled by the IAR compiler itself. It provides an abstraction
	 * between GCC & IAR compiler to use interrupts.
	 * Refer function implementation in interrupt_avr32.h
	 */
	irq_initialize_vectors();

	/*
	 * Register the ACIFB interrupt handler
	 * Note: This function adds nothing for IAR as the interrupts are
	 * handled by the IAR compiler itself. It provides an abstraction
	 * between GCC & IAR compiler to use interrupts.
	 * Refer function implementation in interrupt_avr32.h
	 */
	irq_register_handler(&tofl_irq, AVR32_PWMA_IRQ, PWMA_INTERRUPT_PRIORITY);

	/* Enable the Timebase overflow interrupt */
	pwma->ier = AVR32_PWMA_IER_TOFL_MASK;

	/* Enable global interrupt */
	cpu_irq_enable();

	/* Go to sleep mode */
	while(1){
		/* Enter into sleep mode */
		pm_sleep(AVR32_PM_SMODE_FROZEN);
	}
} /* End of main() */