/**
 * Mark blocks in the supplied vector as allocated in the checking bitmap.
 *
 * @param db    the sdbm database
 * @param bvec  vector where allocated block numbers are stored
 * @param bcnt  number of blocks in vector
 */
static void
big_file_mark_used(DBM *db, const void *bvec, int bcnt)
{
    DBMBIG *dbg = db->big;
    const void *q;
    int n;

    if (!big_check_start(db))
        return;

    for (q = bvec, n = bcnt; n > 0; n--) {
        size_t bno = peek_be32(q);
        bit_field_t *map;
        long bmap;
        size_t bit;

        bmap = bno / BIG_BITCOUNT;          /* Bitmap handling this block */
        bit = bno & (BIG_BITCOUNT - 1);     /* Index within bitmap */
        q = const_ptr_add_offset(q, sizeof(guint32));

        /*
         * It's because of this sanity check that we don't want to consider
         * the bitcheck field as one huge contiguous map.  Also doing that
         * would violate the encapsulation: we're not supposed to know how
         * bits are allocated in the field.
         */

        if (bmap >= dbg->bitmaps)
            continue;

        map = ptr_add_offset(dbg->bitcheck, bmap * BIG_BLKSIZE);
        bit_field_set(map, bit);
    }
}
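/*
 * Illustrative sketch only (not part of the sdbm sources): shows how a 32-bit
 * block number splits into a bitmap index and a bit offset, assuming 1 KiB
 * blocks so that BIG_BITCOUNT would be 8192 bits per bitmap page (consistent
 * with the "8 MiB worth of data (with 1 KiB blocks)" comment further down in
 * big_file_alloc()).  The constants are local stand-ins, not the real macros.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BLKSIZE   1024u                   /* assumed 1 KiB block size */
#define SKETCH_BITCOUNT  (SKETCH_BLKSIZE * 8u)   /* 8192 bits per bitmap page */

int main(void)
{
    uint32_t bno = 20000;                        /* sample block number */
    uint32_t bmap = bno / SKETCH_BITCOUNT;       /* bitmap page governing it */
    uint32_t bit = bno & (SKETCH_BITCOUNT - 1);  /* bit index within the page */

    /* Prints: block 20000 -> bitmap #2, bit 3616 */
    printf("block %u -> bitmap #%u, bit %u\n", bno, bmap, bit);
    return 0;
}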
static inline void _mark_spike(uint32_t neuron_id, uint32_t n_spikes) {
    if (recording_flags > 0) {
        if (n_spike_buffers_allocated < n_spikes) {
            uint32_t new_size = 8 + (n_spikes * spike_buffer_size);
            timed_out_spikes *new_spikes =
                    (timed_out_spikes *) spin1_malloc(new_size);
            if (new_spikes == NULL) {
                log_error("Cannot reallocate spike buffer");
                rt_error(RTE_SWERR);
            }

            // Zero the newly allocated buffer, word by word
            uint32_t *data = (uint32_t *) new_spikes;
            for (uint32_t n = new_size >> 2; n > 0; n--) {
                data[n - 1] = 0;
            }

            // Copy over the old buffer, if any, then release it
            if (spikes != NULL) {
                uint32_t old_size =
                        8 + (n_spike_buffers_allocated * spike_buffer_size);
                spin1_memcpy(new_spikes, spikes, old_size);
                sark_free(spikes);
            }

            spikes = new_spikes;
            n_spike_buffers_allocated = n_spikes;
        }

        if (spikes->n_buffers < n_spikes) {
            spikes->n_buffers = n_spikes;
        }

        for (uint32_t n = n_spikes; n > 0; n--) {
            bit_field_set(_out_spikes(n - 1), neuron_id);
        }
    }
}
/**
 * If not already done, initiate bitmap checking by creating all the currently
 * defined bitmaps in memory, zeroed, so that we can check that all the pages
 * flagged as used are indeed referred to by either a big key or a big value.
 *
 * @return TRUE if OK.
 */
gboolean
big_check_start(DBM *db)
{
    DBMBIG *dbg = db->big;
    long i;

    if (-1 == dbg->fd && -1 == big_open(dbg))
        return FALSE;

    if (dbg->bitcheck != NULL)
        return TRUE;

    /*
     * The array of bitmaps is zeroed, with bit 0 of each bitmap page (the bit
     * representing the bitmap page itself) set.
     *
     * Looping over the big keys and values and marking their blocks as used
     * will set additional bits in these checking maps, which at the end will
     * be compared to the ones on disk.
     */

    dbg->bitcheck = halloc0(BIG_BLKSIZE * dbg->bitmaps);

    for (i = 0; i < dbg->bitmaps; i++) {
        bit_field_t *map = ptr_add_offset(dbg->bitcheck, i * BIG_BLKSIZE);
        bit_field_set(map, 0);      /* Bit 0 is for the bitmap itself */
    }

    return TRUE;
}
ExternalInterrupt::
ExternalInterrupt(Board::ExternalInterruptPin pin,
                  InterruptMode mode,
                  bool pullup) :
  IOPin((Board::DigitalPin) pin, INPUT_MODE, pullup)
{
  m_ix = (pin == Board::EXT1);
  ext[m_ix] = this;
  bit_field_set(MCUCR, 0b11, mode);
}
ExternalInterrupt::
ExternalInterrupt(Board::ExternalInterruptPin pin,
                  InterruptMode mode,
                  bool pullup) :
  IOPin((Board::DigitalPin) pin, INPUT_MODE, pullup)
{
  if (pin <= Board::EXT5) {
    m_ix = pin - Board::EXT4;
    uint8_t ix = (m_ix << 1);
    bit_field_set(EICRB, 0b11 << ix, mode << ix);
    m_ix += 4;
  }
  else {
    m_ix = pin - Board::EXT0;
    uint8_t ix = (m_ix << 1);
    bit_field_set(EICRA, 0b11 << ix, mode << ix);
  }
  ext[m_ix] = this;
}
ExternalInterrupt::
ExternalInterrupt(Board::ExternalInterruptPin pin,
                  InterruptMode mode,
                  bool pullup) :
  IOPin((Board::DigitalPin) pin, INPUT_MODE, pullup),
  m_ix(pin - Board::EXT0)
{
  ext[m_ix] = this;
  uint8_t ix = (m_ix << 1);
  bit_field_set(EICRA, 0b11 << ix, mode << ix);
}
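// Hedged usage sketch for the constructors above (not from the Cosa sources):
// assumes Cosa's ON_FALLING_MODE enumerator and the enable()/on_interrupt()
// members -- check ExternalInterrupt.hh for the exact names on your board
// variant.  The Button class and the choice of Board::EXT0 are hypothetical.
#include "Cosa/ExternalInterrupt.hh"

class Button : public ExternalInterrupt {
public:
  Button() :
    ExternalInterrupt(Board::EXT0, ExternalInterrupt::ON_FALLING_MODE, true)
  {}
  virtual void on_interrupt(uint16_t arg = 0)
  {
    (void) arg;
    // React to the falling edge here.
  }
};

Button button;

void setup()
{
  button.enable();    // unmask the external interrupt
}

void loop()
{
}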
/**
 * Allocate a single block in the file, without extending it.
 *
 * @param db        the sdbm database
 * @param first     first block to consider
 *
 * @return the block number if found, 0 otherwise.
 */
static size_t
big_falloc(DBM *db, size_t first)
{
    DBMBIG *dbg = db->big;
    long max_bitmap = dbg->bitmaps;
    long i;
    long bmap;
    size_t first_bit;

    bmap = first / BIG_BITCOUNT;            /* Bitmap handling this block */
    first_bit = first & (BIG_BITCOUNT - 1); /* Index within bitmap */

    g_assert(first_bit != 0);               /* Bit 0 is the bitmap itself */

    /*
     * Loop through all the currently existing bitmaps.
     */

    for (i = bmap; i < max_bitmap; i++) {
        size_t bno;

        if (!fetch_bitbuf(db, i))
            return 0;

        bno = bit_field_first_clear(dbg->bitbuf, first_bit, BIG_BITCOUNT - 1);
        if ((size_t) -1 == bno)
            continue;

        /*
         * Found a free block.
         */

        bit_field_set(dbg->bitbuf, bno);
        dbg->bitbuf_dirty = TRUE;

        /*
         * Correct the block number corresponding to "bno", if we did
         * not find it in bitmap #0.
         */

        bno = size_saturate_add(bno, size_saturate_mult(BIG_BITCOUNT, i));

        /* Make sure we can represent the block number in 32 bits */
        g_assert(bno <= MAX_INT_VAL(guint32));

        return bno;     /* Allocated block number */
    }

    return 0;           /* No free block found */
}
/**
 * Open the .dat file, creating it if missing.
 *
 * @return -1 on error with errno set, 0 if OK with cleared errno.
 */
static int
big_open(DBMBIG *dbg)
{
    struct datfile *file;
    filestat_t buf;

    g_assert(-1 == dbg->fd);
    g_assert(dbg->file != NULL);

    file = dbg->file;
    dbg->fd = file_open(file->datname, file->flags | O_CREAT, file->mode);

    if (-1 == dbg->fd)
        return -1;

    big_datfile_free_null(&dbg->file);
    dbg->bitbuf = walloc(BIG_BLKSIZE);

    if (-1 == fstat(dbg->fd, &buf)) {
        buf.st_size = 0;
    } else {
        if (buf.st_size < BIG_BLKSIZE) {
            buf.st_size = 0;
        } else {
            dbg->bitmaps = 1 +
                (buf.st_size - BIG_BLKSIZE) / (BIG_BITCOUNT * BIG_BLKSIZE);
        }
    }

    /*
     * Create a first bitmap if the file is empty.
     * No need to flush it to disk, this will happen at the first allocation.
     */

    if (0 == buf.st_size) {
        memset(dbg->bitbuf, 0, BIG_BLKSIZE);
        bit_field_set(dbg->bitbuf, 0);  /* First page is the bitmap itself */
        dbg->bitbno = 0;
        dbg->bitmaps = 1;
    }

    errno = 0;
    return 0;
}
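/*
 * Worked sketch of the bitmap-count computation above (not part of the sdbm
 * sources): assumes 1 KiB blocks, so each bitmap page of 8192 bits governs
 * 8 MiB worth of blocks.  The formula only applies to files at least one
 * block long, as in big_open().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t blksize = 1024;          /* assumed 1 KiB block size */
    const uint64_t bitcount = blksize * 8;  /* 8192 blocks per bitmap page */
    const uint64_t sizes[] = {
        1024,                               /* just the first bitmap page */
        4 * 1024 * 1024,                    /* 4 MiB */
        8 * 1024 * 1024 + 1024,             /* 8 MiB plus one page */
        20 * 1024 * 1024                    /* 20 MiB */
    };
    size_t i;

    for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++) {
        uint64_t st_size = sizes[i];
        uint64_t bitmaps = 1 + (st_size - blksize) / (bitcount * blksize);
        printf("%10llu bytes -> %llu bitmap page(s)\n",
            (unsigned long long) st_size, (unsigned long long) bitmaps);
    }
    return 0;   /* prints 1, 1, 2 and 3 bitmap pages respectively */
}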
void
AnalogPin::prescale(uint8_t factor)
{
  const uint8_t MASK = (_BV(ADPS2) | _BV(ADPS1) | _BV(ADPS0));
  bit_field_set(ADCSRA, MASK, factor);
}
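// Hedged usage sketch (not from the Cosa sources): selecting the divide-by-128
// ADC prescaler (ADPS2:0 = 0b111 per the AVR datasheet) gives the common
// 125 kHz ADC clock from a 16 MHz system clock.  Board::A0 and sample() are
// assumed from the surrounding AnalogPin API; verify against AnalogPin.hh.
#include "Cosa/AnalogPin.hh"

AnalogPin sensor(Board::A0);

void setup()
{
  sensor.prescale(0b111);           // ADC clock = F_CPU / 128
}

void loop()
{
  uint16_t reading = sensor.sample();
  (void) reading;                   // use the 10-bit sample here
}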
/** * Allocate "n" consecutive (sequential) blocks in the file, without * attempting to extend it. * * @param db the sdbm database * @param bmap bitmap number from which we need to start looking * @param n amount of consecutive blocks we want * * @return the block number of the first "n" blocks if found, 0 if nothing * was found. */ static size_t big_falloc_seq(DBM *db, int bmap, int n) { DBMBIG *dbg = db->big; long max_bitmap = dbg->bitmaps; long i; g_assert(bmap >= 0); g_assert(n > 0); /* * Loop through all the currently existing bitmaps, starting at the * specified bitmap number. */ for (i = bmap; i < max_bitmap; i++) { size_t first; size_t j; int r; /* Remaining blocks to allocate consecutively */ if (!fetch_bitbuf(db, i)) return 0; /* * We start at bit #1 since bit #0 is the bitmap itself. * * Bit #0 should always be set but in case the file is corrupted, * we don't want to start allocating data in the bitmap itself!. */ first = bit_field_first_clear(dbg->bitbuf, 1, BIG_BITCOUNT - 1); if ((size_t) -1 == first) continue; for (j = first + 1, r = n - 1; r > 0 && j < BIG_BITCOUNT; r--, j++) { if (bit_field_get(dbg->bitbuf, j)) break; } /* * If "r" is 0, we have no remaining page to allocate: we found our * "n" consecutive free blocks. */ if (0 == r) { /* * Mark the "n" consecutive blocks as busy. */ for (j = first, r = n; r > 0; r--, j++) { bit_field_set(dbg->bitbuf, j); } dbg->bitbuf_dirty = TRUE; /* * Correct the block number corresponding to "first", if we did * not find it in bitmap #0. */ first = size_saturate_add(first, size_saturate_mult(BIG_BITCOUNT, i)); /* Make sure we can represent all block numbers in 32 bits */ g_assert(size_saturate_add(first, n - 1) <= MAX_INT_VAL(guint32)); return first; /* "n" consecutive free blocks found */ } } return 0; /* No free block found */ }
/**
 * Allocate blocks (consecutive if possible) from the .dat file.
 * Block numbers are written back in the specified vector, in sequence.
 *
 * Blocks are always allocated with increasing block numbers, i.e. the list
 * of block numbers returned is guaranteed to be sorted.  This will help
 * upper layers to quickly determine whether all the blocks are contiguous,
 * for instance.
 *
 * The file is extended as necessary to be able to allocate the blocks, but
 * this is only done when there are no more free blocks available.
 *
 * @param db        the sdbm database
 * @param bvec      vector where allocated block numbers will be stored
 * @param bcnt      number of blocks in vector (number to allocate)
 *
 * @return TRUE if we were able to allocate all the requested blocks.
 */
static gboolean
big_file_alloc(DBM *db, void *bvec, int bcnt)
{
    DBMBIG *dbg = db->big;
    size_t first;
    int n;
    void *q;
    int bmap = 0;       /* Initial bitmap from which we allocate */

    g_assert(bcnt > 0);
    g_return_val_if_fail(NULL == dbg->bitcheck, FALSE);

    if (-1 == dbg->fd && -1 == big_open(dbg))
        return FALSE;

    /*
     * First try to allocate all the blocks sequentially.
     */

retry:
    first = big_falloc_seq(db, bmap, bcnt);
    if (first != 0) {
        /* Use q/n so that bvec/bcnt stay valid for the failure cleanup below */
        for (q = bvec, n = bcnt; n > 0; n--) {
            q = poke_be32(q, first++);
        }
        goto success;
    }

    /*
     * There are no "bcnt" consecutive free blocks in the file.
     *
     * Before extending the file, we're going to fill the holes as much
     * as possible.
     */

    for (first = 0, q = bvec, n = bcnt; n > 0; n--) {
        first = big_falloc(db, first + 1);
        if (0 == first)
            break;
        q = poke_be32(q, first);
    }

    if (0 == n)
        goto success;       /* Found the requested "bcnt" free blocks */

    /*
     * Free the incompletely allocated blocks: since we're about to extend
     * the file, we'll use consecutive blocks from the new chunk governed
     * by the added empty bitmap.
     */

    for (q = bvec, n = bcnt - n; n > 0; n--) {
        first = peek_be32(q);
        big_ffree(db, first);
        q = ptr_add_offset(q, sizeof(guint32));
    }

    /*
     * Extend the file by allocating another bitmap.
     */

    g_assert(0 == bmap);    /* Never retried yet */

    if (dbg->bitbuf_dirty && !flush_bitbuf(db))
        return FALSE;

    memset(dbg->bitbuf, 0, BIG_BLKSIZE);
    bit_field_set(dbg->bitbuf, 0);  /* First page is the bitmap itself */
    dbg->bitbno = dbg->bitmaps * BIG_BITCOUNT;
    dbg->bitmaps++;

    /*
     * Now retry starting to allocate blocks from the newly added bitmap.
     *
     * This will likely succeed if we're trying to allocate less than 8 MiB
     * worth of data (with 1 KiB blocks).
     */

    bmap = dbg->bitmaps - 1;
    goto retry;

success:
    /*
     * We successfully allocated blocks from the bitmap.
     *
     * If the database is not volatile, we need to flush the bitmap to disk
     * immediately in case of a crash, to avoid reusing these parts of the
     * file.
     */

    if (!db->is_volatile && dbg->bitbuf_dirty && !flush_bitbuf(db)) {
        /* Cannot flush -> cannot allocate the blocks: free them */
        for (q = bvec, n = bcnt; n > 0; n--) {
            first = peek_be32(q);
            big_ffree(db, first);
            q = ptr_add_offset(q, sizeof(guint32));
        }
        return FALSE;
    }

    return TRUE;    /* Succeeded */
}
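/*
 * Minimal caller-side sketch of the big-endian block-number vector convention
 * used by big_file_alloc() and big_file_mark_used() (not part of the sdbm
 * sources).  The poke_be32()/peek_be32() below are local stand-ins for the
 * real gtk-gnutella endian helpers.
 */
#include <stdint.h>
#include <stdio.h>

static void *poke_be32(void *p, uint32_t v)
{
    uint8_t *q = (uint8_t *) p;
    q[0] = v >> 24; q[1] = v >> 16; q[2] = v >> 8; q[3] = v;
    return q + 4;
}

static uint32_t peek_be32(const void *p)
{
    const uint8_t *q = (const uint8_t *) p;
    return (uint32_t) q[0] << 24 | (uint32_t) q[1] << 16 |
        (uint32_t) q[2] << 8 | q[3];
}

int main(void)
{
    uint8_t bvec[3 * sizeof(uint32_t)];     /* room for 3 block numbers */
    void *q = bvec;
    const uint8_t *r = bvec;
    int n;

    /* Serialize 3 block numbers, as big_file_alloc() writes them */
    q = poke_be32(q, 5);
    q = poke_be32(q, 6);
    q = poke_be32(q, 7);

    /* Walk the vector the way big_file_mark_used() does */
    for (n = 3; n > 0; n--, r += sizeof(uint32_t))
        printf("block %u\n", peek_be32(r));

    return 0;
}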
/**
 *! \brief Generate the data for a single connector
 *! \param[in/out] in_region The address to read the parameters from.  Should
 *!                be updated to the position just after the parameters after
 *!                calling.
 *! \param[in/out] neuron_delay_stage_config Bit fields into which to write the
 *!                delay information
 *! \param[in] pre_slice_start The start of the slice of the delay extension to
 *!            generate for
 *! \param[in] pre_slice_count The number of neurons of the delay extension to
 *!            generate for
 *! \return True if the region was correctly generated, False if there was an
 *!         error
 */
bool read_delay_builder_region(address_t *in_region,
        bit_field_t *neuron_delay_stage_config, uint32_t pre_slice_start,
        uint32_t pre_slice_count) {

    // Get the parameters
    address_t region = *in_region;
    const uint32_t max_row_n_synapses = *region++;
    const uint32_t max_delayed_row_n_synapses = *region++;
    const uint32_t post_slice_start = *region++;
    const uint32_t post_slice_count = *region++;
    const uint32_t max_stage = *region++;
    accum timestep_per_delay;
    spin1_memcpy(&timestep_per_delay, region++, sizeof(accum));

    // Get the connector and delay parameter generators
    const uint32_t connector_type_hash = *region++;
    const uint32_t delay_type_hash = *region++;
    connection_generator_t connection_generator =
            connection_generator_init(connector_type_hash, &region);
    param_generator_t delay_generator =
            param_generator_init(delay_type_hash, &region);
    *in_region = region;

    // If any components couldn't be created return false
    if (connection_generator == NULL || delay_generator == NULL) {
        return false;
    }

    // For each pre-neuron, generate the connections
    uint32_t pre_slice_end = pre_slice_start + pre_slice_count;
    for (uint32_t pre_neuron_index = pre_slice_start;
            pre_neuron_index < pre_slice_end; pre_neuron_index++) {

        // Generate the connections
        uint32_t max_n_synapses =
                max_row_n_synapses + max_delayed_row_n_synapses;
        uint16_t indices[max_n_synapses];
        uint32_t n_indices = connection_generator_generate(
                connection_generator, pre_slice_start, pre_slice_count,
                pre_neuron_index, post_slice_start, post_slice_count,
                max_n_synapses, indices);
        log_debug("Generated %u synapses", n_indices);

        // Generate delays for each index
        accum delay_params[n_indices];
        param_generator_generate(
                delay_generator, n_indices, pre_neuron_index, indices,
                delay_params);

        // Go through the delays
        for (uint32_t i = 0; i < n_indices; i++) {

            // Get the delay in timesteps
            accum delay = delay_params[i] * timestep_per_delay;
            if (delay < 0) {
                delay = 1;
            }

            // Round down to an integer number of timesteps
            uint16_t rounded_delay = (uint16_t) delay;
            if (delay != rounded_delay) {
                log_debug("Rounded delay %k to %u", delay, rounded_delay);
            }

            // Get the delay stage and update the data
            struct delay_value delay_value = get_delay(rounded_delay, max_stage);
            if (delay_value.stage > 0) {
                bit_field_set(neuron_delay_stage_config[delay_value.stage - 1],
                        pre_neuron_index - pre_slice_start);
            }
        }
    }

    // Finish with the generators
    connection_generator_free(connection_generator);
    param_generator_free(delay_generator);

    return true;
}
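/*
 * Standalone sketch of the delay conversion above (not SpiNNaker code): the
 * real code multiplies a fixed-point `accum` value (ISO/IEC TR 18037) by the
 * timestep factor and truncates to a whole number of timesteps.  A double is
 * used here purely as a stand-in for `accum`, and the sample values are
 * made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const double timestep_per_delay = 1.0;      /* assumed 1 ms timesteps */
    const double delay_params[] = { 2.5, 3.0, -1.0 };
    size_t i;

    for (i = 0; i < sizeof delay_params / sizeof delay_params[0]; i++) {
        double delay = delay_params[i] * timestep_per_delay;
        uint16_t rounded_delay;

        if (delay < 0) {
            delay = 1;          /* clamp negatives, as in the code above */
        }
        rounded_delay = (uint16_t) delay;       /* truncates toward zero */
        printf("delay %.2f -> %u timestep(s)\n",
            delay_params[i], rounded_delay);
    }
    return 0;   /* prints 2, 3 and 1 timestep(s) respectively */
}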