/*
 * Include or exclude pages in a sparse dump, by half-open virtual
 * address interval (which may wrap around the end of the space).
 */
static void
sparse_dump_mark(vaddr_t vbegin, vaddr_t vend, int includep)
{
    pmap_t pmap;
    paddr_t p;
    vaddr_t v;

    /*
     * If a partial page is called for, the whole page must be included.
     */
    if (includep) {
        vbegin = rounddown(vbegin, PAGE_SIZE);
        vend = roundup(vend, PAGE_SIZE);
    } else {
        vbegin = roundup(vbegin, PAGE_SIZE);
        vend = rounddown(vend, PAGE_SIZE);
    }

    pmap = pmap_kernel();
    for (v = vbegin; v != vend; v += PAGE_SIZE) {
        if (pmap_extract(pmap, v, &p)) {
            if (includep)
                setbit(sparse_dump_physmap, p/PAGE_SIZE);
            else
                clrbit(sparse_dump_physmap, p/PAGE_SIZE);
        }
    }
}
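All of these snippets rely on the rounddown()/roundup() helpers of their respective trees. As a point of reference only, here is a minimal standalone sketch of the usual macro semantics (modeled on the sys/param.h-style definitions; the exact macros differ per tree, e.g. the Linux kernel versions evaluate their arguments once) together with the include/exclude rounding used by sparse_dump_mark() above:

#include <stdio.h>

/* Assumed semantics, in the spirit of <sys/param.h>: round down/up to a
 * multiple of y (y must be nonzero; these simple forms evaluate x twice). */
#define rounddown(x, y) (((x) / (y)) * (y))
#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
    unsigned long page = 4096;
    unsigned long vbegin = 0x1234, vend = 0x5678;   /* made-up addresses */

    /* Including a partial page widens the interval to whole pages... */
    printf("include: [%#lx, %#lx)\n",
           rounddown(vbegin, page), roundup(vend, page));
    /* ...while excluding one shrinks it to the pages wholly inside. */
    printf("exclude: [%#lx, %#lx)\n",
           roundup(vbegin, page), rounddown(vend, page));
    return 0;
}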
static void __xvip_dma_try_format(struct xvip_dma *dma,
                                  struct v4l2_pix_format *pix,
                                  const struct xvip_video_format **fmtinfo)
{
    const struct xvip_video_format *info;
    unsigned int min_width;
    unsigned int max_width;
    unsigned int min_bpl;
    unsigned int max_bpl;
    unsigned int width;
    unsigned int align;
    unsigned int bpl;

    /* Retrieve format information and select the default format if the
     * requested format isn't supported.
     */
    info = xvip_get_format_by_fourcc(pix->pixelformat);
    if (IS_ERR(info))
        info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

    pix->pixelformat = info->fourcc;
    pix->colorspace = V4L2_COLORSPACE_SRGB;
    pix->field = V4L2_FIELD_NONE;

    /* The transfer alignment requirements are expressed in bytes. Compute
     * the minimum and maximum values, clamp the requested width and convert
     * it back to pixels.
     */
    align = lcm(dma->align, info->bpp);
    min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
    max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
    width = rounddown(pix->width * info->bpp, align);

    pix->width = clamp(width, min_width, max_width) / info->bpp;
    pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
                        XVIP_DMA_MAX_HEIGHT);

    /* Clamp the requested bytes per line value. If the maximum bytes per
     * line value is zero, the module doesn't support user configurable line
     * sizes. Override the requested value with the minimum in that case.
     */
    min_bpl = pix->width * info->bpp;
    max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
    bpl = rounddown(pix->bytesperline, dma->align);

    pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
    pix->sizeimage = pix->bytesperline * pix->height;

    if (fmtinfo)
        *fmtinfo = info;
}
static unsigned long pcm512x_pllin_dac_rate(struct snd_soc_dai *dai,
                                            unsigned long osr_rate,
                                            unsigned long pllin_rate)
{
    struct snd_soc_codec *codec = dai->codec;
    struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
    unsigned long dac_rate;

    if (!pcm512x->pll_out)
        return 0; /* no PLL to bypass, force SCK as DAC input */

    if (pllin_rate % osr_rate)
        return 0; /* futile, quit early */

    /* run DAC no faster than 6144000 Hz */
    for (dac_rate = rounddown(pcm512x_dac_max(pcm512x, 6144000), osr_rate);
         dac_rate;
         dac_rate -= osr_rate) {
        if (pllin_rate / dac_rate > 128)
            return 0; /* DAC divider would be too big */
        if (!(pllin_rate % dac_rate))
            return dac_rate;
    }

    return 0;
}
static void update_range__(struct sw_flow_match *match,
                           size_t offset, size_t size, bool is_mask)
{
    struct sw_flow_key_range *range = NULL;
    size_t start = rounddown(offset, sizeof(long));
    size_t end = roundup(offset + size, sizeof(long));

    if (!is_mask)
        range = &match->range;
    else if (match->mask)
        range = &match->mask->range;

    if (!range)
        return;

    if (range->start == range->end) {
        range->start = start;
        range->end = end;
        return;
    }

    if (range->start > start)
        range->start = start;

    if (range->end < end)
        range->end = end;
}
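For illustration only (the offsets are made up, not taken from the flow-key layout): with 8-byte longs, a field at byte offset 13 with size 6 spans bytes 13..18, so the rounding in update_range__() widens the covered range to the long-aligned interval [8, 24). A small standalone check of that arithmetic:

#include <stdio.h>
#include <stddef.h>

#define rounddown(x, y) (((x) / (y)) * (y))
#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
    size_t offset = 13, size = 6;   /* hypothetical field, not from the source */
    size_t start = rounddown(offset, sizeof(long));
    size_t end = roundup(offset + size, sizeof(long));

    /* Prints "range [8, 24)" on an LP64 system. */
    printf("range [%zu, %zu)\n", start, end);
    return 0;
}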
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
                         struct scatterlist *src, unsigned int nbytes)
{
    struct blkcipher_walk walk;
    u32 state[16];
    int err;

    if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
        return crypto_chacha20_crypt(desc, dst, src, nbytes);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

    crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

    kernel_neon_begin();

    while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % CHACHA20_BLOCK_SIZE);
    }

    if (walk.nbytes) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        walk.nbytes);
        err = blkcipher_walk_done(desc, &walk, 0);
    }

    kernel_neon_end();

    return err;
}
static int elf_ph_sane(Elf_Phdr *phdr)
{
    if (rounddown((uintptr_t)phdr, sizeof(Elf_Addr)) != (uintptr_t)phdr) {
        return 0;
    }
    return 1;
}
static bool
offtab_read_window(struct offtab *offtab, uint32_t blkno, int read_flags)
{
    const uint32_t window_start = rounddown(blkno, offtab->ot_window_size);
    size_t window_bytes;
    off_t window_pos;

    assert(offtab->ot_mode == OFFTAB_MODE_READ);
    assert(ISSET(read_flags, OFFTAB_READ_SEEK) ||
        (lseek(offtab->ot_fd, 0, SEEK_CUR) == offtab->ot_fdpos) ||
        ((lseek(offtab->ot_fd, 0, SEEK_CUR) == -1) && (errno == ESPIPE)));

    offtab_compute_window_position(offtab, window_start,
        &window_bytes, &window_pos);

    const ssize_t n_read = (ISSET(read_flags, OFFTAB_READ_SEEK)
        ? pread_block(offtab->ot_fd, offtab->ot_window, window_bytes,
            window_pos)
        : read_block(offtab->ot_fd, offtab->ot_window, window_bytes));

    if (n_read == -1) {
        (*offtab->ot_report)("read offset table at %"PRIuMAX,
            (uintmax_t)window_pos);
        return false;
    }
    assert(n_read >= 0);
    if ((size_t)n_read != window_bytes) {
        (*offtab->ot_reportx)("partial read of offset table"
            " at %"PRIuMAX": %zu != %zu",
            (uintmax_t)window_pos, (size_t)n_read, window_bytes);
        return false;
    }

    offtab->ot_window_start = window_start;
    return true;
}
void swiotlb_set_max_segment(unsigned int val)
{
    if (swiotlb_force == SWIOTLB_FORCE)
        max_segment = 1;
    else
        max_segment = rounddown(val, PAGE_SIZE);
}
static unsigned long pcm512x_find_sck(struct snd_soc_dai *dai,
                                      unsigned long bclk_rate)
{
    struct device *dev = dai->dev;
    struct snd_soc_codec *codec = dai->codec;
    struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
    unsigned long sck_rate;
    int pow2;

    /* 64 MHz <= pll_rate <= 100 MHz, VREF mode */
    /* 16 MHz <= sck_rate <=  25 MHz, VREF mode */

    /* select sck_rate as a multiple of bclk_rate but still with
     * as many factors of 2 as possible, as that makes it easier
     * to find a fast DAC rate
     */
    pow2 = 1 << fls((pcm512x_pll_max(pcm512x) - 16000000) / bclk_rate);
    for (; pow2; pow2 >>= 1) {
        sck_rate = rounddown(pcm512x_pll_max(pcm512x), bclk_rate * pow2);
        if (sck_rate >= 16000000)
            break;
    }
    if (!pow2) {
        dev_err(dev, "Impossible to generate a suitable SCK\n");
        return 0;
    }

    dev_dbg(dev, "sck_rate %lu\n", sck_rate);
    return sck_rate;
}
int ath10k_bmi_fast_download(struct ath10k *ar,
                             u32 address, const void *buffer, u32 length)
{
    u8 trailer[4] = {};
    u32 head_len = rounddown(length, 4);
    u32 trailer_len = length - head_len;
    int ret;

    ret = ath10k_bmi_lz_stream_start(ar, address);
    if (ret)
        return ret;

    /* copy the last word into a zero padded buffer */
    if (trailer_len > 0)
        memcpy(trailer, buffer + head_len, trailer_len);

    ret = ath10k_bmi_lz_data(ar, buffer, head_len);
    if (ret)
        return ret;

    if (trailer_len > 0)
        ret = ath10k_bmi_lz_data(ar, trailer, 4);

    if (ret != 0)
        return ret;

    /*
     * Close compressed stream and open a new (fake) one.
     * This serves mainly to flush Target caches.
     */
    ret = ath10k_bmi_lz_stream_start(ar, 0x00);

    return ret;
}
/* a mouse button is pressed, start cut operation */
static void
mouse_cut_start(scr_stat *scp)
{
    int i;
    int s;

    if (scp->status & MOUSE_VISIBLE) {
        sc_remove_all_cutmarkings(scp->sc);
        if ((scp->mouse_pos == scp->mouse_cut_start) &&
            (scp->mouse_pos == scp->mouse_cut_end)) {
            cut_buffer[0] = '\0';
            return;
        } else if (skip_spc_right(scp, scp->mouse_pos) >= scp->xsize) {
            /* if the pointer is on trailing blank chars, mark towards eol */
            i = skip_spc_left(scp, scp->mouse_pos) + 1;
            s = spltty();
            scp->mouse_cut_start =
                rounddown(scp->mouse_pos, scp->xsize) + i;
            scp->mouse_cut_end =
                (scp->mouse_pos / scp->xsize + 1) * scp->xsize - 1;
            splx(s);
            cut_buffer[0] = '\r';
        } else {
            s = spltty();
            scp->mouse_cut_start = scp->mouse_pos;
            scp->mouse_cut_end = scp->mouse_cut_start;
            splx(s);
            cut_buffer[0] = sc_vtb_getc(&scp->vtb, scp->mouse_cut_start);
        }
        cut_buffer[1] = '\0';
        scp->status |= MOUSE_CUTTING;
        mark_all(scp);  /* this is probably overkill XXX */
    }
}
/* copy a word under the mouse pointer */
static void
mouse_cut_word(scr_stat *scp)
{
    int start;
    int end;
    int sol;
    int eol;
    int c;
    int j;
    int len;

    /*
     * Because we don't have locale information in the kernel,
     * we only distinguish space char and non-space chars.  Punctuation
     * chars, symbols and other regular chars are all treated alike
     * unless user specified SC_CUT_SEPCHARS in his kernel config file.
     */
    if (scp->status & MOUSE_VISIBLE) {
        sol = rounddown(scp->mouse_pos, scp->xsize);
        eol = sol + scp->xsize;
        c = sc_vtb_getc(&scp->vtb, scp->mouse_pos);
        if (IS_SEP_CHAR(c)) {
            /* blank space */
            for (j = scp->mouse_pos; j >= sol; --j) {
                c = sc_vtb_getc(&scp->vtb, j);
                if (!IS_SEP_CHAR(c))
                    break;
            }
            start = ++j;
            for (j = scp->mouse_pos; j < eol; ++j) {
                c = sc_vtb_getc(&scp->vtb, j);
                if (!IS_SEP_CHAR(c))
                    break;
            }
            end = j - 1;
        } else {
            /* non-space word */
            for (j = scp->mouse_pos; j >= sol; --j) {
                c = sc_vtb_getc(&scp->vtb, j);
                if (IS_SEP_CHAR(c))
                    break;
            }
            start = ++j;
            for (j = scp->mouse_pos; j < eol; ++j) {
                c = sc_vtb_getc(&scp->vtb, j);
                if (IS_SEP_CHAR(c))
                    break;
            }
            end = j - 1;
        }

        /* copy the found word */
        mouse_do_cut(scp, start, end);
        len = strlen(cut_buffer);
        if (cut_buffer[len - 1] == '\r')
            cut_buffer[len - 1] = '\0';
    }
}
static long tscm_hwdep_read_queue(struct snd_tscm *tscm, char __user *buf,
                                  long remained, loff_t *offset)
{
    char __user *pos = buf;
    unsigned int type = SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL;
    struct snd_firewire_tascam_change *entries = tscm->queue;
    long count;

    // At least, one control event can be copied.
    if (remained < sizeof(type) + sizeof(*entries)) {
        spin_unlock_irq(&tscm->lock);
        return -EINVAL;
    }

    // Copy the type field later.
    count = sizeof(type);
    remained -= sizeof(type);
    pos += sizeof(type);

    while (true) {
        unsigned int head_pos;
        unsigned int tail_pos;
        unsigned int length;

        if (tscm->pull_pos == tscm->push_pos)
            break;
        else if (tscm->pull_pos < tscm->push_pos)
            tail_pos = tscm->push_pos;
        else
            tail_pos = SND_TSCM_QUEUE_COUNT;
        head_pos = tscm->pull_pos;

        length = (tail_pos - head_pos) * sizeof(*entries);
        if (remained < length)
            length = rounddown(remained, sizeof(*entries));
        if (length == 0)
            break;

        spin_unlock_irq(&tscm->lock);
        if (copy_to_user(pos, &entries[head_pos], length))
            return -EFAULT;

        spin_lock_irq(&tscm->lock);

        tscm->pull_pos = tail_pos % SND_TSCM_QUEUE_COUNT;

        count += length;
        remained -= length;
        pos += length;
    }

    spin_unlock_irq(&tscm->lock);

    if (copy_to_user(buf, &type, sizeof(type)))
        return -EFAULT;

    return count;
}
/* Doorbell calculations for device init. */
void kfd_doorbell_init(struct kfd_dev *kfd)
{
    size_t doorbell_start_offset;
    size_t doorbell_aperture_size;
    size_t doorbell_process_limit;

    /*
     * We start with calculations in bytes because the input data might
     * only be byte-aligned.
     * Only after we have done the rounding can we assume any alignment.
     */

    doorbell_start_offset =
        roundup(kfd->shared_resources.doorbell_start_offset,
                doorbell_process_allocation());

    doorbell_aperture_size =
        rounddown(kfd->shared_resources.doorbell_aperture_size,
                  doorbell_process_allocation());

    if (doorbell_aperture_size > doorbell_start_offset)
        doorbell_process_limit =
            (doorbell_aperture_size - doorbell_start_offset) /
            doorbell_process_allocation();
    else
        doorbell_process_limit = 0;

    kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
                         doorbell_start_offset;

    kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
    kfd->doorbell_process_limit = doorbell_process_limit - 1;

    kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
                                       doorbell_process_allocation());

    BUG_ON(!kfd->doorbell_kernel_ptr);

    pr_debug("kfd: doorbell initialization:\n");
    pr_debug("kfd: doorbell base == 0x%08lX\n",
             (uintptr_t)kfd->doorbell_base);
    pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
             kfd->doorbell_id_offset);
    pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
             doorbell_process_limit);
    pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
             (uintptr_t)kfd->doorbell_base);
    pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
             kfd->shared_resources.doorbell_aperture_size);
    pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
             (uintptr_t)kfd->doorbell_kernel_ptr);
}
int smsi2c_ts_feed(void *args, unsigned char *ts_buffer, int size)
{
    struct smscore_device_t *coredev = (struct smscore_device_t *)args;
    struct smscore_buffer_t *cb;
    struct SmsMsgHdr_S *phdr;
    int len = 0;
    int quotient, residue;
    int ts_buf_size_188align;

    sms_debug("%s: buffer:0x%p, size:%d\n", __func__, ts_buffer, size);

    if (!size || !args)
        return 0;

#define TS_PACKET_SIZE 188

    ts_buf_size_188align =
        rounddown((MAX_I2C_BUF_SIZE - sizeof(struct SmsMsgHdr_S)),
                  TS_PACKET_SIZE);

    quotient = size / ts_buf_size_188align;
    residue = size % ts_buf_size_188align;

    for (; quotient > 0; quotient--) {
        cb = smscore_getbuffer(coredev);
        if (!cb) {
            sms_err("Unable to allocate data buffer!\n");
            goto exit;
        }

        phdr = (struct SmsMsgHdr_S *)cb->p;
        memset(cb->p, 0, (int)sizeof(struct SmsMsgHdr_S));
        SMS_INIT_MSG_EX(phdr, MSG_SMS_DAB_CHANNEL, HIF_TASK, 1,
                        ts_buf_size_188align + sizeof(struct SmsMsgHdr_S));
        memcpy((u8 *)(phdr + 1), ts_buffer, ts_buf_size_188align);

        cb->offset = 0;
        cb->size = ts_buf_size_188align + sizeof(struct SmsMsgHdr_S);

        smscore_onresponse(coredev, cb);

        ts_buffer += ts_buf_size_188align;
        len += ts_buf_size_188align;
    }

    if (residue) {
        cb = smscore_getbuffer(coredev);
        if (!cb) {
            sms_err("Unable to allocate data buffer!\n");
            goto exit;
        }

        phdr = (struct SmsMsgHdr_S *)cb->p;
        memset(cb->p, 0, (int)sizeof(struct SmsMsgHdr_S));
        SMS_INIT_MSG_EX(phdr, MSG_SMS_DAB_CHANNEL, HIF_TASK, 1,
                        residue + sizeof(struct SmsMsgHdr_S));
        memcpy((u8 *)(phdr + 1), ts_buffer, residue);

        cb->offset = 0;
        cb->size = residue + sizeof(struct SmsMsgHdr_S);

        smscore_onresponse(coredev, cb);

        ts_buffer += residue;
        len += residue;
    }

exit:
    return len;
}
void
read_wbuf(int fd, wbuf *buf, xfs_mount_t *mp)
{
    int res = 0;
    xfs_off_t lres = 0;
    xfs_off_t newpos;
    size_t diff;

    newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);

    if (newpos != buf->position) {
        diff = buf->position - newpos;
        buf->position = newpos;
        buf->length += diff;
    }

    if (source_position != buf->position) {
        lres = lseek64(fd, buf->position, SEEK_SET);
        if (lres < 0LL) {
            do_warn(_("%s: lseek64 failure at offset %lld\n"),
                    progname, source_position);
            die_perror();
        }
        source_position = buf->position;
    }

    ASSERT(source_position % source_sectorsize == 0);

    /* round up length for direct I/O if necessary */

    if (buf->length % buf->min_io_size != 0)
        buf->length = roundup(buf->length, buf->min_io_size);

    if (buf->length > buf->size) {
        do_warn(_("assert error: buf->length = %d, buf->size = %d\n"),
                buf->length, buf->size);
        killall();
        abort();
    }

    if ((res = read(fd, buf->data, buf->length)) < 0) {
        do_warn(_("%s: read failure at offset %lld\n"),
                progname, source_position);
        die_perror();
    }

    if (res < buf->length &&
        source_position + res == mp->m_sb.sb_dblocks * source_blocksize)
        res = buf->length;
    else
        ASSERT(res == buf->length);
    source_position += res;
    buf->length = res;
}
/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes).  Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(struct xfs_mount *mp, xfs_agnumber_t agno,
    xfs_agino_t ino, int free)
{
    ino_tree_node_t *ino_rec;
    xfs_agino_t s_ino;
    int offset;

    ASSERT(agno < glob_agcount);
    ASSERT(last_rec != NULL);

    s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

    /*
     * check for a cache hit
     */
    if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
        offset = ino - s_ino;
        if (free)
            set_inode_free(last_rec[agno], offset);
        else
            set_inode_used(last_rec[agno], offset);

        return;
    }

    /*
     * check to see if record containing inode is already in the tree.
     * if not, add it
     */
    ino_rec = (ino_tree_node_t *)
        avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
    if (!ino_rec) {
        ino_rec = alloc_ino_node(mp, s_ino);

        if (!avl_insert(inode_uncertain_tree_ptrs[agno],
                        &ino_rec->avl_node))
            do_error(
    _("add_aginode_uncertain - duplicate inode range\n"));
    }

    if (free)
        set_inode_free(ino_rec, ino - s_ino);
    else
        set_inode_used(ino_rec, ino - s_ino);

    /*
     * set cache entry
     */
    last_rec[agno] = ino_rec;
}
ulong board_init_f_alloc_reserve(ulong top)
{
    /* Reserve early malloc arena */
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    top -= CONFIG_VAL(SYS_MALLOC_F_LEN);
#endif
    /* LAST : reserve GD (rounded up to a multiple of 16 bytes) */
    top = rounddown(top - sizeof(struct global_data), 16);

    return top;
}
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
    unsigned int i = 0;
    int rs = (get_random_int() % 2 + 1) * 16;

    do {
        int gs = 1 << i;
        size_t len = get_random_int() % rs + gs;

        test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
    } while (i++ < 3);
}
long probe_kernel_read(void *dst, const void *src, size_t size)
{
    void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);

    if ((unsigned long)src < PAGE_SIZE || size <= 0)
        return -EFAULT;

    if (os_mincore(psrc, size + src - psrc) <= 0)
        return -EFAULT;

    return __probe_kernel_read(dst, src, size);
}
static union dinode *
get_inode(int fd, struct fs *super, ino_t ino)
{
    static caddr_t ipbuf;
    static struct cg *cgp;
    static ino_t last;
    static int cg;
    struct ufs2_dinode *di2;

    if (fd < 0) {            /* flush cache */
        if (ipbuf) {
            free(ipbuf);
            ipbuf = 0;
            if (super != NULL && super->fs_magic == FS_UFS2_MAGIC) {
                free(cgp);
                cgp = 0;
            }
        }
        return 0;
    }

    if (!ipbuf || ino < last || ino >= last + INOCNT(super)) {
        if (super->fs_magic == FS_UFS2_MAGIC &&
            (!cgp || cg != ino_to_cg(super, ino))) {
            cg = ino_to_cg(super, ino);
            if (!cgp && !(cgp = malloc(super->fs_cgsize)))
                errx(1, "allocate cg");
            if (lseek(fd, (off_t)cgtod(super, cg) << super->fs_fshift, 0) < 0)
                err(1, "lseek cg");
            if (read(fd, cgp, super->fs_cgsize) != super->fs_cgsize)
                err(1, "read cg");
            if (!cg_chkmagic(cgp))
                errx(1, "cg has bad magic");
        }
        if (!ipbuf && !(ipbuf = malloc(INOSZ(super))))
            errx(1, "allocate inodes");
        last = rounddown(ino, INOCNT(super));
        if (lseek(fd, (off_t)ino_to_fsba(super, last) << super->fs_fshift, 0) < (off_t)0 ||
            read(fd, ipbuf, INOSZ(super)) != (ssize_t)INOSZ(super))
            err(1, "read inodes");
    }

    if (super->fs_magic == FS_UFS1_MAGIC)
        return ((union dinode *)
            &((struct ufs1_dinode *)ipbuf)[ino % INOCNT(super)]);
    di2 = &((struct ufs2_dinode *)ipbuf)[ino % INOCNT(super)];
    /* If the inode is unused, it might be unallocated too, so zero it. */
    if (isclr(cg_inosused(cgp), ino % super->fs_ipg))
        bzero(di2, sizeof (*di2));
    return ((union dinode *)di2);
}
static int iio_read_first_n_kfifo(struct iio_buffer *r, size_t n,
                                  char __user *buf)
{
    int ret, copied;
    struct iio_kfifo *kf = iio_to_kfifo(r);

    if (n < r->bytes_per_datum)
        return -EINVAL;

    n = rounddown(n, r->bytes_per_datum);
    ret = kfifo_to_user(&kf->kf, buf, n, &copied);

    return copied;
}
STATIC void init_limits(void) {
    // First determine where to end
    byte *end = _fs_end;
    end = rounddown(end, FLASH_PAGESIZE) - FLASH_PAGESIZE;
    last_page_index = (_fs_end - end) / FLASH_PAGESIZE;

    // Now find the start
    byte *start = roundup(end - CHUNK_SIZE * MAX_CHUNKS_IN_FILE_SYSTEM, FLASH_PAGESIZE);
    while (start < _fs_start) {
        start += FLASH_PAGESIZE;
    }
    first_page_index = (_fs_end - start) / FLASH_PAGESIZE;
    chunks_in_file_system = (end - start) >> MBFS_LOG_CHUNK_SIZE;
}
/* copy a line under the mouse pointer */
static void
mouse_cut_line(scr_stat *scp)
{
    int len;
    int from;

    if (scp->status & MOUSE_VISIBLE) {
        from = rounddown(scp->mouse_pos, scp->xsize);
        mouse_do_cut(scp, from, from + scp->xsize - 1);
        len = strlen(cut_buffer);
        if (cut_buffer[len - 1] == '\r')
            cut_buffer[len - 1] = '\0';
        scp->status |= MOUSE_CUTTING;
    }
}
static int
vid_realloc_array(void)
{
    video_adapter_t **new_adp;
    video_switch_t **new_vidsw;
#ifdef FB_INSTALL_CDEV
    struct cdevsw **new_cdevsw;
#endif
    int newsize;
    int s;

    if (!vid_malloc)
        return ENOMEM;

    s = spltty();
    newsize = rounddown(adapters + ARRAY_DELTA, ARRAY_DELTA);
    new_adp = malloc(sizeof(*new_adp)*newsize, M_DEVBUF, M_WAITOK | M_ZERO);
    new_vidsw = malloc(sizeof(*new_vidsw)*newsize, M_DEVBUF,
        M_WAITOK | M_ZERO);
#ifdef FB_INSTALL_CDEV
    new_cdevsw = malloc(sizeof(*new_cdevsw)*newsize, M_DEVBUF,
        M_WAITOK | M_ZERO);
#endif
    bcopy(adapter, new_adp, sizeof(*adapter)*adapters);
    bcopy(vidsw, new_vidsw, sizeof(*vidsw)*adapters);
#ifdef FB_INSTALL_CDEV
    bcopy(vidcdevsw, new_cdevsw, sizeof(*vidcdevsw)*adapters);
#endif
    if (adapters > 1) {
        free(adapter, M_DEVBUF);
        free(vidsw, M_DEVBUF);
#ifdef FB_INSTALL_CDEV
        free(vidcdevsw, M_DEVBUF);
#endif
    }
    adapter = new_adp;
    vidsw = new_vidsw;
#ifdef FB_INSTALL_CDEV
    vidcdevsw = new_cdevsw;
#endif
    adapters = newsize;
    splx(s);

    if (bootverbose)
        printf("fb: new array size %d\n", adapters);

    return 0;
}
/*
 * Abstractly iterate over the collection of memory segments to be
 * dumped; the callback lacks the customary environment-pointer
 * argument because none of the current users really need one.
 *
 * To be used only after dump_seg_prep is called to set things up.
 */
static int
dump_seg_iter(int (*callback)(paddr_t, paddr_t))
{
    int error, i;

#define CALLBACK(start,size) do {          \
    error = callback(start,size);          \
    if (error)                             \
        return error;                      \
} while(0)

    for (i = 0; i < mem_cluster_cnt; ++i) {
        /*
         * The bitmap is scanned within each memory segment,
         * rather than over its entire domain, in case any
         * pages outside of the memory proper have been mapped
         * into kva; they might be devices that wouldn't
         * appreciate being arbitrarily read, and including
         * them could also break the assumption that a sparse
         * dump will always be smaller than a full one.
         */
        if (sparse_dump) {
            paddr_t p, start, end;
            int lastset;

            start = mem_clusters[i].start;
            end = start + mem_clusters[i].size;
            start = rounddown(start, PAGE_SIZE); /* unnecessary? */
            lastset = 0;
            for (p = start; p < end; p += PAGE_SIZE) {
                int thisset = isset(sparse_dump_physmap,
                    p/PAGE_SIZE);

                if (!lastset && thisset)
                    start = p;
                if (lastset && !thisset)
                    CALLBACK(start, p - start);
                lastset = thisset;
            }
            if (lastset)
                CALLBACK(start, p - start);
        } else
            CALLBACK(mem_clusters[i].start, mem_clusters[i].size);
    }
    return 0;
#undef CALLBACK
}
static ssize_t device_read(struct file *filp, char __user *buf,
                           size_t count, loff_t *ppos)
{
    size_t read = 0;

    down(&g_mutex);

    /* Memory map */
    if (*ppos < 0x20000) {
        count = min(count, (u32)(0x20000 - *ppos));
    } else if (0x20000 <= *ppos && *ppos < 0x40000) {
        // Memory hole here -- just return zeroes
        count = min(count, (u32)(0x40000 - *ppos));
        *ppos += count;
        if (clear_user(buf, count))
            read = -EFAULT;
        else
            read = count;
        goto out;
    } else if (0x40000 <= *ppos && *ppos < 0x80000) {
        count = min(count, (u32)(0x80000 - *ppos));
    } else {
        goto out;
    }

    while (count) {
        u32 addr = rounddown(*ppos, MOVI_PAGE_SIZE);
        u32 off = *ppos - addr;
        u32 to_read = min(count, MOVI_PAGE_SIZE - off);

        mmc_movi_read_ram_page(g_card, g_page, addr);

        if (copy_to_user(&buf[read], &g_page[off], to_read)) {
            read = -EFAULT;
            goto out;
        }

        count -= to_read;
        *ppos += to_read;
        read += to_read;
    }

out:
    up(&g_mutex);
    return read;
}
int
setutxdb(int db, const char *file)
{
    struct stat sb;

    switch (db) {
    case UTXDB_ACTIVE:
        if (file == NULL)
            file = _PATH_UTX_ACTIVE;
        break;
    case UTXDB_LASTLOGIN:
        if (file == NULL)
            file = _PATH_UTX_LASTLOGIN;
        break;
    case UTXDB_LOG:
        if (file == NULL)
            file = _PATH_UTX_LOG;
        break;
    default:
        errno = EINVAL;
        return (-1);
    }

    if (uf != NULL)
        fclose(uf);
    uf = fopen(file, "r");
    if (uf == NULL)
        return (-1);

    if (db != UTXDB_LOG) {
        /* Safety check: never use broken files. */
        if (_fstat(fileno(uf), &sb) != -1 &&
            sb.st_size % sizeof(struct futx) != 0) {
            fclose(uf);
            uf = NULL;
            errno = EFTYPE;
            return (-1);
        }
        /* Prevent reading of partial records. */
        (void)setvbuf(uf, NULL, _IOFBF,
            rounddown(BUFSIZ, sizeof(struct futx)));
    }

    udb = db;
    return (0);
}
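The setvbuf() call above sizes the stdio buffer to a whole number of records so a buffered read never ends mid-record. A quick illustration with a made-up record size (the real sizeof(struct futx) is platform-dependent):

#include <stdio.h>

#define rounddown(x, y) (((x) / (y)) * (y))

int main(void)
{
    size_t record = 200;    /* hypothetical record size, not the real struct futx */
    size_t bufsiz = 1024;   /* BUFSIZ is commonly 1024 or larger */

    /* 1024 rounded down to a multiple of 200 is 1000. */
    printf("buffer size: %zu\n", rounddown(bufsiz, record));
    return 0;
}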
void
read_ag_header(int fd, xfs_agnumber_t agno, wbuf *buf, ag_header_t *ag,
    xfs_mount_t *mp, int blocksize, int sectorsize)
{
    xfs_daddr_t off;
    int length;
    xfs_off_t newpos;
    size_t diff;

    /* initial settings */

    diff = 0;
    off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
    buf->position = (xfs_off_t) off * (xfs_off_t) BBSIZE;
    length = buf->length = first_agbno * blocksize;
    if (length == 0) {
        do_log(_("ag header buffer invalid!\n"));
        exit(1);
    }

    /* handle alignment stuff */

    newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
    if (newpos != buf->position) {
        diff = buf->position - newpos;
        buf->position = newpos;
        buf->length += diff;
    }

    /* round up length for direct I/O if necessary */

    if (buf->length % buf->min_io_size != 0)
        buf->length = roundup(buf->length, buf->min_io_size);

    read_wbuf(fd, buf, mp);
    ASSERT(buf->length >= length);

    ag->xfs_sb = (xfs_dsb_t *) (buf->data + diff);
    ASSERT(be32_to_cpu(ag->xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
    ag->xfs_agf = (xfs_agf_t *) (buf->data + diff + sectorsize);
    ASSERT(be32_to_cpu(ag->xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
    ag->xfs_agi = (xfs_agi_t *) (buf->data + diff + 2 * sectorsize);
    ASSERT(be32_to_cpu(ag->xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
    ag->xfs_agfl = (xfs_agfl_t *) (buf->data + diff + 3 * sectorsize);
}
static void init_limits(void) {
    /* First determine where to end */
    char *end;
    if (microbit_mp_appended_script()[0] == 'M') {
        end = microbit_mp_appended_script();
    } else {
        end = microbit_end_of_rom();
    }
    end = rounddown(end, persistent_page_size()) - persistent_page_size();
    last_page_index = (microbit_end_of_rom() - end) / persistent_page_size();

    /* Now find the start */
    char *start = roundup(end - CHUNK_SIZE * MAX_CHUNKS_IN_FILE_SYSTEM, persistent_page_size());
    while (start < microbit_end_of_code()) {
        start += persistent_page_size();
    }
    first_page_index = (microbit_end_of_rom() - start) / persistent_page_size();
    chunks_in_file_system = (end - start) >> LOG_CHUNK_SIZE;
}