static int __init fastbootlog_dump_init(void) { char *fastbootlog_buff; struct fastbootlog_head *head; char *lastlog_start; unsigned int lastlog_size; char *log_start; unsigned int log_size; int use_ioremap = 0; int need_dump_whole = 0; unsigned tmp_len; int ret = 0; if (!check_himntn(HIMNTN_GOBAL_RESETLOG)) { return ret; } if (pfn_valid(__phys_to_pfn(FASTBOOT_DUMP_LOG_ADDR))) { fastbootlog_buff = phys_to_virt(FASTBOOT_DUMP_LOG_ADDR); } else { use_ioremap = 1; fastbootlog_buff = ioremap_wc(FASTBOOT_DUMP_LOG_ADDR, FASTBOOT_DUMP_LOG_SIZE); } if (!fastbootlog_buff) { printk(KERN_ERR "%s: fail to get the virtual address of fastbootlog\n", __func__); return -1; } head = (struct fastbootlog_head *)fastbootlog_buff; check_fastbootlog_head(head, &need_dump_whole); if (need_dump_whole) { head->lastlog_start = 0; head->lastlog_offset = 0; head->log_start = 0; head->log_offset = FASTBOOT_DUMP_LOG_SIZE; } lastlog_start = fastbootlog_buff + head->lastlog_start; if (head->lastlog_offset < head->lastlog_start) { tmp_len = FASTBOOT_DUMP_LOG_SIZE - head->lastlog_start; lastlog_size = tmp_len + head->lastlog_offset - sizeof(struct fastbootlog_head); s_last_fastbootlog_buff = vmalloc(lastlog_size); if (!s_last_fastbootlog_buff) { printk(KERN_ERR "%s: fail to vmalloc %#x bytes s_last_fastbootlog_buff\n", __func__, lastlog_size); ret = -1; goto out; } memcpy(s_last_fastbootlog_buff, lastlog_start, tmp_len); lastlog_start = fastbootlog_buff + sizeof(struct fastbootlog_head); memcpy(s_last_fastbootlog_buff + tmp_len, lastlog_start, lastlog_size - tmp_len); s_last_fastbootlog_size = lastlog_size; } else { lastlog_size = head->lastlog_offset - head->lastlog_start; if (lastlog_size > 0) { s_last_fastbootlog_buff = vmalloc(lastlog_size); if (!s_last_fastbootlog_buff) { printk(KERN_ERR "%s: fail to vmalloc %#x bytes s_last_fastbootlog_buff\n", __func__, lastlog_size); ret = -1; goto out; } memcpy(s_last_fastbootlog_buff, lastlog_start, lastlog_size); s_last_fastbootlog_size = lastlog_size; } } log_start = fastbootlog_buff + head->log_start; if (head->log_offset < head->log_start) { tmp_len = FASTBOOT_DUMP_LOG_SIZE - head->log_start; log_size = tmp_len + head->log_offset - sizeof(struct fastbootlog_head); s_fastbootlog_buff = vmalloc(log_size); if (!s_fastbootlog_buff) { printk(KERN_ERR "%s: fail to vmalloc %#x bytes s_fastbootlog_buff\n", __func__, log_size); ret = -1; goto out; } memcpy(s_fastbootlog_buff, log_start, tmp_len); log_start = fastbootlog_buff + sizeof(struct fastbootlog_head); memcpy(s_fastbootlog_buff + tmp_len, log_start, log_size - tmp_len); s_fastbootlog_size = log_size; } else { log_size = head->log_offset - head->log_start; if (log_size > 0) { s_fastbootlog_buff = vmalloc(log_size); if (!s_fastbootlog_buff) { printk(KERN_ERR "%s: fail to vmalloc %#x bytes s_fastbootlog_buff\n", __func__, log_size); ret = -1; goto out; } memcpy(s_fastbootlog_buff, log_start, log_size); s_fastbootlog_size = log_size; } } out: if (use_ioremap && fastbootlog_buff) { iounmap(fastbootlog_buff); } if (s_last_fastbootlog_buff) { bootloader_logger_dump(s_last_fastbootlog_buff, s_last_fastbootlog_size, "last"); balong_create_log_proc_entry("last_fastboot_log", S_IRUSR | S_IRGRP, &last_fastbootlog_dump_file_fops, NULL); } if (s_fastbootlog_buff) { bootloader_logger_dump(s_fastbootlog_buff, s_fastbootlog_size, "current"); balong_create_log_proc_entry("fastboot_log", S_IRUSR | S_IRGRP, &fastbootlog_dump_file_fops, NULL); } return ret; }
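/*
 * Hedged sketch (not from the driver above): the general pattern it uses for
 * snapshotting a bootloader log out of reserved memory. Lowmem the kernel
 * already maps can be addressed with phys_to_virt(); anything else gets a
 * temporary ioremap_wc() mapping that is dropped once the copy is made.
 * All names and parameters here are illustrative.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static void *snapshot_reserved_log(phys_addr_t pa, size_t size)
{
	void *src, *copy;
	bool mapped = false;

	if (pfn_valid(__phys_to_pfn(pa))) {
		src = phys_to_virt(pa);
	} else {
		src = ioremap_wc(pa, size);
		mapped = true;
	}
	if (!src)
		return NULL;

	copy = vmalloc(size);	/* survives after the mapping is gone */
	if (copy)
		memcpy(copy, src, size);

	if (mapped)
		iounmap(src);
	return copy;
}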
/* * Account for GOT and PLT relocations. We can't add sections for * got and plt but we can increase the core module size. */ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, char *secstrings, struct module *me) { Elf_Shdr *symtab; Elf_Sym *symbols; Elf_Rela *rela; char *strings; int nrela, i, j; /* Find symbol table and string table. */ symtab = NULL; for (i = 0; i < hdr->e_shnum; i++) switch (sechdrs[i].sh_type) { case SHT_SYMTAB: symtab = sechdrs + i; break; } if (!symtab) { printk(KERN_ERR "module %s: no symbol table\n", me->name); return -ENOEXEC; } /* Allocate one syminfo structure per symbol. */ me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); me->arch.syminfo = vmalloc(me->arch.nsyms * sizeof(struct mod_arch_syminfo)); if (!me->arch.syminfo) return -ENOMEM; symbols = (void *) hdr + symtab->sh_offset; strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset; for (i = 0; i < me->arch.nsyms; i++) { if (symbols[i].st_shndx == SHN_UNDEF && strcmp(strings + symbols[i].st_name, "_GLOBAL_OFFSET_TABLE_") == 0) /* "Define" it as absolute. */ symbols[i].st_shndx = SHN_ABS; me->arch.syminfo[i].got_offset = -1UL; me->arch.syminfo[i].plt_offset = -1UL; me->arch.syminfo[i].got_initialized = 0; me->arch.syminfo[i].plt_initialized = 0; } /* Search for got/plt relocations. */ me->arch.got_size = me->arch.plt_size = 0; for (i = 0; i < hdr->e_shnum; i++) { if (sechdrs[i].sh_type != SHT_RELA) continue; nrela = sechdrs[i].sh_size / sizeof(Elf_Rela); rela = (void *) hdr + sechdrs[i].sh_offset; for (j = 0; j < nrela; j++) check_rela(rela + j, me); } /* Increase core size by size of got & plt and set start offsets for got and plt. */ me->core_layout.size = ALIGN(me->core_layout.size, 4); me->arch.got_offset = me->core_layout.size; me->core_layout.size += me->arch.got_size; me->arch.plt_offset = me->core_layout.size; me->core_layout.size += me->arch.plt_size; return 0; }
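/*
 * Hedged sketch (not part of the loader above): the reserve-by-offset
 * pattern it applies to got/plt — grow one planned allocation and hand out
 * aligned sub-offsets that are later added to the module base address.
 */
#include <linux/kernel.h>

static unsigned long reserve_region(unsigned long *total, unsigned long len,
				    unsigned long align)
{
	unsigned long off = ALIGN(*total, align);	/* e.g. 4-byte align */

	*total = off + len;
	return off;
}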
static int myloader_parse_partitions(struct mtd_info *master,
				     struct mtd_partition **pparts,
				     struct mtd_part_parser_data *data)
{
	struct part_data *buf;
	struct mylo_partition_table *tab;
	struct mylo_partition *part;
	struct mtd_partition *mtd_parts;
	struct mtd_partition *mtd_part;
	int num_parts;
	int ret, i;
	size_t retlen;
	char *names;
	unsigned long offset;
	unsigned long blocklen;

	buf = vmalloc(sizeof(*buf));
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}
	tab = &buf->tab;

	blocklen = master->erasesize;
	if (blocklen < BLOCK_LEN_MIN)
		blocklen = BLOCK_LEN_MIN;

	offset = blocklen;

	/* Find the partition table */
	for (i = 0; i < 4; i++, offset += blocklen) {
		printk(KERN_DEBUG "%s: searching for MyLoader partition table"
		       " at offset 0x%lx\n", master->name, offset);

		ret = mtd_read(master, offset, sizeof(*buf), &retlen,
			       (void *)buf);
		if (ret)
			goto out_free_buf;

		if (retlen != sizeof(*buf)) {
			ret = -EIO;
			goto out_free_buf;
		}

		/* Check for Partition Table magic number */
		if (tab->magic == le32_to_cpu(MYLO_MAGIC_PARTITIONS))
			break;
	}

	if (tab->magic != le32_to_cpu(MYLO_MAGIC_PARTITIONS)) {
		printk(KERN_DEBUG "%s: no MyLoader partition table found\n",
		       master->name);
		ret = 0;
		goto out_free_buf;
	}

	/* The MyLoader and the Partition Table are always present */
	num_parts = 2;

	/* Detect number of used partitions */
	for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
		part = &tab->partitions[i];

		if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
			continue;

		num_parts++;
	}

	mtd_parts = kzalloc((num_parts * sizeof(*mtd_part) +
			     num_parts * PART_NAME_LEN), GFP_KERNEL);
	if (!mtd_parts) {
		ret = -ENOMEM;
		goto out_free_buf;
	}

	mtd_part = mtd_parts;
	names = (char *)&mtd_parts[num_parts];

	strncpy(names, "myloader", PART_NAME_LEN);
	mtd_part->name = names;
	mtd_part->offset = 0;
	mtd_part->size = offset;
	mtd_part->mask_flags = MTD_WRITEABLE;
	mtd_part++;
	names += PART_NAME_LEN;

	strncpy(names, "partition_table", PART_NAME_LEN);
	mtd_part->name = names;
	mtd_part->offset = offset;
	mtd_part->size = blocklen;
	mtd_part->mask_flags = MTD_WRITEABLE;
	mtd_part++;
	names += PART_NAME_LEN;

	for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
		part = &tab->partitions[i];

		if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
			continue;

		if ((buf->names[i][0]) && (buf->names[i][0] != '\xff'))
			strncpy(names, buf->names[i], PART_NAME_LEN);
		else
			snprintf(names, PART_NAME_LEN, "partition%d", i);

		mtd_part->offset = le32_to_cpu(part->addr);
		mtd_part->size = le32_to_cpu(part->size);
		mtd_part->name = names;
		mtd_part++;
		names += PART_NAME_LEN;
	}

	*pparts = mtd_parts;
	ret = num_parts;

out_free_buf:
	vfree(buf);
out:
	return ret;
}
static int rps_sock_flow_sysctl(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned int orig_size, size; int ret, i; ctl_table tmp = { .data = &size, .maxlen = sizeof(size), .mode = table->mode }; struct rps_sock_flow_table *orig_sock_table, *sock_table; static DEFINE_MUTEX(sock_flow_mutex); mutex_lock(&sock_flow_mutex); orig_sock_table = rcu_dereference_protected(rps_sock_flow_table, lockdep_is_held(&sock_flow_mutex)); size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { if (size) { if (size > 1<<30) { /* Enforce limit to prevent overflow */ mutex_unlock(&sock_flow_mutex); return -EINVAL; } size = roundup_pow_of_two(size); if (size != orig_size) { sock_table = vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size)); if (!sock_table) { mutex_unlock(&sock_flow_mutex); return -ENOMEM; } sock_table->mask = size - 1; } else sock_table = orig_sock_table; for (i = 0; i < size; i++) sock_table->ents[i] = RPS_NO_CPU; } else sock_table = NULL; if (sock_table != orig_sock_table) { rcu_assign_pointer(rps_sock_flow_table, sock_table); if (sock_table) static_key_slow_inc(&rps_needed); if (orig_sock_table) { static_key_slow_dec(&rps_needed); synchronize_rcu(); vfree(orig_sock_table); } } } mutex_unlock(&sock_flow_mutex); return ret; } #endif /* CONFIG_RPS */ static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { .procname = "wmem_max", .data = &sysctl_wmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_sndbuf, }, { .procname = "rmem_max",
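/*
 * Hedged sketch of the publish-then-reclaim sequence above: swap in the new
 * table while holding a mutex, then vfree() the old one only after an RCU
 * grace period so lockless readers cannot still be walking it. The names
 * here (flow_table, active_table) are illustrative, not from net/core.
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct flow_table {
	unsigned int mask;
	u16 ents[];		/* flexible array sized to mask + 1 */
};

static struct flow_table __rcu *active_table;

static void publish_table(struct flow_table *new_tbl, struct mutex *lock)
{
	struct flow_table *old;

	old = rcu_dereference_protected(active_table, lockdep_is_held(lock));
	rcu_assign_pointer(active_table, new_tbl);
	if (old) {
		synchronize_rcu();	/* wait out existing readers */
		vfree(old);
	}
}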
/**
 * compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @seb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock to
 * compare
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and reports which one is newer. In
 * case of success this function returns a positive value, in case of failure,
 * a negative error code is returned. The success return codes use the
 * following bits:
 * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
 *   second PEB (described by @pnum and @vid_hdr);
 * o bit 0 is set: the second PEB is newer;
 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
 * o bit 1 is set: bit-flips were detected in the newer LEB;
 * o bit 2 is cleared: the older LEB is not corrupted;
 * o bit 2 is set: the older LEB is corrupted.
 */
static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
			int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	void *buf;
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (seb->sqnum == 0 && sqnum2 == 0) {
		long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);

		/*
		 * UBI constantly increases the logical eraseblock version
		 * number and it can overflow. Thus, we have to bear in mind
		 * that versions that are close to %0xFFFFFFFF are less than
		 * versions that are close to %0.
		 *
		 * The UBI WL unit guarantees that the number of pending tasks
		 * is not greater than %0x7FFFFFFF. So, if the difference
		 * between any two versions is greater than or equal to
		 * %0x7FFFFFFF, there was an overflow and the logical
		 * eraseblock with lower version is actually newer than the
		 * one with higher version.
		 *
		 * FIXME: but this is anyway obsolete and will be removed at
		 * some point.
		 */
		dbg_bld("using old crappy leb_ver stuff");

		if (v1 == v2) {
			ubi_err("PEB %d and PEB %d have the same version %ld",
				seb->pnum, pnum, (long)v1);
			return -EINVAL;
		}

		abs = v1 - v2;
		if (abs < 0)
			abs = -abs;

		if (abs < 0x7FFFFFFF)
			/* Non-overflow situation */
			second_is_newer = (v2 > v1);
		else
			second_is_newer = (v2 < v1);
	} else
		/* Obviously the LEB with lower sequence counter is older */
		second_is_newer = sqnum2 > seb->sqnum;

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * newer version is not set, then we just return, otherwise we have to
	 * check data CRC. For the second PEB we already have the VID header,
	 * for the first one - we'll need to re-read it from flash.
	 *
	 * FIXME: this may be optimized so that we wouldn't read twice.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		pnum = seb->pnum;

		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				dbg_err("VID of PEB %d header is bad, but it "
					"was OK earlier", pnum);
				if (err > 0)
					err = -EIO;
				goto out_free_vidh;
			}
		}

		if (!vh->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			err = bitflips << 1;
			goto out_free_vidh;
		}

		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */
	len = be32_to_cpu(vid_hdr->data_size);
	buf = vmalloc(len);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_vidh;
	}

	err = ubi_io_read_data(ubi, buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS)
		goto out_free_buf;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips = !!err;
	}

	vfree(buf);
	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_free_buf:
	vfree(buf);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
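/*
 * Hedged sketch of the wrap-aware comparison described in the comment above:
 * with 32-bit versions that may overflow, a difference of 0x7FFFFFFF or more
 * means the numerically smaller value is actually the newer one.
 */
#include <linux/types.h>

static int second_version_newer(u32 v1, u32 v2)
{
	s64 diff = (s64)v1 - (s64)v2;

	if (diff < 0)
		diff = -diff;

	return (diff < 0x7FFFFFFF) ? (v2 > v1) : (v2 < v1);
}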
static int _proc_read_global(char *page, char **start, off_t pos, int count, int *eof, void *data) { #define LVM_PROC_BUF ( i == 0 ? dummy_buf : &buf[sz]) int c, i, l, p, v, vg_counter, pv_counter, lv_counter, lv_open_counter, lv_open_total, pe_t_bytes, hash_table_bytes, lv_block_exception_t_bytes, seconds; static off_t sz; off_t sz_last; static char *buf = NULL; static char dummy_buf[160]; /* sized for 2 lines */ vg_t *vg_ptr; lv_t *lv_ptr; pv_t *pv_ptr; #ifdef DEBUG_LVM_PROC_GET_INFO printk(KERN_DEBUG "%s - lvm_proc_get_global_info CALLED pos: %lu count: %d\n", lvm_name, pos, count); #endif if(pos != 0 && buf != NULL) goto out; sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter = \ lv_open_total = pe_t_bytes = hash_table_bytes = \ lv_block_exception_t_bytes = 0; /* get some statistics */ for (v = 0; v < ABS_MAX_VG; v++) { if ((vg_ptr = vg[v]) != NULL) { vg_counter++; pv_counter += vg_ptr->pv_cur; lv_counter += vg_ptr->lv_cur; if (vg_ptr->lv_cur > 0) { for (l = 0; l < vg[v]->lv_max; l++) { if ((lv_ptr = vg_ptr->lv[l]) != NULL) { pe_t_bytes += lv_ptr->lv_allocated_le; hash_table_bytes += lv_ptr->lv_snapshot_hash_table_size; if (lv_ptr->lv_block_exception != NULL) lv_block_exception_t_bytes += lv_ptr->lv_remap_end; if (lv_ptr->lv_open > 0) { lv_open_counter++; lv_open_total += lv_ptr->lv_open; } } } } } } pe_t_bytes *= sizeof(pe_t); lv_block_exception_t_bytes *= sizeof(lv_block_exception_t); if (buf != NULL) { P_KFREE("%s -- vfree %d\n", lvm_name, __LINE__); lock_kernel(); vfree(buf); unlock_kernel(); buf = NULL; } /* 2 times: first to get size to allocate buffer, 2nd to fill the malloced buffer */ for (i = 0; i < 2; i++) { sz = 0; sz += sprintf(LVM_PROC_BUF, "LVM " #ifdef MODULE "module" #else "driver" #endif " %s\n\n" "Total: %d VG%s %d PV%s %d LV%s ", lvm_version, vg_counter, vg_counter == 1 ? "" : "s", pv_counter, pv_counter == 1 ? "" : "s", lv_counter, lv_counter == 1 ? "" : "s"); sz += sprintf(LVM_PROC_BUF, "(%d LV%s open", lv_open_counter, lv_open_counter == 1 ? "" : "s"); if (lv_open_total > 0) sz += sprintf(LVM_PROC_BUF, " %d times)\n", lv_open_total); else sz += sprintf(LVM_PROC_BUF, ")"); sz += sprintf(LVM_PROC_BUF, "\nGlobal: %lu bytes malloced IOP version: %d ", vg_counter * sizeof(vg_t) + pv_counter * sizeof(pv_t) + lv_counter * sizeof(lv_t) + pe_t_bytes + hash_table_bytes + lv_block_exception_t_bytes + sz_last, lvm_iop_version); seconds = CURRENT_TIME - loadtime; if (seconds < 0) loadtime = CURRENT_TIME + seconds; if (seconds / 86400 > 0) { sz += sprintf(LVM_PROC_BUF, "%d day%s ", seconds / 86400, seconds / 86400 == 0 || seconds / 86400 > 1 ? "s" : ""); } sz += sprintf(LVM_PROC_BUF, "%d:%02d:%02d active\n", (seconds % 86400) / 3600, (seconds % 3600) / 60, seconds % 60); if (vg_counter > 0) { for (v = 0; v < ABS_MAX_VG; v++) { /* volume group */ if ((vg_ptr = vg[v]) != NULL) { sz += _vg_info(vg_ptr, LVM_PROC_BUF); /* physical volumes */ sz += sprintf(LVM_PROC_BUF, "\n PV%s ", vg_ptr->pv_cur == 1 ? ": " : "s:"); c = 0; for (p = 0; p < vg_ptr->pv_max; p++) { if ((pv_ptr = vg_ptr->pv[p]) != NULL) { sz += _pv_info(pv_ptr, LVM_PROC_BUF); c++; if (c < vg_ptr->pv_cur) sz += sprintf(LVM_PROC_BUF, "\n "); } } /* logical volumes */ sz += sprintf(LVM_PROC_BUF, "\n LV%s ", vg_ptr->lv_cur == 1 ? 
": " : "s:"); c = 0; for (l = 0; l < vg_ptr->lv_max; l++) { if ((lv_ptr = vg_ptr->lv[l]) != NULL) { sz += _lv_info(vg_ptr, lv_ptr, LVM_PROC_BUF); c++; if (c < vg_ptr->lv_cur) sz += sprintf(LVM_PROC_BUF, "\n "); } } if (vg_ptr->lv_cur == 0) sz += sprintf(LVM_PROC_BUF, "none"); sz += sprintf(LVM_PROC_BUF, "\n"); } } } if (buf == NULL) { lock_kernel(); buf = vmalloc(sz); unlock_kernel(); if (buf == NULL) { sz = 0; return sprintf(page, "%s - vmalloc error at line %d\n", lvm_name, __LINE__); } } sz_last = sz; } out: if (pos > sz - 1) { lock_kernel(); vfree(buf); unlock_kernel(); buf = NULL; return 0; } *start = &buf[pos]; if (sz - pos < count) return sz - pos; else return count; #undef LVM_PROC_BUF }
void mdp_config_vsync(struct msm_fb_data_type *mfd) { /* vsync on primary lcd only for now */ if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1) || (!vsync_mode)) { goto err_handle; } vsync_clk_status = 0; if (mfd->panel_info.lcd.vsync_enable) { mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch + mfd->panel_info.lcd.v_front_porch + mfd->panel_info.lcd.v_pulse_width; mfd->total_lcd_lines = mfd->panel_info.yres + mfd->total_porch_lines; mfd->lcd_ref_usec_time = 100000000 / mfd->panel_info.lcd.refx100; mfd->vsync_handler_pending = FALSE; mfd->last_vsync_timetick.tv64 = 0; #ifdef MDP_HW_VSYNC if (mdp_vsync_clk == NULL) mdp_vsync_clk = clk_get(NULL, "mdp_vsync_clk"); if (IS_ERR(mdp_vsync_clk)) { printk(KERN_ERR "error: can't get mdp_vsync_clk!\n"); mfd->use_mdp_vsync = 0; } else mfd->use_mdp_vsync = 1; if (mfd->use_mdp_vsync) { uint32 vsync_cnt_cfg_dem; uint32 mdp_vsync_clk_speed_hz; mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk); if (mdp_vsync_clk_speed_hz == 0) { mfd->use_mdp_vsync = 0; } else { /* * Do this calculation in 2 steps for * rounding uint32 properly. */ vsync_cnt_cfg_dem = (mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) / 100; vsync_cnt_cfg = (mdp_vsync_clk_speed_hz) / vsync_cnt_cfg_dem; mdp_vsync_cfg_regs(mfd, TRUE); } } #else mfd->use_mdp_vsync = 0; hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler; mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4); #endif #ifdef CONFIG_FB_MSM_MDDI mfd->channel_irq = 0; if (mfd->panel_info.lcd.hw_vsync_mode) { u32 vsync_gpio = mfd->vsync_gpio; u32 ret; if (vsync_gpio == -1) { MSM_FB_INFO("vsync_gpio not defined!\n"); goto err_handle; } ret = gpio_tlmm_config(GPIO_CFG (vsync_gpio, (mfd->use_mdp_vsync) ? 1 : 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE); if (ret) goto err_handle; /* * if use_mdp_vsync, then no interrupt need since * mdp_vsync is feed directly to mdp to reset the * write pointer counter. therefore no irq_handler * need to reset write pointer counter. */ if (!mfd->use_mdp_vsync) { mfd->channel_irq = MSM_GPIO_TO_INT(vsync_gpio); if (request_irq (mfd->channel_irq, &mdp_hw_vsync_handler_proxy, IRQF_TRIGGER_FALLING, "VSYNC_GPIO", (void *)mfd)) { MSM_FB_INFO ("irq=%d failed! vsync_gpio=%d\n", mfd->channel_irq, vsync_gpio); goto err_handle; } } } #endif mdp_hw_vsync_clk_enable(mfd); mdp_set_vsync((unsigned long)mfd); } return; err_handle: if (mfd->vsync_width_boundary) vfree(mfd->vsync_width_boundary); mfd->panel_info.lcd.vsync_enable = FALSE; printk(KERN_ERR "%s: failed!\n", __func__); }
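/*
 * Hedged worked example of the two-step divider math above, with made-up
 * numbers: a 60.00 Hz panel (refx100 = 6000) with 525 total lines on a
 * 24.576 MHz vsync clock. Splitting the divide keeps the intermediate
 * product inside u32 range.
 */
#include <linux/types.h>

static u32 vsync_divider_example(void)
{
	u32 refx100 = 6000;		/* hypothetical */
	u32 total_lcd_lines = 525;	/* hypothetical */
	u32 clk_hz = 24576000;		/* hypothetical */
	u32 lines_per_sec;

	/* step 1: line rate = 6000 * 525 / 100 = 31500 lines/s */
	lines_per_sec = (refx100 * total_lcd_lines) / 100;

	/* step 2: divider = 24576000 / 31500 = 780 */
	return clk_hz / lines_per_sec;
}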
int DmxDevFilterStart(dmxdev_filter_t *dmxdevfilter)
{
	dmxdev_t *dmxdev = dmxdevfilter->dev;
	void *mem;
	int ret, i;

	if (dmxdevfilter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
		DmxDevFilterStop(dmxdevfilter);

	mem = dmxdevfilter->buffer.data;
	if (!mem) {
		mem = vmalloc(dmxdevfilter->buffer.size);
		spin_lock_irq(&dmxdevfilter->dev->lock);
		dmxdevfilter->buffer.data = mem;
		spin_unlock_irq(&dmxdevfilter->dev->lock);
		if (!dmxdevfilter->buffer.data)
			return -ENOMEM;
	}

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
	{
		struct dmxSctFilterParams *para = &dmxdevfilter->params.sec;
		dmx_section_filter_t **secfilter = &dmxdevfilter->filter.sec;
		dmx_section_feed_t **secfeed = &dmxdevfilter->feed.sec;

		*secfilter = 0;
		*secfeed = 0;

		/* find active filter/feed with same PID */
		for (i = 0; i < dmxdev->filternum; i++)
			if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
			    dmxdev->filter[i].pid == para->pid) {
				if (dmxdev->filter[i].type != DMXDEV_TYPE_SEC)
					return -EBUSY;
				*secfeed = dmxdev->filter[i].feed.sec;
				break;
			}

		/* if no feed found, try to allocate new one */
		if (!*secfeed) {
			ret = dmxdev->demux->
				allocate_section_feed(dmxdev->demux, secfeed,
						      DmxDevSectionCallback);
			if (ret < 0) {
				printk("could not alloc feed\n");
				return ret;
			}

			ret = (*secfeed)->set(*secfeed, para->pid, 32768, 0,
					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
			if (ret < 0) {
				printk("could not set feed\n");
				DmxDevFeedRestart(dmxdevfilter);
				return ret;
			}
		} else
			DmxDevFeedStop(dmxdevfilter);

		ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
		if (ret < 0) {
			DmxDevFeedRestart(dmxdevfilter);
			dmxdevfilter->feed.sec->start_filtering(*secfeed);
			dprintk("could not get filter\n");
			return ret;
		}

		(*secfilter)->priv = (void *)dmxdevfilter;

		memcpy(&((*secfilter)->filter_value[3]),
		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mask[3],
		       &para->filter.mask[1], DMX_FILTER_SIZE - 1);

		(*secfilter)->filter_value[0] = para->filter.filter[0];
		(*secfilter)->filter_mask[0] = para->filter.mask[0];
		(*secfilter)->filter_mask[1] = 0;
		(*secfilter)->filter_mask[2] = 0;

		dmxdevfilter->todo = 0;
		dmxdevfilter->feed.sec->
			start_filtering(dmxdevfilter->feed.sec);
		DmxDevFilterTimer(dmxdevfilter);
		break;
	}
	case DMXDEV_TYPE_PES:
	{
		struct timespec timeout = {0};
		struct dmxPesFilterParams *para = &dmxdevfilter->params.pes;
		dmxOutput_t otype;
		int ret;
		int ts_type;
		dmx_ts_pes_t ts_pes;
		dmx_ts_feed_t **tsfeed = &dmxdevfilter->feed.ts;

		dmxdevfilter->feed.ts = 0;
		otype = para->output;

		ts_pes = (dmx_ts_pes_t)para->pesType;

		if (ts_pes < DMX_PES_OTHER)
			ts_type = TS_DECODER;
		else
			ts_type = 0;

		if (otype == DMX_OUT_DECODER && ts_pes == DMX_PES_PCR &&
		    para->input == DMX_IN_FRONTEND) {
			dmxdev->demux->set_pcr_pid(para->pid);
			break;
		}

		if (otype == DMX_OUT_TS_TAP || otype == DMX_OUT_TS_NET)
			ts_type |= TS_PACKET;

		if (otype == DMX_OUT_TAP || otype == DMX_OUT_ES_NET)
			ts_type |= TS_PAYLOAD_ONLY | TS_PACKET;

		if (otype == DMX_OUT_ES_NET || otype == DMX_OUT_TS_NET) {
			struct sockaddr_in saddr;

			ret = sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP,
					  &dmxdevfilter->s);
			if (ret < 0)
				return ret;
			memset(saddr.__pad, 0, sizeof(saddr.__pad));
			saddr.sin_family = AF_INET;
			saddr.sin_port = para->port;
			saddr.sin_addr.s_addr = para->ip;
			ret = dmxdevfilter->s->ops->
				connect(dmxdevfilter->s,
					(struct sockaddr *)&saddr,
					sizeof(saddr), 0);
			if (ret < 0) {
				sock_release(dmxdevfilter->s);
				dmxdevfilter->s = NULL;
				return ret;
			}
		}

		if (otype == DMX_OUT_TS_NET && dmxdev->dvr_buffer.data == NULL) {
			dmxdev->dvr_buffer.size = DVR_BUFFER_SIZE;
			dmxdev->dvr_buffer.data = vmalloc(DVR_BUFFER_SIZE);
		}

		ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, tsfeed,
						      DmxDevTSCallback,
						      ts_type, ts_pes);
		if (ret < 0)
			return ret;

		(*tsfeed)->priv = (void *)dmxdevfilter;
		ret = (*tsfeed)->set(*tsfeed, para->pid, 188, 32768, 0, timeout);
		if (ret < 0) {
			dmxdev->demux->
				release_ts_feed(dmxdev->demux, *tsfeed);
			return ret;
		}
		if ((*tsfeed)->set_type)
			ret = (*tsfeed)->set_type(*tsfeed, ts_type, ts_pes);
		if (ret < 0) {
			dmxdev->demux->
				release_ts_feed(dmxdev->demux, *tsfeed);
			return ret;
		}
		dmxdevfilter->feed.ts->
			start_filtering(dmxdevfilter->feed.ts);
		break;
	}
	default:
		return -EINVAL;
	}
	DmxDevFilterStateSet(dmxdevfilter, DMXDEV_STATE_GO);
	return 0;
}
int linect_allocate_depth_buffers(struct usb_linect *dev) { int i; void *kbuf; LNT_DEBUG("Allocate video buffers\n"); if (dev == NULL) return -ENXIO; // Allocate frame buffer structure if (dev->cam->framebuf_depth == NULL) { kbuf = kzalloc(default_nbrframebuf * sizeof(struct linect_frame_buf), GFP_KERNEL); if (kbuf == NULL) { LNT_ERROR("Failed to allocate frame buffer structure\n"); return -ENOMEM; } dev->cam->framebuf_depth = kbuf; } // Create frame buffers and make circular ring for (i=0; i<default_nbrframebuf; i++) { if (dev->cam->framebuf_depth[i].data == NULL) { kbuf = vmalloc(LNT_FRAME_SIZE); if (kbuf == NULL) { LNT_ERROR("Failed to allocate frame buffer %d\n", i); return -ENOMEM; } dev->cam->framebuf_depth[i].data = kbuf; memset(kbuf, 0, LNT_FRAME_SIZE); } } // Allocate image buffer; double buffer for mmap() kbuf = linect_rvmalloc(dev->cam->nbuffers_depth * dev->cam->len_per_image_depth); if (kbuf == NULL) { LNT_ERROR("Failed to allocate image buffer(s). needed (%d)\n", dev->cam->nbuffers_depth * dev->cam->len_per_image_depth); return -ENOMEM; } dev->cam->image_data_depth = kbuf; for (i = 0; i < dev->cam->nbuffers_depth; i++) { dev->cam->images_depth[i].offset = i * dev->cam->len_per_image_depth; dev->cam->images_depth[i].vma_use_count = 0; } for (; i < LNT_MAX_IMAGES; i++) dev->cam->images_depth[i].offset = 0; kbuf = NULL; kbuf = linect_rvmalloc(640*480*2); if (kbuf == NULL) { LNT_ERROR("Failed to allocate image temp buffer. needed (%d)\n", 640*480*2); return -ENOMEM; } dev->cam->image_tmp = kbuf; /*kbuf = linect_rvmalloc(4096); if (kbuf == NULL) { LNT_ERROR("Failed to allocate depth gamma buffer. needed (%d)\n", 4096); return -ENOMEM; } dev->cam->depth_gamma = kbuf; // Init gamma for (i=0; i<2048; i++) { v = i/2048.0; v = v*v*v* 6; dev->cam->depth_gamma[i] = v*6*256; }*/ return 0; }
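/*
 * Hedged sketch: the frame-buffer loop above returns -ENOMEM part-way
 * through and leaves earlier allocations for a later teardown path. This
 * variant unwinds on partial failure instead; nbufs and frame_size are
 * illustrative parameters, and vzalloc() stands in for the vmalloc() +
 * memset() pair used above.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void **alloc_frames(int nbufs, size_t frame_size)
{
	void **frames;
	int i;

	frames = kcalloc(nbufs, sizeof(*frames), GFP_KERNEL);
	if (!frames)
		return NULL;

	for (i = 0; i < nbufs; i++) {
		frames[i] = vzalloc(frame_size);	/* zeroed frame */
		if (!frames[i])
			goto unwind;
	}
	return frames;

unwind:
	while (--i >= 0)
		vfree(frames[i]);
	kfree(frames);
	return NULL;
}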
static ssize_t ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; size_t already_read = 0; off_t pos; size_t bufsize; int error; void* freepage; size_t freelen; ncp_dbg(1, "enter %pd2\n", dentry); pos = *ppos; if ((ssize_t) count < 0) { return -EINVAL; } if (!count) return 0; if (pos > inode->i_sb->s_maxbytes) return 0; if (pos + count > inode->i_sb->s_maxbytes) { count = inode->i_sb->s_maxbytes - pos; } error = ncp_make_open(inode, O_RDONLY); if (error) { ncp_dbg(1, "open failed, error=%d\n", error); return error; } bufsize = NCP_SERVER(inode)->buffer_size; error = -EIO; freelen = ncp_read_bounce_size(bufsize); freepage = vmalloc(freelen); if (!freepage) goto outrel; error = 0; /* First read in as much as possible for each bufsize. */ while (already_read < count) { int read_this_time; size_t to_read = min_t(unsigned int, bufsize - (pos % bufsize), count - already_read); error = ncp_read_bounce(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, pos, to_read, buf, &read_this_time, freepage, freelen); if (error) { error = -EIO; /* NW errno -> Linux errno */ break; } pos += read_this_time; buf += read_this_time; already_read += read_this_time; if (read_this_time != to_read) { break; } } vfree(freepage); *ppos = pos; file_accessed(file); ncp_dbg(1, "exit %pd2\n", dentry); outrel: ncp_inode_close(inode); return already_read ? already_read : error; }
int rtl8723ae_init_sw_vars( struct ieee80211_hw *hw ) { struct rtl_priv *rtlpriv = rtl_priv( hw ); struct rtl_pci *rtlpci = rtl_pcidev( rtl_pcipriv( hw ) ); struct rtl_hal *rtlhal = rtl_hal( rtl_priv( hw ) ); int err; rtl8723ae_bt_reg_init( hw ); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT( 12 ) | BIT( 13 ); /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = ( RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | RCR_ADF | RCR_AICV | RCR_AB | RCR_AM | RCR_APM | 0 ); rtlpci->irq_mask[0] = ( u32 ) ( PHIMR_ROK | PHIMR_RDU | PHIMR_VODOK | PHIMR_VIDOK | PHIMR_BEDOK | PHIMR_BKDOK | PHIMR_MGNTDOK | PHIMR_HIGHDOK | PHIMR_C2HCMD | PHIMR_HISRE_IND | PHIMR_TSF_BIT32_TOGGLE | PHIMR_TXBCNOK | PHIMR_PSTIMEOUT | 0 ); rtlpci->irq_mask[1] = ( u32 )( PHIMR_RXFOVW | 0 ); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl8723ae_init_aspm_vars( hw ); if ( rtlpriv->psc.reg_fwctrl_lps == 1 ) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if ( rtlpriv->psc.reg_fwctrl_lps == 2 ) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if ( rtlpriv->psc.reg_fwctrl_lps == 3 ) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vmalloc( 0x6000 ); if ( !rtlpriv->rtlhal.pfirmware ) { RT_TRACE( rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n" ); return 1; } if ( IS_VENDOR_8723_A_CUT( rtlhal->version ) ) rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin"; else if ( IS_81xxC_VENDOR_UMC_B_CUT( rtlhal->version ) ) rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin"; rtlpriv->max_fw_size = 0x6000; pr_info( "Using firmware %s\n", rtlpriv->cfg->fw_name ); err = request_firmware_nowait( THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb ); if ( err ) { RT_TRACE( rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n" ); return 1; } return 0; }
static ssize_t ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; size_t already_written = 0; off_t pos; size_t bufsize; int errno; void* bouncebuffer; ncp_dbg(1, "enter %pd2\n", dentry); if ((ssize_t) count < 0) return -EINVAL; pos = *ppos; if (file->f_flags & O_APPEND) { pos = i_size_read(inode); } if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) { if (pos >= MAX_NON_LFS) { return -EFBIG; } if (count > MAX_NON_LFS - (u32)pos) { count = MAX_NON_LFS - (u32)pos; } } if (pos >= inode->i_sb->s_maxbytes) { if (count || pos > inode->i_sb->s_maxbytes) { return -EFBIG; } } if (pos + count > inode->i_sb->s_maxbytes) { count = inode->i_sb->s_maxbytes - pos; } if (!count) return 0; errno = ncp_make_open(inode, O_WRONLY); if (errno) { ncp_dbg(1, "open failed, error=%d\n", errno); return errno; } bufsize = NCP_SERVER(inode)->buffer_size; already_written = 0; errno = file_update_time(file); if (errno) goto outrel; bouncebuffer = vmalloc(bufsize); if (!bouncebuffer) { errno = -EIO; /* -ENOMEM */ goto outrel; } while (already_written < count) { int written_this_time; size_t to_write = min_t(unsigned int, bufsize - (pos % bufsize), count - already_written); if (copy_from_user(bouncebuffer, buf, to_write)) { errno = -EFAULT; break; } if (ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, pos, to_write, bouncebuffer, &written_this_time) != 0) { errno = -EIO; break; } pos += written_this_time; buf += written_this_time; already_written += written_this_time; if (written_this_time != to_write) { break; } } vfree(bouncebuffer); *ppos = pos; if (pos > i_size_read(inode)) { mutex_lock(&inode->i_mutex); if (pos > i_size_read(inode)) i_size_write(inode, pos); mutex_unlock(&inode->i_mutex); } ncp_dbg(1, "exit %pd2\n", dentry); outrel: ncp_inode_close(inode); return already_written ? already_written : errno; }
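/*
 * Hedged sketch of the bounce-buffer loop shared by the read and write
 * paths above: userspace data is staged through one vmalloc'ed kernel
 * buffer, in chunks that never cross a server-buffer boundary. xfer() is a
 * stand-in for the wire call.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t bounce_write(const char __user *buf, size_t count,
			    off_t pos, size_t bufsize, void *bounce,
			    int (*xfer)(off_t pos, size_t len, const void *p))
{
	size_t done = 0;

	while (done < count) {
		size_t chunk = min_t(size_t, bufsize - (pos % bufsize),
				     count - done);

		if (copy_from_user(bounce, buf + done, chunk))
			return done ? done : -EFAULT;
		if (xfer(pos, chunk, bounce))
			break;		/* return the partial result */
		pos += chunk;
		done += chunk;
	}
	return done;
}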
static int fm10k_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *temp_ring; int i, err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_tx_count = clamp_t(u32, ring->tx_pending, FM10K_MIN_TXD, FM10K_MAX_TXD); new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE); new_rx_count = clamp_t(u32, ring->rx_pending, FM10K_MIN_RXD, FM10K_MAX_RXD); new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == interface->tx_ring_count) && (new_rx_count == interface->rx_ring_count)) { /* nothing to do */ return 0; } while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) usleep_range(1000, 2000); if (!netif_running(interface->netdev)) { for (i = 0; i < interface->num_tx_queues; i++) interface->tx_ring[i]->count = new_tx_count; for (i = 0; i < interface->num_rx_queues; i++) interface->rx_ring[i]->count = new_rx_count; interface->tx_ring_count = new_tx_count; interface->rx_ring_count = new_rx_count; goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, interface->num_tx_queues, interface->num_rx_queues); temp_ring = vmalloc(i * sizeof(struct fm10k_ring)); if (!temp_ring) { err = -ENOMEM; goto clear_reset; } fm10k_down(interface); /* Setup new Tx resources and free the old Tx resources in that order. * We can then assign the new resources to the rings via a memcpy. * The advantage to this approach is that we are guaranteed to still * have resources even in the case of an allocation failure. */ if (new_tx_count != interface->tx_ring_count) { for (i = 0; i < interface->num_tx_queues; i++) { memcpy(&temp_ring[i], interface->tx_ring[i], sizeof(struct fm10k_ring)); temp_ring[i].count = new_tx_count; err = fm10k_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; fm10k_free_tx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < interface->num_tx_queues; i++) { fm10k_free_tx_resources(interface->tx_ring[i]); memcpy(interface->tx_ring[i], &temp_ring[i], sizeof(struct fm10k_ring)); } interface->tx_ring_count = new_tx_count; } /* Repeat the process for the Rx rings if needed */ if (new_rx_count != interface->rx_ring_count) { for (i = 0; i < interface->num_rx_queues; i++) { memcpy(&temp_ring[i], interface->rx_ring[i], sizeof(struct fm10k_ring)); temp_ring[i].count = new_rx_count; err = fm10k_setup_rx_resources(&temp_ring[i]); if (err) { while (i) { i--; fm10k_free_rx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < interface->num_rx_queues; i++) { fm10k_free_rx_resources(interface->rx_ring[i]); memcpy(interface->rx_ring[i], &temp_ring[i], sizeof(struct fm10k_ring)); } interface->rx_ring_count = new_rx_count; } err_setup: fm10k_up(interface); vfree(temp_ring); clear_reset: clear_bit(__FM10K_RESETTING, &interface->state); return err; }
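/*
 * Hedged sketch of the resize strategy above, generalized: build every
 * replacement ring first, and only free the old ones once all setups have
 * succeeded, so an -ENOMEM half-way through leaves the device with its
 * original, still-working rings. The ring struct and callbacks are
 * illustrative.
 */
#include <linux/types.h>
#include <linux/vmalloc.h>

struct ring {
	void *desc;
	u32 count;
};

static int resize_rings(struct ring **live, int n, u32 new_count,
			int (*setup)(struct ring *),
			void (*teardown)(struct ring *))
{
	struct ring *tmp;
	int i, err = 0;

	tmp = vmalloc(n * sizeof(*tmp));	/* scratch copies */
	if (!tmp)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		tmp[i] = *live[i];		/* inherit everything else */
		tmp[i].count = new_count;
		err = setup(&tmp[i]);
		if (err) {
			while (i--)
				teardown(&tmp[i]);
			goto out;
		}
	}

	for (i = 0; i < n; i++) {
		teardown(live[i]);		/* free old resources */
		*live[i] = tmp[i];		/* adopt the new ring */
	}
out:
	vfree(tmp);
	return err;
}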
/* * The XIP kernel text is mapped in the module area for modules and * some other stuff to work without any indirect relocations. * MODULES_VADDR is redefined here and not in asm/memory.h to avoid * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off. */ #undef MODULES_VADDR #define MODULES_VADDR (((unsigned long)_etext + ~PGDIR_MASK) & PGDIR_MASK) #endif #ifdef CONFIG_MMU void *module_alloc(unsigned long size) { struct vm_struct *area; size = PAGE_ALIGN(size); if (!size) return NULL; area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); if (!area) return NULL; return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); } #else /* CONFIG_MMU */ void *module_alloc(unsigned long size) { return size == 0 ? NULL : vmalloc(size); }
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		/* free with the allocator that was actually used for the
		 * buffer; vfree() on a kmalloc'ed pointer is invalid
		 */
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}
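/*
 * Hedged sketch of the size-threshold choice above: buffers that fit a slab
 * allocation use kmalloc(), larger ones fall back to vmalloc(), and the
 * caller records which allocator was used so the matching free is called
 * later. kvmalloc()/kvfree() wrap this exact policy in current kernels.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *meta_alloc(size_t len, bool *is_vmalloc)
{
	*is_vmalloc = len > KMALLOC_MAX_CACHE_SIZE;
	return *is_vmalloc ? vmalloc(len) : kmalloc(len, GFP_KERNEL);
}

static void meta_free(void *buf, bool is_vmalloc)
{
	if (is_vmalloc)
		vfree(buf);
	else
		kfree(buf);
}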
/**
 * allocate a frame buffer so the encoded bits can be copied to
 *
 * @param1
 *
 * @return
 *
 * @remarks
 *
 */
static VC03_FrameBuffer_t *allocateFrameBuffer_cb(unsigned int stream_Num,
						  int length_in_bytes)
{
	char *ptr = vmalloc(length_in_bytes);

	(void)stream_Num;

	return (VC03_FrameBuffer_t *)ptr;
}
static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused) { unsigned long flags; int i = 0; int num_to_show; struct fasttime_t tv; struct fast_timer *t, *nextt; static char *bigbuf = NULL; static unsigned long used; if (!bigbuf && !(bigbuf = vmalloc(BIG_BUF_SIZE))) { used = 0; if (buf) buf[0] = '\0'; return 0; } if (!offset || !used) { do_gettimeofday_fast(&tv); used = 0; used += sprintf(bigbuf + used, "Fast timers added: %i\n", fast_timers_added); used += sprintf(bigbuf + used, "Fast timers started: %i\n", fast_timers_started); used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n", fast_timer_ints); used += sprintf(bigbuf + used, "Fast timers expired: %i\n", fast_timers_expired); used += sprintf(bigbuf + used, "Fast timers deleted: %i\n", fast_timers_deleted); used += sprintf(bigbuf + used, "Fast timer running: %s\n", fast_timer_running ? "yes" : "no"); used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n", (unsigned long)tv.tv_jiff, (unsigned long)tv.tv_usec); #ifdef FAST_TIMER_SANITY_CHECKS used += sprintf(bigbuf + used, "Sanity failed: %i\n", sanity_failed); #endif used += sprintf(bigbuf + used, "\n"); #ifdef DEBUG_LOG_INCLUDED { int end_i = debug_log_cnt; i = 0; if (debug_log_cnt_wrapped) { i = debug_log_cnt; } while ((i != end_i || (debug_log_cnt_wrapped && !used)) && used+100 < BIG_BUF_SIZE) { used += sprintf(bigbuf + used, debug_log_string[i], debug_log_value[i]); i = (i+1) % DEBUG_LOG_MAX; } } used += sprintf(bigbuf + used, "\n"); #endif num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++) { int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS; #if 1 //ndef FAST_TIMER_LOG used += sprintf(bigbuf + used, "div: %i freq: %i delay: %i" "\n", timer_div_settings[cur], timer_freq_settings[cur], timer_delay_settings[cur] ); #endif #ifdef FAST_TIMER_LOG t = &timer_started_log[cur]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); #endif } used += sprintf(bigbuf + used, "\n"); #ifdef FAST_TIMER_LOG num_to_show = (fast_timers_added < NUM_TIMER_STATS ? fast_timers_added: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? 
fast_timers_expired: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); #endif used += sprintf(bigbuf + used, "Active timers:\n"); local_irq_save(flags); t = fast_timer_list; while (t != NULL && (used+100 < BIG_BUF_SIZE)) { nextt = t->next; local_irq_restore(flags); used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" /* " func: 0x%08lX" */ "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data /* , t->function */ ); local_irq_save(flags); if (t->next != nextt) { printk(KERN_WARNING "timer removed!\n"); } t = nextt; } local_irq_restore(flags); } if (used - offset < len) { len = used - offset; } memcpy(buf, bigbuf + offset, len); *start = buf; *eof = 1; return len; }
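/*
 * Hedged sketch: the seq_file API is the modern replacement for hand-rolled
 * /proc read handlers with static vmalloc'ed buffers like the one above; it
 * handles sizing, offsets and EOF itself. The stats printed here are
 * placeholders, and the open handler would be hooked up via proc_create().
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int fasttimer_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Fast timers added: %i\n", 0 /* stat goes here */);
	seq_printf(m, "Fast timers started: %i\n", 0 /* stat goes here */);
	return 0;
}

static int fasttimer_open(struct inode *inode, struct file *file)
{
	/* single_open() sizes and fills the buffer on demand */
	return single_open(file, fasttimer_show, NULL);
}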
/** * qib_get_eeprom_info- get the GUID et al. from the TSWI EEPROM device * @dd: the qlogic_ib device * * We have the capability to use the nguid field, and get * the guid from the first chip's flash, to use for all of them. */ void qib_get_eeprom_info(struct qib_devdata *dd) { void *buf; struct qib_flash *ifp; __be64 guid; int len, eep_stat; u8 csum, *bguid; int t = dd->unit; struct qib_devdata *dd0 = qib_lookup(0); if (t && dd0->nguid > 1 && t <= dd0->nguid) { u8 oguid; dd->base_guid = dd0->base_guid; bguid = (u8 *) &dd->base_guid; oguid = bguid[7]; bguid[7] += t; if (oguid > bguid[7]) { if (bguid[6] == 0xff) { if (bguid[5] == 0xff) { qib_dev_err(dd, "Can't set %s GUID" " from base, wraps to" " OUI!\n", qib_get_unit_name(t)); dd->base_guid = 0; goto bail; } bguid[5]++; } bguid[6]++; } dd->nguid = 1; goto bail; } /* * Read full flash, not just currently used part, since it may have * been written with a newer definition. * */ len = sizeof(struct qib_flash); buf = vmalloc(len); if (!buf) { qib_dev_err(dd, "Couldn't allocate memory to read %u " "bytes from eeprom for GUID\n", len); goto bail; } /* * Use "public" eeprom read function, which does locking and * figures out device. This will migrate to chip-specific. */ eep_stat = qib_eeprom_read(dd, 0, buf, len); if (eep_stat) { qib_dev_err(dd, "Failed reading GUID from eeprom\n"); goto done; } ifp = (struct qib_flash *)buf; csum = flash_csum(ifp, 0); if (csum != ifp->if_csum) { qib_devinfo(dd->pcidev, "Bad I2C flash checksum: " "0x%x, not 0x%x\n", csum, ifp->if_csum); goto done; } if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n", *(unsigned long long *) ifp->if_guid); /* don't allow GUID if all 0 or all 1's */ goto done; } /* complain, but allow it */ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) qib_devinfo(dd->pcidev, "Warning, GUID %llx is " "default, probably not correct!\n", *(unsigned long long *) ifp->if_guid); bguid = ifp->if_guid; if (!bguid[0] && !bguid[1] && !bguid[2]) { /* * Original incorrect GUID format in flash; fix in * core copy, by shifting up 2 octets; don't need to * change top octet, since both it and shifted are 0. */ bguid[1] = bguid[3]; bguid[2] = bguid[4]; bguid[3] = 0; bguid[4] = 0; guid = *(__be64 *) ifp->if_guid; } else guid = *(__be64 *) ifp->if_guid; dd->base_guid = guid; dd->nguid = ifp->if_numguid; /* * Things are slightly complicated by the desire to transparently * support both the Pathscale 10-digit serial number and the QLogic * 13-character version. */ if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] && ((u8 *) ifp->if_sprefix)[0] != 0xFF) { char *snp = dd->serial; /* * This board has a Serial-prefix, which is stored * elsewhere for backward-compatibility. */ memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); snp[sizeof ifp->if_sprefix] = '\0'; len = strlen(snp); snp += len; len = (sizeof dd->serial) - len; if (len > sizeof ifp->if_serial) len = sizeof ifp->if_serial; memcpy(snp, ifp->if_serial, len); } else memcpy(dd->serial, ifp->if_serial, sizeof ifp->if_serial); if (!strstr(ifp->if_comment, "Tested successfully")) qib_dev_err(dd, "Board SN %s did not pass functional " "test: %s\n", dd->serial, ifp->if_comment); memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); /* * Power-on (actually "active") hours are kept as little-endian value * in EEPROM, but as seconds in a (possibly as small as 24-bit) * atomic_t while running. 
*/ atomic_set(&dd->active_time, 0); dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); done: vfree(buf); bail:; }
void *module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;
	return vmalloc(size);
}
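/*
 * Hedged sketch of the allocation lifetime these module_alloc() variants
 * feed into: a vmalloc'ed region is virtually contiguous but not physically
 * contiguous, allocated at init and released with vfree() at exit. Sizes
 * and names are illustrative.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *scratch;

static int __init vmalloc_demo_init(void)
{
	scratch = vmalloc(16 * PAGE_SIZE);
	if (!scratch)
		return -ENOMEM;
	return 0;
}

static void __exit vmalloc_demo_exit(void)
{
	vfree(scratch);		/* vfree(NULL) is also safe */
}

module_init(vmalloc_demo_init);
module_exit(vmalloc_demo_exit);
MODULE_LICENSE("GPL");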
/** * qib_update_eeprom_log - copy active-time and error counters to eeprom * @dd: the qlogic_ib device * * Although the time is kept as seconds in the qib_devdata struct, it is * rounded to hours for re-write, as we have only 16 bits in EEPROM. * First-cut code reads whole (expected) struct qib_flash, modifies, * re-writes. Future direction: read/write only what we need, assuming * that the EEPROM had to have been "good enough" for driver init, and * if not, we aren't making it worse. * */ int qib_update_eeprom_log(struct qib_devdata *dd) { void *buf; struct qib_flash *ifp; int len, hi_water; uint32_t new_time, new_hrs; u8 csum; int ret, idx; unsigned long flags; /* first, check if we actually need to do anything. */ ret = 0; for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { if (dd->eep_st_new_errs[idx]) { ret = 1; break; } } new_time = atomic_read(&dd->active_time); if (ret == 0 && new_time < 3600) goto bail; /* * The quick-check above determined that there is something worthy * of logging, so get current contents and do a more detailed idea. * read full flash, not just currently used part, since it may have * been written with a newer definition */ len = sizeof(struct qib_flash); buf = vmalloc(len); ret = 1; if (!buf) { qib_dev_err(dd, "Couldn't allocate memory to read %u " "bytes from eeprom for logging\n", len); goto bail; } /* Grab semaphore and read current EEPROM. If we get an * error, let go, but if not, keep it until we finish write. */ ret = mutex_lock_interruptible(&dd->eep_lock); if (ret) { qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); goto free_bail; } ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); if (ret) { mutex_unlock(&dd->eep_lock); qib_dev_err(dd, "Unable read EEPROM for logging\n"); goto free_bail; } ifp = (struct qib_flash *)buf; csum = flash_csum(ifp, 0); if (csum != ifp->if_csum) { mutex_unlock(&dd->eep_lock); qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", csum, ifp->if_csum); ret = 1; goto free_bail; } hi_water = 0; spin_lock_irqsave(&dd->eep_st_lock, flags); for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { int new_val = dd->eep_st_new_errs[idx]; if (new_val) { /* * If we have seen any errors, add to EEPROM values * We need to saturate at 0xFF (255) and we also * would need to adjust the checksum if we were * trying to minimize EEPROM traffic * Note that we add to actual current count in EEPROM, * in case it was altered while we were running. */ new_val += ifp->if_errcntp[idx]; if (new_val > 0xFF) new_val = 0xFF; if (ifp->if_errcntp[idx] != new_val) { ifp->if_errcntp[idx] = new_val; hi_water = offsetof(struct qib_flash, if_errcntp) + idx; } /* * update our shadow (used to minimize EEPROM * traffic), to match what we are about to write. */ dd->eep_st_errs[idx] = new_val; dd->eep_st_new_errs[idx] = 0; } }
static int __init mtd_stresstest_init(void) { int err; int i, op; uint64_t tmp; printk(KERN_INFO "\n"); printk(KERN_INFO "=================================================\n"); if (dev < 0) { pr_info("Please specify a valid mtd-device via module parameter\n"); pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n"); return -EINVAL; } pr_info("MTD device: %d\n", dev); mtd = get_mtd_device(NULL, dev); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); pr_err("error: cannot get MTD device\n"); return err; } if (mtd->writesize == 1) { pr_info("not NAND flash, assume page size is 512 " "bytes.\n"); pgsize = 512; } else pgsize = mtd->writesize; tmp = mtd->size; do_div(tmp, mtd->erasesize); ebcnt = tmp; pgcnt = mtd->erasesize / pgsize; pr_info("MTD device size %llu, eraseblock size %u, " "page size %u, count of eraseblocks %u, pages per " "eraseblock %u, OOB size %u\n", (unsigned long long)mtd->size, mtd->erasesize, pgsize, ebcnt, pgcnt, mtd->oobsize); if (ebcnt < 2) { pr_err("error: need at least 2 eraseblocks\n"); err = -ENOSPC; goto out_put_mtd; } /* Read or write up 2 eraseblocks at a time */ bufsize = mtd->erasesize * 2; err = -ENOMEM; readbuf = vmalloc(bufsize); writebuf = vmalloc(bufsize); offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL); if (!readbuf || !writebuf || !offsets) { pr_err("error: cannot allocate memory\n"); goto out; } for (i = 0; i < ebcnt; i++) offsets[i] = mtd->erasesize; prandom_bytes(writebuf, bufsize); err = scan_for_bad_eraseblocks(); if (err) goto out; /* Do operations */ pr_info("doing operations\n"); for (op = 0; op < count; op++) { if ((op & 1023) == 0) pr_info("%d operations done\n", op); err = do_operation(); if (err) goto out; cond_resched(); } pr_info("finished, %d operations done\n", op); out: kfree(offsets); kfree(bbt); vfree(writebuf); vfree(readbuf); out_put_mtd: put_mtd_device(mtd); if (err) pr_info("error %d occurred\n", err); printk(KERN_INFO "=================================================\n"); return err; }
tmain() { Obj_t *o, *next; Void_t *huge; size_t hugesz; Vmstat_t sb; ssize_t k, p; srandom(0); hugesz = Z_HUGE; /* one huge block to be resized occasionally */ if(!(huge = vmalloc(Vmregion, hugesz)) ) terror("Can't allocate block"); for(k = 0; k < N_OBJ; ++k) { /* free/resize all on this list */ for(o = List[k]; o; o = next) { next = o->next; if((RAND()%2) == 0 ) /* flip a coin to see if freeing */ vmfree(Vmregion, o->obj); else /* resizing */ { o->size = ALLOCSIZE(); if(!(o->obj = vmresize(Vmregion,o->obj,o->size,VM_RSMOVE)) ) terror("Vmresize failed"); TIME(p, k, o->size); /* add to a future list */ o->next = List[p]; List[p] = o; } } if(COMPACT(k)) /* global compaction */ { if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Arena: busy=(%u,%u) free=(%u,%u) extent=%u #segs=%d", sb.n_busy,sb.s_busy, sb.n_free,sb.s_free, sb.extent, sb.n_seg); if(vmcompact(Vmregion) < 0 ) terror("Vmcompact failed"); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Compact: busy=(%u,%u) free=(%u,%u) extent=%u #segs=%d", sb.n_busy,sb.s_busy, sb.n_free,sb.s_free, sb.extent, sb.n_seg); } if(RESIZE(k)) /* make the huge block bigger */ { hugesz += Z_HUGE; if(!(huge = vmresize(Vmregion, huge, hugesz, VM_RSMOVE)) ) terror("Bad resize of huge block"); } o = Obj+k; /* allocate a new block */ o->size = ALLOCSIZE(); if(!(o->obj = vmalloc(Vmregion, o->size)) ) terror("Vmalloc failed"); TIME(p, k, o->size); o->next = List[p]; List[p] = o; } if(vmdbcheck(Vmregion) < 0) terror("Corrupted region"); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Full: Busy=(%u,%u) Free=(%u,%u) Extent=%u #segs=%d\n", sb.n_busy, sb.s_busy, sb.n_free, sb.s_free, sb.extent, sb.n_seg); if(vmcompact(Vmregion) < 0 ) terror("Vmcompact failed"); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Compact: Busy=(%u,%u) Free=(%u,%u) Extent=%u #segs=%d\n", sb.n_busy, sb.s_busy, sb.n_free, sb.s_free, sb.extent, sb.n_seg); /* now free all left-overs */ for(o = List[N_OBJ]; o; o = o->next) vmfree(Vmregion,o->obj); vmfree(Vmregion,huge); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Free: Busy=(%u,%u) Free=(%u,%u) Extent=%u #segs=%d\n", sb.n_busy, sb.s_busy, sb.n_free, sb.s_free, sb.extent, sb.n_seg); if(vmcompact(Vmregion) < 0 ) terror("Vmcompact failed2"); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Compact: Busy=(%u,%u) Free=(%u,%u) Extent=%u #segs=%d\n", sb.n_busy, sb.s_busy, sb.n_free, sb.s_free, sb.extent, sb.n_seg); if(!(huge = vmalloc(Vmregion, 10))) terror("Vmalloc failed"); if(vmstat(Vmregion, &sb) < 0) terror("Vmstat failed"); tinfo("Small: Busy=(%u,%u) Free=(%u,%u) Extent=%u #segs=%d\n", sb.n_busy, sb.s_busy, sb.n_free, sb.s_free, sb.extent, sb.n_seg); texit(0); }
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
/**
 * compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @seb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock to
 * compare
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and reports which one is newer. In
 * case of success this function returns a positive value, in case of failure,
 * a negative error code is returned. The success return codes use the
 * following bits:
 * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
 *   second PEB (described by @pnum and @vid_hdr);
 * o bit 0 is set: the second PEB is newer;
 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
 * o bit 1 is set: bit-flips were detected in the newer LEB;
 * o bit 2 is cleared: the older LEB is not corrupted;
 * o bit 2 is set: the older LEB is corrupted.
 */
static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
			int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	void *buf;
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == seb->sqnum) {
		/*
		 * This must be a really ancient UBI image which has been
		 * created before sequence numbers support has been added. At
		 * that time we used 32-bit LEB versions stored in logical
		 * eraseblocks. That was before UBI got into mainline. We do
		 * not support these images anymore. Well, those images still
		 * work, but only if no unclean reboots happened.
		 */
		ubi_err("unsupported on-flash UBI format\n");
		return -EINVAL;
	}

	/* Obviously the LEB with lower sequence counter is older */
	second_is_newer = !!(sqnum2 > seb->sqnum);

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * newer version is not set, then we just return, otherwise we have to
	 * check data CRC. For the second PEB we already have the VID header,
	 * for the first one - we'll need to re-read it from flash.
	 *
	 * Note: this may be optimized so that we wouldn't read twice.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!seb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = seb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				dbg_err("VID of PEB %d header is bad, but it "
					"was OK earlier, err %d", pnum, err);
				if (err > 0)
					err = -EIO;
				goto out_free_vidh;
			}
		}

		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */
	len = be32_to_cpu(vid_hdr->data_size);
	buf = vmalloc(len);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_vidh;
	}

	err = ubi_io_read_data(ubi, buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_free_buf;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips = !!err;
	}

	vfree(buf);
	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_free_buf:
	vfree(buf);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
static int z2_open( struct inode *inode, struct file *filp )
{
    int device;
    int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
	sizeof( z2ram_map[0] );
    int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
	sizeof( z2ram_map[0] );
    int rc = -ENOMEM;

    device = iminor(inode);

    if ( current_device != -1 && current_device != device )
    {
	rc = -EBUSY;
	goto err_out;
    }

    if ( current_device == -1 )
    {
	z2_count   = 0;
	chip_count = 0;
	list_count = 0;
	z2ram_size = 0;

	/* Use a specific list entry. */
	if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
	    int index = device - Z2MINOR_MEMLIST1 + 1;
	    unsigned long size, paddr, vaddr;

	    if (index >= m68k_realnum_memory) {
		printk( KERN_ERR DEVICE_NAME
			": no such entry in z2ram_map\n" );
		goto err_out;
	    }

	    paddr = m68k_memory[index].addr;
	    size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);

#ifdef __powerpc__
	    /* FIXME: ioremap doesn't build correct memory tables. */
	    {
		vfree(vmalloc(size));
	    }

	    vaddr = (unsigned long) __ioremap(paddr, size, _PAGE_WRITETHRU);
#else
	    vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
	    z2ram_map = kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
				GFP_KERNEL);
	    if ( z2ram_map == NULL )
	    {
		printk( KERN_ERR DEVICE_NAME
			": cannot get mem for z2ram_map\n" );
		goto err_out;
	    }

	    while (size) {
		z2ram_map[ z2ram_size++ ] = vaddr;
		size -= Z2RAM_CHUNKSIZE;
		vaddr += Z2RAM_CHUNKSIZE;
		list_count++;
	    }

	    if ( z2ram_size != 0 )
		printk( KERN_INFO DEVICE_NAME
			": using %iK List Entry %d Memory\n",
			list_count * Z2RAM_CHUNK1024, index );
	} else
	    switch ( device )
	    {
		case Z2MINOR_COMBINED:
		    z2ram_map = kmalloc( max_z2_map + max_chip_map,
					 GFP_KERNEL );
		    if ( z2ram_map == NULL )
		    {
			printk( KERN_ERR DEVICE_NAME
				": cannot get mem for z2ram_map\n" );
			goto err_out;
		    }

		    get_z2ram();
		    get_chipram();

		    if ( z2ram_size != 0 )
			printk( KERN_INFO DEVICE_NAME
				": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
				z2_count * Z2RAM_CHUNK1024,
				chip_count * Z2RAM_CHUNK1024,
				( z2_count + chip_count ) * Z2RAM_CHUNK1024 );
		    break;

		case Z2MINOR_Z2ONLY:
		    z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
		    if ( z2ram_map == NULL )
		    {
			printk( KERN_ERR DEVICE_NAME
				": cannot get mem for z2ram_map\n" );
			goto err_out;
		    }

		    get_z2ram();

		    if ( z2ram_size != 0 )
			printk( KERN_INFO DEVICE_NAME
				": using %iK of Zorro II RAM\n",
				z2_count * Z2RAM_CHUNK1024 );
		    break;

		case Z2MINOR_CHIPONLY:
		    z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
		    if ( z2ram_map == NULL )
		    {
			printk( KERN_ERR DEVICE_NAME
				": cannot get mem for z2ram_map\n" );
			goto err_out;
		    }

		    get_chipram();

		    if ( z2ram_size != 0 )
			printk( KERN_INFO DEVICE_NAME
				": using %iK Chip RAM\n",
				chip_count * Z2RAM_CHUNK1024 );
		    break;

		default:
		    rc = -ENODEV;
		    goto err_out;
		    break;
	    }

	if ( z2ram_size == 0 )
	{
	    printk( KERN_NOTICE DEVICE_NAME
		    ": no unused ZII/Chip RAM found\n" );
	    goto err_out_kfree;
	}

	current_device = device;
	z2ram_size <<= Z2RAM_CHUNKSHIFT;
	set_capacity(z2ram_gendisk, z2ram_size >> 9);
    }

    return 0;

err_out_kfree:
    kfree( z2ram_map );
err_out:
    return rc;
}
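/*
 * Illustrative only: z2ram_map[] holds the kernel virtual address of each
 * Z2RAM_CHUNKSIZE chunk claimed above. A hypothetical sector-to-address
 * translation, as a block request handler might perform it (the helper name
 * is an assumption, not the driver's actual request code):
 */
static unsigned long demo_z2ram_sector_to_addr(unsigned long sector)
{
	unsigned long start = sector << 9;	/* 512-byte sectors to bytes */

	/* Chunk base from the map, plus the offset within the chunk. */
	return z2ram_map[start >> Z2RAM_CHUNKSHIFT] +
	       (start & (Z2RAM_CHUNKSIZE - 1));
}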
static void lkdtm_do_action(enum ctype which) { switch (which) { case CT_PANIC: panic("dumptest"); break; case CT_BUG: BUG(); break; case CT_WARNING: WARN_ON(1); break; case CT_EXCEPTION: *((int *) 0) = 0; break; case CT_LOOP: for (;;) ; break; case CT_OVERFLOW: (void) recursive_loop(recur_count); break; case CT_CORRUPT_STACK: corrupt_stack(); break; case CT_UNALIGNED_LOAD_STORE_WRITE: { static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; u32 *p; u32 val = 0x12345678; p = (u32 *)(data + 1); if (*p == 0) val = 0x87654321; *p = val; break; } case CT_OVERWRITE_ALLOCATION: { size_t len = 1020; u32 *data = kmalloc(len, GFP_KERNEL); data[1024 / sizeof(u32)] = 0x12345678; kfree(data); break; } case CT_WRITE_AFTER_FREE: { size_t len = 1024; u32 *data = kmalloc(len, GFP_KERNEL); kfree(data); schedule(); memset(data, 0x78, len); break; } case CT_SOFTLOCKUP: preempt_disable(); for (;;) cpu_relax(); break; case CT_HARDLOCKUP: local_irq_disable(); for (;;) cpu_relax(); break; case CT_SPINLOCKUP: /* Must be called twice to trigger. */ spin_lock(&lock_me_up); /* Let sparse know we intended to exit holding the lock. */ __release(&lock_me_up); break; case CT_HUNG_TASK: set_current_state(TASK_UNINTERRUPTIBLE); schedule(); break; case CT_EXEC_DATA: execute_location(data_area); break; case CT_EXEC_STACK: { u8 stack_area[EXEC_SIZE]; execute_location(stack_area); break; } case CT_EXEC_KMALLOC: { u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); execute_location(kmalloc_area); kfree(kmalloc_area); break; } case CT_EXEC_VMALLOC: { u32 *vmalloc_area = vmalloc(EXEC_SIZE); execute_location(vmalloc_area); vfree(vmalloc_area); break; } case CT_EXEC_USERSPACE: { unsigned long user_addr; user_addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); if (user_addr >= TASK_SIZE) { pr_warn("Failed to allocate user memory\n"); return; } execute_user_location((void *)user_addr); vm_munmap(user_addr, PAGE_SIZE); break; } case CT_ACCESS_USERSPACE: { unsigned long user_addr, tmp; unsigned long *ptr; user_addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); if (user_addr >= TASK_SIZE) { pr_warn("Failed to allocate user memory\n"); return; } ptr = (unsigned long *)user_addr; pr_info("attempting bad read at %p\n", ptr); tmp = *ptr; tmp += 0xc0dec0de; pr_info("attempting bad write at %p\n", ptr); *ptr = tmp; vm_munmap(user_addr, PAGE_SIZE); break; } case CT_WRITE_RO: { unsigned long *ptr; ptr = (unsigned long *)&rodata; pr_info("attempting bad write at %p\n", ptr); *ptr ^= 0xabcd1234; break; } case CT_WRITE_KERN: { size_t size; unsigned char *ptr; size = (unsigned long)do_overwritten - (unsigned long)do_nothing; ptr = (unsigned char *)do_overwritten; pr_info("attempting bad %zu byte write at %p\n", size, ptr); memcpy(ptr, (unsigned char *)do_nothing, size); flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); do_overwritten(); break; } case CT_NONE: default: break; } }
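/*
 * Illustrative only: lkdtm actions such as the above are normally triggered
 * through the module's debugfs interface rather than called directly, e.g.
 * (assuming debugfs is mounted at the conventional location):
 *
 *	echo BUG > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * The written name is looked up in the ctype table and dispatched to
 * lkdtm_do_action().
 */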
char* lab_ymodem_receive(int* length, int ymodemg)
{
	int stop;
	int firstblk;
	unsigned char gotch;
	int pktsize;
	int size = 0;
	char* rxbuf = NULL;
	int rbytes;
	int waitpkts;

	stop = 0;
	firstblk = 1;
	lab_delay(6);
	rbytes = 0;
	waitpkts = 16;

	while (!stop) {
		if (firstblk) {
			if (ymodemg)
				lab_putc(GRC);
			else
				lab_putc(CRC);
		}

		gotch = getchar();

		if (gotch == 0) {	// timeout
			waitpkts--;
			if (!waitpkts) {
				printk("WARNING: YMODEM receive timed out!\n");
				if (rxbuf)
					vfree(rxbuf);
				return NULL;
			}
			continue;
		}

		switch (gotch) {
		case SOH:
			pktsize = 128;
			goto havesize;

		case STX:
			pktsize = 1024;
		havesize:
		{
			unsigned char blk;
			unsigned char blk255;
			unsigned char dbytes[1028];
			int i;

			blk = getchar();
			blk255 = getchar();
			for (i = 0; i < pktsize + 2; i++)
				dbytes[i] = getchar();

			if (crc16_buf(dbytes, pktsize + 2)) {
				/* CRC failed, try again */
				lab_putc(NAK);
				break;
			}

			if (blk255 != (255 - blk)) {
				lab_putc(NAK);
				break;
			}

			if (firstblk) {
				char* buf;

				/* The decimal size field follows the
				 * NUL-terminated file name in block 0. */
				buf = (char *)dbytes + strlen((char *)dbytes) + 1;
				size = 0;
				while (*buf >= '0' && *buf <= '9') {
					size *= 10;
					size += *buf - '0';
					buf++;
				}

				*length = size;
				size += 1024;
				rxbuf = vmalloc(size + 1024); // a little more safety...
				if (!rxbuf) {
					/* Out of memory: cancel the transfer. */
					lab_putc(CAN);
					lab_putc(CAN);
					printk("WARNING: YMODEM receive out of memory!\n");
					return NULL;
				}
				// printk(">>> YMODEM: getting file of size %d (buf addr: %08X)\n", size-1024, rxbuf);
				lab_putc(ACK);
				if (ymodemg)
					lab_putc(GRC);
				else
					lab_putc(CRC);
				firstblk = 0;
				break;
			}

			if ((rbytes + pktsize) > size) {
				lab_putc(CAN);
				lab_putc(CAN);
				/* BUFFER OVERRUN!!! */
				stop = 1;
				break;
			}

			memcpy(rxbuf + rbytes, dbytes, pktsize);
			rbytes += pktsize;

			if (!ymodemg)
				lab_putc(ACK);
			break;
		}

		case EOT:
			lab_putc(ACK);
			lab_delay(1);
			lab_putc(CRC);
			lab_delay(1);
			lab_putc(ACK);
			lab_delay(1);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_puts("\x08\x08\x08\x08 \x08\x08\x08\x08");
			eatbytes();
			stop = 1;
			break;

		case CAN:
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_puts("\x08\x08\x08\x08 \x08\x08\x08\x08");
			stop = 1;
			lab_delay(1);
			lab_puts("YMODEM transfer aborted\r\n");
			if (rxbuf)
				vfree(rxbuf);
			rxbuf = NULL;
			eatbytes();
			break;

		case 0x03:
		case 0xFF:
			/* Control-C. We should NAK it if it was line noise,
			 * but it's more likely to be the user banging on the
			 * keyboard trying to abort a screwup. */
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_putc(CAN);
			lab_puts("\x08\x08\x08\x08 \x08\x08\x08\x08");
			eatbytes();
			if (rxbuf)
				vfree(rxbuf);
			rxbuf = NULL;
			stop = 1;
			break;

		default:
			lab_putc(NAK);
		}
	}

	return rxbuf;
}
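/*
 * Illustrative only: crc16_buf() above is assumed to implement CRC-16/XMODEM
 * (polynomial 0x1021, initial value 0) over the packet payload plus the two
 * trailing CRC bytes, returning 0 when the check passes. A self-contained
 * sketch under that assumption:
 */
static unsigned short demo_crc16_xmodem(const unsigned char *buf, int len)
{
	unsigned short crc = 0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= (unsigned short)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021
					     : crc << 1;
	}
	/* Yields 0 when the big-endian CRC appended to the data is valid. */
	return crc;
}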
static ssize_t etb_read(struct file *file, char __user *data,
			size_t len, loff_t *ppos)
{
	int total, i;
	long length;
	struct etm_trace_context_t *t = file->private_data;
	u32 first = 0, buffer_end = 0;
	u32 *buf;
	int wpos;
	int skip;
	long wlength;
	loff_t pos = *ppos;

	mutex_lock(&t->mutex);

	if (t->state == TRACE_STATE_TRACING) {
		length = 0;
		pr_err("Need to stop trace\n");
		goto out;
	}

	etb_unlock(t);

	total = etb_get_data_length(t);
	if (total == t->etb_total_buf_size) {
		first = etb_readl(t, ETBRWP);
		if (t->use_etr)
			first = (first - t->etr_phys) / 4;
	}

	if (pos > total * 4) {
		skip = 0;
		wpos = total;
	} else {
		skip = (int)pos % 4;
		wpos = (int)pos / 4;
	}
	total -= wpos;
	first = (first + wpos) % t->etb_total_buf_size;

	etb_writel(t, first, ETBRRP);

	wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
	length = min(total * 4 - skip, (int)len);

	if (wlength == 0)
		goto out;

	buf = vmalloc(wlength * 4);
	if (!buf) {
		length = -ENOMEM;
		goto out;
	}

	pr_info("ETB read %ld bytes to %lld from %ld words at %u\n",
		length, pos, wlength, first);
	pr_info("ETB buffer length: %d\n", total + wpos);
	pr_info("ETB status reg: 0x%x\n", etb_readl(t, ETBSTS));

	if (t->use_etr) {
		/*
		 * XXX: ETBRRP cannot wrap around correctly on ETR.
		 * The workaround is to read the buffer from ETBRWP directly.
		 */
		pr_info("ETR virt = 0x%x, phys = 0x%x\n",
			t->etr_virt, t->etr_phys);
		/* translate first and buffer_end from phys to virt */
		first *= 4;
		first += t->etr_virt;
		buffer_end = t->etr_virt + (t->etr_len * 4);
		pr_info("first(virt) = 0x%x\n\n", first);
		for (i = 0; i < wlength; i++) {
			buf[i] = *((unsigned int *)(unsigned long)first);
			first += 4;
			if (first >= buffer_end)
				first = t->etr_virt;
		}
	} else {
		for (i = 0; i < wlength; i++)
			buf[i] = etb_readl(t, ETBRRD);
	}

	etb_lock(t);
	length -= copy_to_user(data, (u8 *)buf + skip, length);
	vfree(buf);
	*ppos = pos + length;

out:
	mutex_unlock(&t->mutex);
	return length;
}
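/*
 * Illustrative only: the skip/wpos arithmetic in etb_read() converts a
 * byte-oriented file offset into a word-aligned window of the trace buffer.
 * For example, a read of len = 10 bytes at pos = 6 yields skip = 2,
 * wpos = 1, and wlength = DIV_ROUND_UP(2 + 10, 4) = 3 words: 12 bytes are
 * fetched and the first 2 are dropped before copy_to_user(). A hypothetical
 * standalone helper expressing the same computation:
 */
static void demo_etb_window(int pos, int len, int *skip, int *wpos, int *wlen)
{
	*skip = pos % 4;			/* bytes dropped from the first word */
	*wpos = pos / 4;			/* first word of interest */
	*wlen = DIV_ROUND_UP(*skip + len, 4);	/* words to fetch */
}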
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, loff_t *offp) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; size_t count_save = count; void *tbuf; uint64_t tmp; dbg_msg("read %zd bytes from offset %lld of volume %d", count, *offp, vol_id); if (vol->updating) { dbg_err("updating"); return -EBUSY; } if (vol->upd_marker) { dbg_err("damaged volume, update marker is set"); return -EBADF; } if (*offp == vol->used_bytes || count == 0) return 0; if (vol->corrupted) dbg_msg("read from corrupted volume %d", vol_id); if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; tbuf_size = vol->usable_leb_size; if (count < tbuf_size) tbuf_size = ALIGN(count, ubi->min_io_size); tbuf = vmalloc(tbuf_size); if (!tbuf) return -ENOMEM; len = count > tbuf_size ? tbuf_size : count; tmp = *offp; off = do_div(tmp, vol->usable_leb_size); lnum = tmp; do { cond_resched(); if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); if (err) break; off += len; if (off == vol->usable_leb_size) { lnum += 1; off -= vol->usable_leb_size; } count -= len; *offp += len; err = copy_to_user(buf, tbuf, len); if (err) { err = -EFAULT; break; } buf += len; len = count > tbuf_size ? tbuf_size : count; } while (count); vfree(tbuf); return err ? err : count_save - count; }
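/*
 * Illustrative only: vol_cdev_read() above splits the 64-bit byte offset
 * into a logical eraseblock number and an offset within that LEB using
 * do_div(). A minimal sketch of the decomposition (the helper name is an
 * assumption):
 */
static void demo_split_offset(uint64_t offp, int usable_leb_size,
			      int *lnum, int *off)
{
	uint64_t tmp = offp;

	/* do_div() divides tmp in place and returns the remainder */
	*off = do_div(tmp, usable_leb_size);
	*lnum = tmp;
}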
void *Alloc_mem(unsigned long size) { return vmalloc(size); }
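/*
 * Illustrative only: memory returned by Alloc_mem() comes from vmalloc(),
 * so it must be released with vfree(), not kfree(). A matching wrapper
 * (assumed, not present in the original source) would look like:
 */
void Free_mem(void *ptr)
{
	vfree(ptr);
}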