/*
 * Bootloader entry point: handshake with the host, load the configured
 * images (MD filesystem/ROM, AP ROM, U-Boot) from the boot block device,
 * then jump to the last image loaded.  Never returns normally; any
 * failure falls through to the platform error handler.
 */
void main(void)
{
	struct bldr_command_handler handler;
	blkdev_t *bootdev;
	u32 addr = 0;
	char *name;

	bldr_pre_process();

	/* Register the bootloader command callback before handshaking. */
	handler.priv = NULL;
	handler.attr = 0;
	handler.cb = bldr_cmd_handler;

	bldr_handshake(&handler);

	if (NULL == (bootdev = blkdev_get(CFG_BOOT_DEV))) {
		print("%s can't find boot device(%d)\n", MOD, CFG_BOOT_DEV);
		goto error;
	}

#if CFG_LOAD_DSP_ROM
	/* DSP is no more available in MT6589/MT6583 */
#endif

#if CFG_LOAD_MD_FS
	addr = CFG_MD_FS_MEMADDR;
	if (bldr_load_part(PART_MD_FS, bootdev, &addr) != 0)
		goto error;
#endif

#if CFG_LOAD_MD_ROM
	/* 3G and 2G modems use different ROM partitions and load addresses. */
	if (platform_is_three_g()) {
		addr = CFG_MD_3G_ROM_MEMADDR;
		name = PART_MD_3G_ROM;
	} else {
		addr = CFG_MD_2G_ROM_MEMADDR;
		name = PART_MD_2G_ROM;
	}
	if (bldr_load_part(name, bootdev, &addr) != 0)
		goto error;
#endif

#if CFG_LOAD_AP_ROM
	addr = CFG_AP_ROM_MEMADDR;
	if (bldr_load_part(PART_AP_ROM, bootdev, &addr) != 0)
		goto error;
#endif

#if CFG_LOAD_UBOOT
	addr = CFG_UBOOT_MEMADDR;
	if (bldr_load_part(PART_UBOOT, bootdev, &addr) != 0)
		goto error;
#endif

	bldr_post_process();

	/* Hand over control; 'addr' holds the entry of the last image loaded. */
	bldr_jump(addr, BOOT_ARGUMENT_ADDR, sizeof(boot_arg_t));

error:
	platform_error_handler();
}
/*
 * Trigger a partition detection.
 */
int dasd_scan_partitions(struct dasd_block *block)
{
	struct block_device *bdev;

	/* Take and open a reference on the whole-disk block device. */
	bdev = bdget_disk(block->gdp, 0);
	if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
		return -ENODEV;
	/*
	 * See fs/partition/check.c:register_disk,rescan_partitions
	 * Can't call rescan_partitions directly. Use ioctl.
	 */
	ioctl_by_bdev(bdev, BLKRRPART, 0);
	/*
	 * Since the matching blkdev_put call to the blkdev_get in
	 * this function is not called before dasd_destroy_partitions
	 * the offline open_count limit needs to be increased from
	 * 0 to 1. This is done by setting device->bdev (see
	 * dasd_generic_set_offline). As long as the partition
	 * detection is running no offline should be allowed. That
	 * is why the assignment to device->bdev is done AFTER
	 * the BLKRRPART ioctl.
	 */
	block->bdev = bdev;
	return 0;
}
BOOL is_lk_img(void) { part_hdr_t *part_hdr = (part_hdr_t*)COMMON_BUFFER_ADDR; blkdev_t *bdev = blkdev_get(CFG_BOOT_DEV); part_t *part; u64 src; if (NULL == (part = part_get(PART_UBOOT))) goto error; src = part->startblk * bdev->blksz; /* retrieve partition header. */ if ((blkdev_read(bdev, src, sizeof(part_hdr_t), (u8*)part_hdr) == 0) && (!memcmp(part_hdr->info.name, "LK", strlen("LK")))) { SMSG ("[%s] Found LK... \n",MOD); return TRUE; }else{ SMSG ("[%s] LK does not exist, use uboot... \n",MOD); return FALSE; } error: SMSG ("[%s] part_get (PART_UBOOT) Fail\n",MOD); return FALSE; }
/*
 * Persist the security configuration blob 'src' to the boot device at
 * sec_cfg_info.addr, then dump its first eight bytes for debugging.
 * Asserts (never returns) when the boot device cannot be found.
 */
void sec_cfg_save (U8* src)
{
	blkdev_t *dev = NULL;
	U32 idx;

	/* --------------------- */
	/* write sec cfg         */
	/* --------------------- */
	SMSG("[%s] write '0x%x'\n",MOD,sec_cfg_info.addr);

	dev = blkdev_get(CFG_BOOT_DEV);
	if (dev == NULL) {
		SMSG("[%s] can't find boot device(%d)\n", MOD, CFG_BOOT_DEV);
		ASSERT(0);
	}

#ifndef MTK_EMMC_SUPPORT
	/* NAND needs an explicit erase of the region before the write. */
	nand_erase_data(sec_cfg_info.addr, g_nand_chip.chipsize, get_sec_cfg_cnt_size());
#endif

	blkdev_write(dev, sec_cfg_info.addr, get_sec_cfg_cnt_size(), (u8*)src, sec_cfg_info.part_id);

	/* dump first 8 bytes for debugging */
	for (idx = 0; idx < 8; idx++)
		SMSG("0x%x,",src[idx]);
	SMSG("\n");
}
/*
 * Read up to 'count' bytes from the emergency eMMC area at byte
 * 'offset' into 'buffer' (user or kernel space per 'to_user').
 * Reads at most one sector's worth per call (count is clamped to the
 * sector boundary).  Returns bytes read, 0 on soft failure, or
 * -EINVAL for an out-of-range request.
 */
static int emmc_read(struct mmc_emergency_info *emmc, void *holder,
		     char *buffer, off_t offset, int count, bool to_user)
{
	unsigned char *read_ptr;
	unsigned int sector_no;
	off_t sector_offset;
	Sector sect;
	int rc;

	if (!emmc) {
		pr_err("%s:invalid emmc infomation\n", __func__);
		return 0;
	}
	if (!emmc->bdev) {
		pr_err("%s:invalid emmc block device\n", __func__);
		return 0;
	}
	sector_no = offset >> SECTOR_SIZE_SHIFT;
	sector_offset = offset & (SECTOR_SIZE - 1);
	if (sector_no >= emmc->block_count) {
		pr_err("%s: reading an invalid address\n", __func__);
		return -EINVAL;
	}
	/* make sure the block device is open rw */
	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, holder);
	if (rc < 0) {
		pr_err("%s: blk_dev_get failed!\n", __func__);
		return 0;
	}
	read_ptr = read_dev_sector(emmc->bdev,
				   sector_no + emmc->start_block, &sect);
	if (!read_ptr) {
		put_dev_sector(sect);
		return -EINVAL;
	}
	/* count and read_ptr are updated to match flash page size */
	if (count + sector_offset > SECTOR_SIZE)
		count = SECTOR_SIZE - sector_offset;
	if (sector_offset)
		read_ptr += sector_offset;
	if (to_user) {
		if (copy_to_user(buffer, read_ptr, count)) {
			pr_err("%s: Failed to copy buffer to User\n",
			       __func__);
			/* BUG FIX: release the sector reference on this
			 * error path too; it was previously leaked. */
			put_dev_sector(sect);
			return 0;
		}
	} else
		memcpy(buffer, read_ptr, count);
	put_dev_sector(sect);
	return count;
}
int part_init(void) { blkdev_t *bdev; part_t *part; u32 erasesz; unsigned long lastblk; part_num = 0; memset(part_info, 0x00, sizeof(part_info)); #ifdef PL_PROFILING u32 profiling_time; profiling_time = 0; #endif cust_part_init(); bdev = blkdev_get(CFG_BOOT_DEV); part = cust_part_tbl(); if (!bdev || !part) return -1; erasesz = bdev->blksz; part->blks = TO_BLKS_ALIGN(part->size, erasesz); #ifndef MTK_EMMC_SUPPORT if(part->type == TYPE_LOW) lastblk = part->startblk + part->blks*2; else lastblk = part->startblk + part->blks; #endif while (1) { part++; if (!part->name) break; if (part->startblk == 0) part->startblk = lastblk; part->blks = TO_BLKS_ALIGN(part->size, erasesz); #ifndef MTK_EMMC_SUPPORT if(part->type == TYPE_LOW) lastblk = part->startblk + part->blks*2; else lastblk = part->startblk + part->blks; #endif } #if CFG_PMT_SUPPORT #ifdef PL_PROFILING profiling_time = get_timer(0); #endif pmt_init(); #ifdef PL_PROFILING printf("#T#pmt_init=%d\n", get_timer(profiling_time)); #endif #endif return 0; }
/*
 * Zero the entire apanic block device by streaming zero-filled bios
 * across all of its sectors.  Each bio is submitted synchronously:
 * completion is awaited before the next one is built on the stack.
 */
static void mmc_panic_erase(void)
{
	int i = 0;
	int err;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		return;
	}
	err = blkdev_get(bdev, FMODE_WRITE);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		return;
	}
	/* the bounce page, cleared once, is the payload for every bio */
	page = virt_to_page(ctx->bounce);
	memset(ctx->bounce, 0, PAGE_SIZE);
	while (i < bdev->bd_part->nr_sects) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_offset = 0;
		bio_vec.bv_page = page;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_sector = i;
		/* 8 sectors per bio assumes PAGE_SIZE == 4096 (8 * 512)
		 * — NOTE(review): confirm for non-4K-page builds. */
		if (bdev->bd_part->nr_sects - i >= 8) {
			bio_vec.bv_len = PAGE_SIZE;
			bio.bi_size = PAGE_SIZE;
			i += 8;
		} else {
			/* short tail: only the remaining sectors */
			bio_vec.bv_len = (bdev->bd_part->nr_sects - i) * 512;
			bio.bi_size = (bdev->bd_part->nr_sects - i) * 512;
			i = bdev->bd_part->nr_sects;
		}
		bio.bi_bdev = bdev;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(WRITE, &bio);
		wait_for_completion(&complete);
	}
	blkdev_put(bdev, FMODE_WRITE);
	return;
}
/*
 * Try to load the partition table (PT) — or its mirror (MPT) as a
 * fallback — from the fixed region near the end of the eMMC.  On
 * success the decoded entries are copied into 'buf' and DM_ERR_OK is
 * returned; otherwise ERR_NO_EXIST.
 */
static int load_pt_from_fixed_addr(u8 *buf)
{
	int reval = ERR_NO_EXIST;
	u64 pt_start;
	u64 mpt_start;
	int pt_size = PMT_REGION_SIZE;
	int buffer_size = pt_size;
	u8 *pmt_buf = (u8 *)emmc_pmt_buf;
	blkdev_t *dev = blkdev_get(BOOTDEV_SDMMC);

	/* BUG FIX: dev->bread was called without checking that the
	 * boot device lookup succeeded. */
	if (dev == NULL) {
		printf("%s: can't find sdmmc boot device\n", __func__);
		return ERR_NO_EXIST;
	}

	pt_start = g_emmc_size - PMT_REGION_OFFSET;
	mpt_start = pt_start + PMT_REGION_SIZE;
	printf("============func=%s===scan pmt from %llx=====\n", __func__, pt_start);

	/* try to find the pmt at fixed address, signature:0x50547631 */
	dev->bread(dev, (u32)(pt_start / 512), buffer_size / 512, (u8 *)pmt_buf);
	if (is_valid_pt(pmt_buf)) {
		if (!memcmp(pmt_buf + PT_SIG_SIZE, PMT_VER_V1, PMT_VER_SIZE)) {
			/* head and tail signatures must both validate */
			if (is_valid_pt(&pmt_buf[pt_size - PT_SIG_SIZE])) {
				printf("find pt at %llx\n", pt_start);
				memcpy(buf, pmt_buf + PT_SIG_SIZE + PMT_VER_SIZE,
				       PART_MAX_COUNT * sizeof(pt_resident));
				reval = DM_ERR_OK;
				return reval;
			} else {
				printf("invalid tail pt format\n");
				reval = ERR_NO_EXIST;
			}
		} else {
			printf("invalid pt version %s\n", pmt_buf + PT_SIG_SIZE);
			reval = ERR_NO_EXIST;
		}
	}

	/* primary PT unusable: fall back to the mirror copy */
	dev->bread(dev, (u32)(mpt_start / 512), buffer_size / 512, (u8 *)pmt_buf);
	if (is_valid_mpt(pmt_buf)) {
		if (!memcmp(pmt_buf + PT_SIG_SIZE, PMT_VER_V1, PMT_VER_SIZE)) {
			if (is_valid_mpt(&pmt_buf[pt_size - PT_SIG_SIZE])) {
				memcpy(buf, pmt_buf + PT_SIG_SIZE + PMT_VER_SIZE,
				       PART_MAX_COUNT * sizeof(pt_resident));
				reval = DM_ERR_OK;
				printf("find mpt at %llx\n", mpt_start);
				return reval;
			} else {
				reval = ERR_NO_EXIST;
				printf("invalid tail mpt format\n");
			}
		} else {
			reval = ERR_NO_EXIST;
			printf("invalid mpt version %s\n", pmt_buf + PT_SIG_SIZE);
		}
	}

	return reval;
}
/*
 * XOR one device block's worth of packet bytes into the running
 * checksum kept in dm_ctx.chk_sum and return the updated value.
 * NOTE(review): exactly blkdev->blksz bytes are folded in; the pktsz
 * argument is not consulted — confirm this is intentional.
 */
u32 cal_chksum_per_pkt (u8 * pkt_buf, u32 pktsz)
{
	blkdev_t *dev = blkdev_get(CFG_BOOT_DEV);
	u32 acc = dm_ctx.chk_sum;
	u32 n;

	// skip spare because FAT format image doesn't have any spare region
	for (n = 0; n < dev->blksz; n++) {
		acc ^= pkt_buf[n];
	}

	dm_ctx.chk_sum = acc;
	return dm_ctx.chk_sum;
}
/*
 * Clear the apanic header on the eMMC panic area.  When the caller
 * supplies a mapped buffer and sector, those are reused; otherwise the
 * device is opened and the header sector is mapped locally (and the
 * local sector reference is released before returning).
 */
static void emmc_panic_erase(unsigned char *buffer, Sector *sect)
{
	struct emmc_ipanic_data *ctx = &drv_ctx;
	struct mmc_emergency_info *emmc = ctx->emmc;
	unsigned char *read_buf_ptr = buffer;
	Sector new_sect;
	int local_sect = 0;	/* did we take the sector ref ourselves? */
	int rc;

	if (!emmc) {
		pr_err("%s:invalid emmc infomation\n", __func__);
		return;
	}

	if (!read_buf_ptr || !sect) {
		sect = &new_sect;
		if (!emmc->bdev) {
			pr_err("%s:invalid emmc block device\n", __func__);
			goto out;
		}
		/* make sure the block device is open rw */
		rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE,
				emmc_panic_erase);
		if (rc < 0) {
			pr_err("%s: blk_dev_get failed!\n", __func__);
			goto out;
		}
		/*read panic header */
		read_buf_ptr =
		    read_dev_sector(emmc->bdev, emmc->start_block, sect);
		if (!read_buf_ptr) {
			pr_err("%s: read sector error(%llu)!\n", __func__,
			       (u64) emmc->start_block);
			goto out;
		}
		local_sect = 1;
	}

	/*write all zero to panic header */
	lock_page(sect->v);
	memset(read_buf_ptr, 0, SECTOR_SIZE);
	set_page_dirty(sect->v);
	unlock_page(sect->v);
	sync_blockdev(emmc->bdev);

	/* BUG FIX: the original tested "!read_buf_ptr" here, which is
	 * always false at this point, so a locally mapped sector was
	 * never released.  Release it only when we acquired it above. */
	if (local_sect)
		put_dev_sector(*sect);
out:
	memset(&ctx->hdr, 0, SECTOR_SIZE);
	return;
}
/******************************************************************************
 * WRITE IMAGE FOR S-BOOT USAGE (FROM NAND or eMMC DEVICE)
 ******************************************************************************/
/*
 * Write 'size' bytes from 'buf' into the partition that backs secure
 * image 'img_name', at byte 'offset' from the partition start.
 * Asserts (never returns) on device/partition lookup or write failure;
 * otherwise returns SEC_OK.
 */
static U32 sec_util_write_image (U8* img_name, U8 *buf, U32 offset, U32 size)
{
	BOOL ret = SEC_OK;
	blkdev_t *bootdev = NULL;
	part_t *part = NULL;
	U64 dest;
	/* IMPROVEMENT: removed five unused locals (i, cnt, now_offset,
	 * total_pages, start_offset) that were never read. */

	if (NULL == (bootdev = blkdev_get(CFG_BOOT_DEV))) {
		SMSG("[%s] can't find boot device(%d)\n", MOD, CFG_BOOT_DEV);
		ASSERT(0);
	}

	/* ======================== */
	/* get part info            */
	/* ======================== */
	/* part_get should be device abstraction function */
	if(NULL == (part = part_get (sec2plname(img_name)))) {
		SMSG("[%s] part_get fail\n", MOD);
		ASSERT(0);
	}

	/* ======================== */
	/* write part data          */
	/* ======================== */
	/* part_load should be device abstraction function */
	if(TRUE == bDumpPartInfo) {
		/* dump the partition base address only once */
		SMSG("[%s] part load '0x%x'\n", MOD, part->startblk * bootdev->blksz);
		bDumpPartInfo = FALSE;
	}

	/* destination = partition base + caller-supplied offset */
	dest = part->startblk * bootdev->blksz + offset;

	if (-1 == blkdev_write(bootdev, dest, size, buf)) {
		SMSG("[%s] part_store fail\n", MOD);
		ASSERT(0);
	}

	return ret;
}
/*
 * Return the byte capacity of the partition backing the given image
 * type (page count times device block size), or 0 when the name,
 * device or partition cannot be resolved.
 */
u32 get_part_range (DM_IMG_TYPE img_type)
{
	part_t *part;
	blkdev_t *blkdev = blkdev_get(CFG_BOOT_DEV);
	u8 *name = get_part_name (img_type);

	DM_ASSERT (name);
	if (name == NULL)
		return 0;

	/* BUG FIX: in builds where DM_ASSERT compiles out, a failed
	 * device or partition lookup dereferenced a NULL pointer. */
	if (blkdev == NULL)
		return 0;

	part = part_get(name);
	DM_ASSERT (part);
	if (part == NULL)
		return 0;

	return ((part->pgnum) * blkdev->blksz);
}
static ssize_t mmc_protect_clear(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char *device_path; struct block_device *target = NULL; u32 start; u32 size; bool device_holding = false; struct mmc_card *card; card = get_mmc_card(); if (!card) { return count; } device_path = kmalloc(PATH_MAX + count, GFP_KERNEL); if (!device_path) { return -ENOMEM; } snprintf(device_path, PATH_MAX, "/dev/block/%s", buf); target = lookup_bdev(device_path); if (!target) { kfree(device_path); return count; } if (!target->bd_part) { if (blkdev_get(target, FMODE_READ | FMODE_NDELAY, 0)) { kfree(device_path); return count; } device_holding = true; } start = (u32)target->bd_part->start_sect; size = (u32)target->bd_part->nr_sects; clear_write_protect(card, start, size); if (device_holding) { blkdev_put(target, FMODE_READ | FMODE_NDELAY); } kfree(device_path); return count; }
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible. This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	down(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;

	/* Pin the backing inode, then open and claim the block device.
	 * Each failure label below unwinds exactly the steps taken. */
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;
	/* Smallest hardware sector size gives the finest alignment. */
	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	/* First opener redirects the raw node's mapping to the blockdev. */
	if (++raw_devices[minor].inuse == 1)
		filp->f_dentry->d_inode->i_mapping = bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	up(&raw_mutex);
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	up(&raw_mutex);
	return err;
}
int open_cmdline(struct inode *i, struct file *f) { struct cmdline_priv *p; int ret ; p = kzalloc(sizeof(*p), GFP_KERNEL); if (i->i_private) p->osip_id = (int) i->i_private; f->private_data = 0; access_osip_record(osip_find_cmdline, (void *)p); /* need to open it again */ p->bdev = get_emmc_bdev(); if (!p->bdev) { pr_err("%s:access_osip_record failed!\n", __func__); ret = -ENODEV; goto free; } ret = blkdev_get(p->bdev, f->f_mode); if (ret < 0) { pr_err("%s: blk_dev_get failed!\n", __func__); goto put; } if (p->lba >= get_capacity(p->bdev->bd_disk)) { pr_err("%s: %d out of disk bound!\n", __func__, p->lba); ret = -EINVAL; goto put; } p->cmdline = read_dev_sector(p->bdev, p->lba, &p->sect); if (!p->cmdline) { pr_err("%s:read_dev_sector failed!\n", __func__); ret = -ENODEV; goto put; } f->private_data = p; return 0; put: blkdev_put(p->bdev, f->f_mode); free: kfree(p); return -ENODEV; }
void part_dump(void) { blkdev_t *bdev; part_t *part; u32 blksz; u64 start, end; bdev = blkdev_get(CFG_BOOT_DEV); part = cust_part_tbl(); blksz = bdev->blksz; print("\n[%s] blksz: %dB\n", MOD, blksz); while (part->name) { start = (u64)part->startblk * blksz; end = (u64)(part->startblk + part->blks) * blksz - 1; print("[%s] [0x%llx-0x%llx] \"%s\" (%d blocks) \n", MOD, start, end, part->name, part->blks); part++; } }
/*
 * Locate the resume swap device, open it for writing and set its soft
 * block size to PAGE_SIZE.  Stores the swap type in root_swap.
 * Returns 0 on success or a negative errno (the device is released
 * again if set_blocksize fails).
 */
static int swsusp_swap_check(void) /* This is called before saving image */
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
	if (res)
		return res;

	/* BUG FIX: the original called set_blocksize(resume_bdev, ...),
	 * a different global than the device actually opened above. */
	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}
int part_init(void) { blkdev_t *bdev; part_t *part; u32 erasesz; unsigned long lastblk; unsigned int last_part_id; part_num = 0; memset(part_info, 0x00, sizeof(part_info)); cust_part_init(); bdev = blkdev_get(CFG_BOOT_DEV); part = cust_part_tbl(); if (!bdev || !part) return -1; erasesz = bdev->blksz; part->blks = TO_BLKS_ALIGN(part->size, erasesz); lastblk = part->startblk + part->blks; last_part_id = part->part_id; while (1) { part++; if (!part->name) break; if (part->startblk == 0 && part->part_id == last_part_id) { part->startblk = lastblk; } part->blks = TO_BLKS_ALIGN(part->size, erasesz); lastblk = part->startblk + part->blks; last_part_id = part->part_id; } #if CFG_PMT_SUPPORT pmt_init(); #endif return 0; }
U8* sec_cfg_load (void) { U32 i = 0; U8 *buf = (U8*)SEC_WORKING_BUFFER_START; U32 seccfg_size = 0; blkdev_t *bootdev = NULL; /* --------------------- */ /* initialize buffer */ /* --------------------- */ seccfg_size = get_sec_cfg_cnt_size(); memset(buf, 0x0, seccfg_size); /* --------------------- */ /* read sec cfg */ /* --------------------- */ SMSG("\n\n[%s] read '0x%x'\n",MOD,sec_cfg_info.addr); if (NULL == (bootdev = blkdev_get(CFG_BOOT_DEV))) { SMSG("[%s] can't find boot device(%d)\n", MOD, CFG_BOOT_DEV); return NULL; } blkdev_read(bootdev, sec_cfg_info.addr, seccfg_size, (u8*)buf, sec_cfg_info.part_id); /* dump first 8 bytes for debugging */ for(i=0;i<8;i++) SMSG("0x%x,",buf[i]); SMSG("\n"); return buf; }
static struct block_device *stackbd_bdev_open(char dev_path[]) { /* Open underlying device */ struct block_device *bdev_raw = lookup_bdev(dev_path); printk("Opened %s\n", dev_path); if (IS_ERR(bdev_raw)) { printk("stackbd: error opening raw device <%lu>\n", PTR_ERR(bdev_raw)); return NULL; } if (!bdget(bdev_raw->bd_dev)) { printk("stackbd: error bdget()\n"); return NULL; } /* FIXME:VER */ /* if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE, &stackbd))*/ if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE)) { printk("stackbd: error blkdev_get()\n"); bdput(bdev_raw); return NULL; } if (bd_claim(bdev_raw, &stackbd)) { printk("stackbd: error bd_claim()\n"); bdput(bdev_raw); return NULL; } return bdev_raw; }
/* Startup */ static int __init init_blkmtd(void) { struct file *file = NULL; struct inode *inode; mtd_raw_dev_data_t *rawdevice = NULL; int maj, min; int i, blocksize, blocksize_bits; loff_t size = 0; int readonly = 0; int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE; kdev_t rdev; int err; int mode; int totalsize = 0, total_sectors = 0; int regions; mtd_info = NULL; // Check args if(device == 0) { printk("blkmtd: error, missing `device' name\n"); return 1; } if(ro) readonly = 1; if(erasesz) erase_size = erasesz; DEBUG(1, "blkmtd: got device = `%s' erase size = %dK readonly = %s\n", device, erase_size, readonly ? "yes" : "no"); // Get a handle on the device mode = (readonly) ? O_RDONLY : O_RDWR; file = filp_open(device, mode, 0); if(IS_ERR(file)) { DEBUG(2, "blkmtd: open_namei returned %ld\n", PTR_ERR(file)); return 1; } /* determine is this is a block device and if so get its major and minor numbers */ inode = file->f_dentry->d_inode; if(!S_ISBLK(inode->i_mode)) { printk("blkmtd: %s not a block device\n", device); filp_close(file, NULL); return 1; } rdev = inode->i_rdev; //filp_close(file, NULL); DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n", MAJOR(rdev), MINOR(rdev)); maj = MAJOR(rdev); min = MINOR(rdev); if(maj == MTD_BLOCK_MAJOR) { printk("blkmtd: attempting to use an MTD device as a block device\n"); return 1; } DEBUG(1, "blkmtd: devname = %s\n", bdevname(rdev)); blocksize = BLOCK_SIZE; if(bs) { blocksize = bs; } else { if (blksize_size[maj] && blksize_size[maj][min]) { DEBUG(2, "blkmtd: blksize_size = %d\n", blksize_size[maj][min]); blocksize = blksize_size[maj][min]; } } i = blocksize; blocksize_bits = 0; while(i != 1) { blocksize_bits++; i >>= 1; } if(count) { size = count; } else { if (blk_size[maj]) { size = ((loff_t) blk_size[maj][min] << BLOCK_SIZE_BITS) >> blocksize_bits; } } total_sectors = size; size *= blocksize; totalsize = size; DEBUG(1, "blkmtd: size = %ld\n", (long int)size); if(size == 0) { printk("blkmtd: cant determine 
size\n"); return 1; } rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL); if(rawdevice == NULL) { err = -ENOMEM; goto init_err; } memset(rawdevice, 0, sizeof(mtd_raw_dev_data_t)); // get the block device rawdevice->binding = bdget(kdev_t_to_nr(MKDEV(maj, min))); err = blkdev_get(rawdevice->binding, mode, 0, BDEV_RAW); if (err) { goto init_err; } rawdevice->totalsize = totalsize; rawdevice->total_sectors = total_sectors; rawdevice->sector_size = blocksize; rawdevice->sector_bits = blocksize_bits; rawdevice->readonly = readonly; DEBUG(2, "sector_size = %d, sector_bits = %d\n", rawdevice->sector_size, rawdevice->sector_bits); mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL); if (mtd_info == NULL) { err = -ENOMEM; goto init_err; } memset(mtd_info, 0, sizeof(*mtd_info)); // Setup the MTD structure mtd_info->name = "blkmtd block device"; if(readonly) { mtd_info->type = MTD_ROM; mtd_info->flags = MTD_CAP_ROM; mtd_info->erasesize = erase_size << 10; } else { mtd_info->type = MTD_RAM; mtd_info->flags = MTD_CAP_RAM; mtd_info->erasesize = erase_size << 10; } mtd_info->size = size; mtd_info->erase = blkmtd_erase; mtd_info->read = blkmtd_read; mtd_info->write = blkmtd_write; mtd_info->sync = blkmtd_sync; mtd_info->point = 0; mtd_info->unpoint = 0; mtd_info->priv = rawdevice; regions = calc_erase_regions(NULL, erase_size << 10, size); DEBUG(1, "blkmtd: init: found %d erase regions\n", regions); mtd_info->eraseregions = kmalloc(regions * sizeof(struct mtd_erase_region_info), GFP_KERNEL); if(mtd_info->eraseregions == NULL) { } mtd_info->numeraseregions = regions; calc_erase_regions(mtd_info->eraseregions, erase_size << 10, size); /* setup the page cache info */ INIT_LIST_HEAD(&rawdevice->as.clean_pages); INIT_LIST_HEAD(&rawdevice->as.dirty_pages); INIT_LIST_HEAD(&rawdevice->as.locked_pages); rawdevice->as.nrpages = 0; rawdevice->as.a_ops = &blkmtd_aops; rawdevice->as.host = inode; rawdevice->as.i_mmap = NULL; 
rawdevice->as.i_mmap_shared = NULL; spin_lock_init(&rawdevice->as.i_shared_lock); rawdevice->as.gfp_mask = GFP_KERNEL; rawdevice->file = file; file->private_data = rawdevice; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0) mtd_info->module = THIS_MODULE; #endif if (add_mtd_device(mtd_info)) { err = -EIO; goto init_err; } init_waitqueue_head(&thr_wq); init_waitqueue_head(&mtbd_sync_wq); DEBUG(3, "blkmtd: init: kernel task @ %p\n", write_queue_task); DEBUG(2, "blkmtd: init: starting kernel task\n"); kernel_thread(write_queue_task, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); DEBUG(2, "blkmtd: init: started\n"); printk("blkmtd loaded: version = %s using %s erase_size = %dK %s\n", VERSION, device, erase_size, (readonly) ? "(read-only)" : ""); return 0; init_err: if(!rawdevice) { if(rawdevice->binding) blkdev_put(rawdevice->binding, BDEV_RAW); kfree(rawdevice); rawdevice = NULL; } if(mtd_info) { if(mtd_info->eraseregions) kfree(mtd_info->eraseregions); kfree(mtd_info); mtd_info = NULL; } return err; }
/*
 * Probe the named block device for saved apanic data: read the first
 * page, validate magic/version, cache the panic header, and expose
 * /proc/apanic_console and /proc/apanic_threads entries for whatever
 * data is present.  Returns 0 on success, -1 when no valid panic data
 * exists, or a negative errno on open failure.
 */
static int apanic_trigger_check(struct file *file, const char __user *devpath,
				unsigned long count, void *data)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = ctx->bounce;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;
	int err = 0;

	bdev = lookup_bdev(devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       devpath, PTR_ERR(bdev));
		return -1;
	}
	err = blkdev_get(bdev, FMODE_READ);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       devpath, err);
		return err;
	}

	/* BUG FIX: strncpy() does not NUL-terminate when the source
	 * fills the buffer; force termination so later users of
	 * ctx->devpath never run off the end.
	 * NOTE(review): devpath is __user-annotated but copied with a
	 * plain strncpy — confirm callers pass a kernel pointer. */
	strncpy(ctx->devpath, devpath, sizeof(ctx->devpath));
	ctx->devpath[sizeof(ctx->devpath) - 1] = '\0';

	page = virt_to_page(ctx->bounce);

	/* Synchronously read the first page: the panic header. */
	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = mmc_bio_complete;
	submit_bio(READ, &bio);
	wait_for_completion(&complete);

	blkdev_put(bdev, FMODE_READ);
	printk(KERN_ERR DRVNAME "using block device '%s'\n", devpath);

	if (hdr->magic != PANIC_MAGIC) {
		printk(KERN_INFO DRVNAME "no panic data available\n");
		return -1;
	}

	if (hdr->version != PHDR_VERSION) {
		printk(KERN_INFO DRVNAME "version mismatch (%d != %d)\n",
		       hdr->version, PHDR_VERSION);
		return -1;
	}

	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));

	printk(KERN_INFO DRVNAME "c(%u, %u) t(%u, %u)\n",
	       hdr->console_offset, hdr->console_length,
	       hdr->threads_offset, hdr->threads_length);

	if (hdr->console_length) {
		ctx->apanic_console = create_proc_entry("apanic_console",
							S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_console)
			printk(KERN_ERR DRVNAME "failed creating procfile\n");
		else {
			ctx->apanic_console->read_proc = apanic_proc_read;
			ctx->apanic_console->write_proc = apanic_proc_write;
			ctx->apanic_console->size = hdr->console_length;
			ctx->apanic_console->data = (void *)PROC_APANIC_CONSOLE;
		}
	}

	if (hdr->threads_length) {
		ctx->apanic_threads = create_proc_entry("apanic_threads",
							S_IFREG | S_IRUGO, NULL);
		if (!ctx->apanic_threads)
			printk(KERN_ERR DRVNAME "failed creating procfile\n");
		else {
			ctx->apanic_threads->read_proc = apanic_proc_read;
			ctx->apanic_threads->write_proc = apanic_proc_write;
			ctx->apanic_threads->size = hdr->threads_length;
			ctx->apanic_threads->data = (void *)PROC_APANIC_THREADS;
		}
	}

	return err;
}
/*
 * Walk every SCSI disk target on this vbus and refresh its sd_flags:
 * clear SD_FLAG_IN_USE, detect other openers, and honour a pending
 * SD_FLAG_REVALIDATE by revalidating the gendisk and updating the
 * backing inode size.  The #if ladders select the blkdev_get /
 * blkdev_put signature matching the running kernel version.
 */
void refresh_sd_flags(PVBUS_EXT vbus_ext)
{
	/* all possible sd major numbers; 0 terminates the list */
	static int major[] = {
		SCSI_DISK0_MAJOR, SCSI_DISK1_MAJOR, SCSI_DISK2_MAJOR, SCSI_DISK3_MAJOR,
		SCSI_DISK4_MAJOR, SCSI_DISK5_MAJOR, SCSI_DISK6_MAJOR, SCSI_DISK7_MAJOR,
		SCSI_DISK8_MAJOR, SCSI_DISK9_MAJOR, SCSI_DISK10_MAJOR, SCSI_DISK11_MAJOR,
		SCSI_DISK12_MAJOR, SCSI_DISK13_MAJOR, SCSI_DISK14_MAJOR, SCSI_DISK15_MAJOR,
		0
	};
	int id;
	Scsi_Device *SDptr;

	vbus_ext->needs_refresh = 0;

	for (id=0; id<osm_max_targets; id++) {
		SDptr = scsi_device_lookup(vbus_ext->host, 0, id, 0);
		vbus_ext->sd_flags[id] &= ~SD_FLAG_IN_USE;
		if (SDptr) {
			int i, minor;
			for (i=0; major[i]; i++) {
				/* whole-disk nodes: sd minors step by 16 */
				for (minor=0; minor<=240; minor+=16) {
					struct block_device *bdev = bdget(MKDEV(major[i], minor));
					if (bdev &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
						blkdev_get(bdev, FMODE_READ,NULL)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
						blkdev_get(bdev, FMODE_READ)
#else
						blkdev_get(bdev, FMODE_READ, 0 __BDEV_RAW)
#endif
						==0) {
						/* is this bdev backed by the current target? */
						if (bdev->bd_disk && bdev->bd_disk->driverfs_dev==&SDptr->sdev_gendev) {
							if (vbus_ext->sd_flags[id] & SD_FLAG_REVALIDATE) {
								if (bdev->bd_disk->fops->revalidate_disk)
									bdev->bd_disk->fops->revalidate_disk(bdev->bd_disk);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
								mutex_lock(&bdev->bd_inode->i_mutex);
#else
								down(&bdev->bd_inode->i_sem);
#endif
								/* propagate the (possibly new) capacity to the inode */
								i_size_write(bdev->bd_inode, (loff_t)get_capacity(bdev->bd_disk)<<9);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
								mutex_unlock(&bdev->bd_inode->i_mutex);
#else
								up(&bdev->bd_inode->i_sem);
#endif
								vbus_ext->sd_flags[id] &= ~SD_FLAG_REVALIDATE;
							}
							/* more than our own opener means it's busy */
							if (bdev->bd_openers>1)
								vbus_ext->sd_flags[id] |= SD_FLAG_IN_USE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
							blkdev_put(bdev, FMODE_READ);
#else
							blkdev_put(bdev __BDEV_RAW);
#endif
							goto next;
						}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
						blkdev_put(bdev, FMODE_READ);
#else
						blkdev_put(bdev __BDEV_RAW);
#endif
					}
				}
			}
next:
			scsi_device_put(SDptr);
		}
	}
}
/*
 * procfs read handler for the saved apanic console/threads data.
 * Reads 'count' bytes at 'offset' of the selected region (chosen via
 * 'dat') from the raw block device, sector by sector through the
 * bounce page.  Returns bytes copied or a negative errno.
 */
static int apanic_proc_read(char *buffer, char **start, off_t offset,
			    int count, int *peof, void *dat)
{
	int i, index = 0;
	int err;
	int start_sect;
	int end_sect;
	size_t file_length;
	off_t file_offset;
	struct apanic_data *ctx = &drv_ctx;
	struct block_device *bdev;
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;
	struct page *page;

	if (!count)
		return 0;

	mutex_lock(&drv_mutex);

	switch ((int) dat) {
	case PROC_APANIC_CONSOLE:
		file_length = ctx->curr.console_length;
		file_offset = ctx->curr.console_offset;
		break;
	case PROC_APANIC_THREADS:
		file_length = ctx->curr.threads_length;
		file_offset = ctx->curr.threads_offset;
		break;
	default:
		pr_err("bad apanic source (%d)\n", (int) dat);
		mutex_unlock(&drv_mutex);
		return -EINVAL;
	}

	if ((offset + count) > file_length) {
		mutex_unlock(&drv_mutex);
		return 0;
	}

	bdev = lookup_bdev(ctx->devpath);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR DRVNAME "failed to look up device %s (%ld)\n",
		       ctx->devpath, PTR_ERR(bdev));
		/* BUG FIX: drv_mutex was left held on this error path. */
		mutex_unlock(&drv_mutex);
		return -1;
	}
	err = blkdev_get(bdev, FMODE_READ);
	if (err) {
		printk(KERN_ERR DRVNAME "failed to open device %s (%d)\n",
		       ctx->devpath, err);
		/* BUG FIX: drv_mutex was left held on this error path. */
		mutex_unlock(&drv_mutex);
		return err;
	}
	page = virt_to_page(ctx->bounce);

	start_sect = (file_offset + offset) / 512;
	end_sect = (file_offset + offset + count - 1) / 512;

	for (i = start_sect; i <= end_sect; i++) {
		bio_init(&bio);
		bio.bi_io_vec = &bio_vec;
		bio_vec.bv_page = page;
		bio_vec.bv_len = 512;
		bio_vec.bv_offset = 0;
		bio.bi_vcnt = 1;
		bio.bi_idx = 0;
		bio.bi_size = 512;
		bio.bi_bdev = bdev;
		bio.bi_sector = i;
		init_completion(&complete);
		bio.bi_private = &complete;
		bio.bi_end_io = mmc_bio_complete;
		submit_bio(READ, &bio);
		wait_for_completion(&complete);
		if (!test_bit(BIO_UPTODATE, &bio.bi_flags)) {
			err = -EIO;
			goto out_blkdev;
		}

		if ((i == start_sect) && ((file_offset + offset) % 512 != 0)) {
			/* first sect, may be the only sect */
			memcpy(buffer, ctx->bounce + (file_offset + offset) % 512,
			       min((unsigned long)count,
				   (unsigned long)(512 - (file_offset + offset) % 512)));
			index += min((unsigned long)count,
				     (unsigned long)(512 - (file_offset + offset) % 512));
		} else if ((i == end_sect) &&
			   ((file_offset + offset + count) % 512 != 0)) {
			/* last sect */
			memcpy(buffer + index, ctx->bounce,
			       (file_offset + offset + count) % 512);
		} else {
			/* middle sect */
			memcpy(buffer + index, ctx->bounce, 512);
			index += 512;
		}
	}

	*start = (char *)count;
	if ((offset + count) == file_length)
		*peof = 1;
	/* BUG FIX: the success path used to call mutex_unlock() here AND
	 * fall through to the unlock below, releasing drv_mutex twice. */
	err = count;

out_blkdev:
	blkdev_put(bdev, FMODE_READ);
	mutex_unlock(&drv_mutex);
	return err;
}
/*
 * Map the OSIP header sector, sanity-check it, snapshot a backup copy,
 * and run 'callback' on it under the page lock.  If the callback
 * reports a modification, the backup is stored at OSIP_BACKUP_OFFSET,
 * the checksum is recomputed, and the sector is written back.
 * Returns 0 on success or a negative errno.
 */
static int access_osip_record(osip_callback_t callback, void *cb_data)
{
	Sector sect;
	struct block_device *bdev;
	char *buffer;
	struct OSIP_header *osip;
	struct OSIP_header *osip_backup;
	int ret = 0;
	int dirty = 0;

	bdev = get_emmc_bdev();
	if (bdev == NULL) {
		pr_err("%s: get_emmc failed!\n", __func__);
		return -ENODEV;
	}
	/* make sure the block device is open rw */
	ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, NULL);
	if (ret < 0) {
		pr_err("%s: blk_dev_get failed!\n", __func__);
		/* BUG FIX: "return -ret" negated the negative errno into
		 * a positive value, which callers read as success. */
		return ret;
	}
	/* get memmap of the OSIP header */
	buffer = read_dev_sector(bdev, 0, &sect);
	if (buffer == NULL) {
		ret = -ENODEV;
		goto bd_put;
	}
	osip = (struct OSIP_header *) buffer;
	/* some sanity checks */
	if (osip->header_size <= 0 || osip->header_size > PAGE_SIZE) {
		pr_err("%s: corrupted osip!\n", __func__);
		ret = -EINVAL;
		goto put_sector;
	}
	if (calc_checksum(osip, osip->header_size) != 0) {
		pr_err("%s: corrupted osip!\n", __func__);
		ret = -EINVAL;
		goto put_sector;
	}
	/* store the OSIP backup which will be used to recover in PrOS */
	osip_backup = kmalloc(sizeof(struct OSIP_header), GFP_KERNEL);
	if (osip_backup == NULL) {
		/* BUG FIX: report the allocation failure instead of
		 * silently returning success. */
		ret = -ENOMEM;
		goto put_sector;
	}
	memcpy(osip_backup, osip, sizeof(struct OSIP_header));

	lock_page(sect.v);
	dirty = callback(osip, cb_data);
	if (dirty) {
		memcpy(buffer + OSIP_BACKUP_OFFSET, osip_backup,
		       sizeof(struct OSIP_header));
		osip->header_checksum = 0;
		osip->header_checksum = calc_checksum(osip, osip->header_size);
		set_page_dirty(sect.v);
	}
	unlock_page(sect.v);
	sync_blockdev(bdev);
	kfree(osip_backup);
	ret = 0;
put_sector:
	put_dev_sector(sect);
bd_put:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
	/* BUG FIX: propagate ret; the original returned 0 even after
	 * the error paths above. */
	return ret;
}
/*
 * bldr_load_images() - load every compile-time-enabled image (modem ROM /
 * ramdisk / DSP, CONN_SYS, MD32, AP ROM or lk, AArch64 SLT kernel pieces,
 * and TEE/ATF) from the boot block device into its configured DRAM address.
 *
 * @jump_addr: out parameter; set to the entry address of the image to jump
 *             to (AP_ROM or lk) only when that load succeeds.
 *
 * Returns 0 on success, -1 when the boot device cannot be found, or the
 * error code of a fatal bldr_load_part()/bldr_load_tee_part() call.
 *
 * NOTE(review): the SLT image loads deliberately ignore bldr_load_part()'s
 * return value ("do not check the correctness" in the original) — preserved.
 * Partition names are passed as string literals; the original PART_* macro
 * calls were kept only as commented-out code and have been removed.
 * Unused locals (name, spare0, spare1, and an unused p_addr in the AP_ROM
 * branch) have been dropped.
 */
static int bldr_load_images(u32 *jump_addr)
{
	int ret = 0;
	blkdev_t *bootdev;
	u32 addr = 0;
	u32 size = 0;

	if (NULL == (bootdev = blkdev_get(CFG_BOOT_DEV))) {
		print("%s can't find boot device(%d)\n", MOD, CFG_BOOT_DEV);
		/* FIXME, should change to global error code */
		return -1;
	}

#if CFG_LOAD_MD_ROM
	if (1 == aarch64_slt_done()) {
		/* do not check the correctness */
		addr = CFG_MD1_ROM_MEMADDR;
		bldr_load_part("MD1_ROM", bootdev, &addr, &size);
		addr = CFG_MD2_ROM_MEMADDR;
		bldr_load_part("MD2_ROM", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_MD_RAMDISK
	if (1 == aarch64_slt_done()) {
		/* do not check the correctness */
		addr = CFG_MD1_RAMDISK_MEMADDR;
		bldr_load_part("MD1_RAMDISK", bootdev, &addr, &size);
		addr = CFG_MD2_RAMDISK_MEMADDR;
		bldr_load_part("MD2_RAMDISK", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_MD_DSP
	if (1 == aarch64_slt_done()) {
		addr = CFG_MD_DSP_MEMADDR;
		bldr_load_part("MD_DSP", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_SLT_MD_RAMDISK
	if (1 == aarch64_slt_done()) {
		/* do not check the correctness; all variants share one load addr */
		addr = CFG_MD1_RAMDISK_MEMADDR;
		bldr_load_part("FDD_MD_RAMDISK", bootdev, &addr, &size);
		addr = CFG_MD1_RAMDISK_MEMADDR;
		bldr_load_part("TDD_ONLY_MD_RAMDISK", bootdev, &addr, &size);
		addr = CFG_MD1_RAMDISK_MEMADDR;
		bldr_load_part("TDD2G_MD_RAMDISK", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_SLT_MD_DSP
	if (1 == aarch64_slt_done()) {
		addr = CFG_MD_DSP_MEMADDR;
		bldr_load_part("FDD_MD_DSP", bootdev, &addr, &size);
		addr = CFG_MD_DSP_MEMADDR;
		bldr_load_part("TDD_ONLY_MD_DSP", bootdev, &addr, &size);
		addr = CFG_MD_DSP_MEMADDR;
		bldr_load_part("TDD2G_MD_DSP", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_CONN_SYS
	if (1 == aarch64_slt_done()) {
		addr = CFG_CONN_SYS_MEMADDR;
		bldr_load_part("CONN_SYS", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_SLT_MD
	if (1 == aarch64_slt_done()) {
		addr = CFG_FDD_MD_ROM_MEMADDR;
		bldr_load_part("HVT_MD_ROM", bootdev, &addr, &size);
		addr = CFG_FDD_MD_ROM_MEMADDR;
		bldr_load_part("FDD_MD_ROM", bootdev, &addr, &size);
		addr = CFG_FDD_MD_ROM_MEMADDR;
		bldr_load_part("TDD_ONLY_ROM", bootdev, &addr, &size);
		addr = CFG_FDD_MD_ROM_MEMADDR;
		bldr_load_part("TDD2G_MD_ROM", bootdev, &addr, &size);
	}
#endif

#if CFG_LOAD_SLT_MD32
	if (1 == aarch64_slt_done()) {
		u32 p_addr;
		u32 d_addr;

		/* SPM power on MD32 and MD32 SRAM */
		DRV_WriteReg32(0x10006000, 0x0b160001);
		DRV_WriteReg32(0x100062c8, 0xfffffff0);

		p_addr = CFG_MD32P_ROM_MEMADDR;
		bldr_load_part("MD32_P", bootdev, &p_addr, &size);
		d_addr = CFG_MD32D_ROM_MEMADDR;
		bldr_load_part("MD32_D", bootdev, &d_addr, &size);
	}
#endif

#if CFG_LOAD_AP_ROM
	if (1 == aarch64_slt_done()) {
		addr = CFG_AP_ROM_MEMADDR;
		ret = bldr_load_part("AP_ROM", bootdev, &addr, &size);
		if (ret)
			return ret;
		*jump_addr = addr;
	}
#elif CFG_LOAD_UBOOT
	addr = CFG_UBOOT_MEMADDR;
	ret = bldr_load_part("lk", bootdev, &addr, &size);
	if (ret)
		return ret;
	*jump_addr = addr;
#endif

#if CFG_LOAD_SLT_AARCH64_KERNEL
	if (0 == aarch64_slt_done()) {
		/* NOTE(review): each ret overwrites the previous one; only the
		 * last load's status survives — presumably intentional, confirm */
		addr = CFG_BOOTA64_MEMADDR;
		ret = bldr_load_part("boota64", bootdev, &addr, &size);
		addr = CFG_DTB_MEMADDR;
		ret = bldr_load_part("DTB", bootdev, &addr, &size);
		addr = CFG_IMAGE_AARCH64_MEMADDR;
		ret = bldr_load_part("Image_aarch64", bootdev, &addr, &size);
	}
#endif

#if CFG_ATF_SUPPORT
	/* try primary TEE partition, fall back to the secondary on failure */
	addr = CFG_ATF_ROM_MEMADDR;
	ret = bldr_load_tee_part("tee1", bootdev, &addr, 0, &size);
	if (ret) {
		addr = CFG_ATF_ROM_MEMADDR;
		ret = bldr_load_tee_part("tee2", bootdev, &addr, 0, &size);
		if (ret)
			return ret;
	}
	print("%s bldr load tee part ret=0x%x, addr=0x%x\n", MOD, ret, addr);

	addr = CFG_BOOTIMG_HEADER_MEMADDR;
	size = 0x100;
	/* NOTE(review): return value intentionally ignored in the original */
	bldr_load_bootimg_header("boot", bootdev, &addr, 0, &size);
	print("%s part_load_images ret=0x%x\n", MOD, ret);
#endif

	return ret;
}
/*
 * main() - preloader entry point.
 *
 * Three compile-time flavors:
 *  - CFG_SRAM_PRELOADER_MODE: trampoline straight to the DRAM preloader at
 *    mem_baseaddr (from the SRAM linker script); nothing else runs.
 *  - LOAD_NORMAL_BOOT_PRELOADER: load the "preloader" partition, relocate
 *    it into on-chip SRAM, flush caches, and jump to it.
 *  - otherwise: load the images selected by CFG_LOAD_* and bldr_jump() to
 *    the last loaded address; any load failure falls into
 *    platform_error_handler() via the error label.
 */
void main(void)
{
#if !defined(CFG_MEM_PRESERVED_MODE)
	struct bldr_command_handler handler;
#endif
	blkdev_t *bootdev;
	u32 addr;
	char *name;

#if defined(CFG_SRAM_PRELOADER_MODE)
	//jump to mem preloader directly
	//mem_baseaddr is defined in link_sram_descriptor.ld
	addr = (u32) &mem_baseaddr;
	jump(addr, BOOT_ARGUMENT_ADDR, sizeof(boot_arg_t));
#else //#if defined(CFG_SRAM_PRELOADER_MODE)
#ifdef PL_PROFILING
	u32 profiling_time;
	profiling_time = 0;
#endif
	//Change setting to improve L2 CACHE SRAM access stability
	//CACHE_MEM_DELSEL: 0x10200014
	//bit 3:0 l2data_delsel Adjusts memory marco timing
	//change setting: default=0xA new=0xB
	*(volatile unsigned int *)0x10200014 = 0xAAAB;

	addr = 0;
	/* board/clock/memory init before any partition access */
	bldr_pre_process();
#ifdef TINY_BOOTLOADER
	/* tiny build stops here on purpose: spin forever after pre-process */
	while(1);
#endif
#if !defined(CFG_MEM_PRESERVED_MODE)
	/* register the download-agent command callback, then wait for the
	 * host handshake (timed when PL_PROFILING is on) */
	handler.priv = NULL;
	handler.attr = 0;
	handler.cb = bldr_cmd_handler;
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	bldr_handshake(&handler);
#ifdef PL_PROFILING
	printf("#T#bldr_hdshk=%d\n", get_timer(profiling_time));
#endif
#endif
	if (NULL == (bootdev = blkdev_get(CFG_BOOT_DEV))) {
		print("%s can't find boot device(%d)\n", MOD, CFG_BOOT_DEV);
		goto error;
	}

#if defined(LOAD_NORMAL_BOOT_PRELOADER)
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	{
		volatile u32 cache_cfg;
#define L2C_SIZE_CFG_OFF 5
		//enable L2 sram for DA
		cache_cfg = DRV_Reg(APMCUSYS_CONFIG_BASE);
		cache_cfg &= ~(0x7 << L2C_SIZE_CFG_OFF);
		DRV_WriteReg(APMCUSYS_CONFIG_BASE, cache_cfg);
		//enable audio sysram clk for DA
		*(volatile unsigned int *)(0x10000084) = 0x02000000;
	}
	/* stage the preloader image at CFG_UBOOT_MEMADDR, then copy it into
	 * SRAM; 0x02007200 / 0x18E00 / entry 0x02007500 are fixed by the
	 * SRAM layout — NOTE(review): magic values, confirm against the map */
	addr = CFG_UBOOT_MEMADDR;
	printf("load preloader=0x%x\n",addr);
	if (bldr_load_part(PART_PRELOADER, bootdev, &addr) != 0)
		goto error;
	addr = 0x02007200;
	printf("memcpy preloader=0x%x\n", addr);
	memcpy((void *)addr,(void *) CFG_UBOOT_MEMADDR,(int) 0x18E00);
#ifdef PL_PROFILING
	printf("#T#ld_lk=%d\n", get_timer(profiling_time));
#endif
	addr = 0x02007500;
	/* make the copied image coherent: clean D-cache, invalidate I-cache,
	 * leave SMP before jumping into it */
	apmcu_disable_dcache();
	apmcu_dcache_clean_invalidate();
	apmcu_dsb();
	apmcu_icache_invalidate();
	apmcu_disable_icache();
	apmcu_isb();
	apmcu_disable_smp();
	printf("jump to preloader=0x%x\n", addr);
//	while( *(volatile unsigned int *)(0x10001428) != 0x000000AA) ;
	jump((u32) addr, BOOT_ARGUMENT_ADDR, sizeof(boot_arg_t));
#endif

#if CFG_LOAD_DSP_ROM
	/* DSP is no more available in MT6589/MT6583 */
#endif

#if CFG_LOAD_MD_FS
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	/* NOTE(review): despite the CFG_LOAD_MD_FS guard this loads the
	 * boot image partition; failure is tolerated on purpose */
	addr = CFG_USE_HEADER_MEMADDR;
	name = PART_BOOTIMG;
	if (bldr_load_part(name, bootdev, &addr) != 0)
		; //goto error; // MD_FS partition may be empty
#ifdef PL_PROFILING
	printf("#T#ld_MDFS=%d\n", get_timer(profiling_time));
#endif
#endif

#if CFG_LOAD_MD_ROM
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	/* NOTE(review): guard says MD_ROM but the partition is RECOVERY */
	addr = CFG_USE_HEADER_MEMADDR;
	name = PART_RECOVERY;
	if (bldr_load_part(name, bootdev, &addr) != 0)
		goto error;
#ifdef PL_PROFILING
	printf("#T#ld_MDROM=%d\n", get_timer(profiling_time));
#endif
#endif

#if CFG_LOAD_AP_ROM
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	/* NOTE(review): guard says AP_ROM but the partition is UBOOT */
	addr = CFG_USE_HEADER_MEMADDR;
	name = PART_UBOOT;
	if (bldr_load_part(name, bootdev, &addr) != 0)
		goto error;
#ifdef PL_PROFILING
	printf("#T#ld_APROM=%d\n", get_timer(profiling_time));
#endif
#endif

#if CFG_LOAD_UBOOT
#ifdef PL_PROFILING
	profiling_time = get_timer(0);
#endif
	addr = CFG_UBOOT_MEMADDR;
	if (bldr_load_part(PART_UBOOT, bootdev, &addr) != 0)
		goto error;
#ifdef PL_PROFILING
	printf("#T#ld_lk=%d\n", get_timer(profiling_time));
#endif
#endif

	/* tear down before handing control to the last image loaded;
	 * addr still holds that image's load address */
	bldr_post_process();

	bldr_jump(addr, BOOT_ARGUMENT_ADDR, sizeof(boot_arg_t));

error:
	platform_error_handler();
#endif //end of #if !defined(CFG_SRAM_PRELOADER_MODE)
}
/*
 * intel_scu_ipc_read_mip() - read @len bytes from the MIP/UMIP area.
 *
 * On Cloverview with @issigned != 1 the UMIP lives on the eMMC boot
 * partition and is read directly via the block layer; all reads must fit
 * inside a single sector. On every other path the read goes through the
 * SCU over rpmsg using the memory-mapped MIP window.
 *
 * @data:     destination buffer (must hold @len bytes).
 * @len:      number of bytes to read; limited to one sector on the eMMC path.
 * @offset:   byte offset into the (U)MIP area.
 * @issigned: 1 selects the signed MIP (SCU path) on Cloverview.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * Fixes vs. original: blkdev_get() failure returned -ret (a positive
 * value), violating the negative-errno convention — now returns ret;
 * "&sect" had been mangled into the section-sign character — restored.
 */
int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned)
{
	int ret = 0;
	Sector sect;
	struct block_device *bdev;
	char *buffer = NULL;
	int *holderId = NULL;
	int sect_no, remainder;

	/* Only SMIP read for Cloverview is supported */
	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) &&
	    (issigned != 1)) {
		/* CTP read UMIP from eMMC */

		/* Opening the mmcblk0boot0 */
		bdev = get_emmc_bdev();
		if (bdev == NULL) {
			pr_err("%s: get_emmc failed!\n", __func__);
			return -ENODEV;
		}
		/* make sure the block device is open read only */
		ret = blkdev_get(bdev, FMODE_READ, holderId);
		if (ret < 0) {
			pr_err("%s: blk_dev_get failed!\n", __func__);
			return ret;
		}
		/* Get sector number of where data located */
		sect_no = offset / SECTOR_SIZE;
		remainder = offset % SECTOR_SIZE;
		buffer = read_dev_sector(bdev,
				sect_no + UMIP_HEADER_HEADROOM_SECTOR, &sect);

		/* Shouldn't need to access UMIP sector 0/1 */
		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
			pr_err("invalid umip offset\n");
			ret = -EINVAL;
			goto bd_put;
		} else if (data == NULL || buffer == NULL) {
			pr_err("buffer is empty\n");
			ret = -ENODEV;
			goto bd_put;
		} else if (len > (SECTOR_SIZE - remainder)) {
			pr_err("not enough data to read\n");
			ret = -EINVAL;
			goto bd_put;
		}
		memcpy(data, buffer + remainder, len);
bd_put:
		if (buffer)
			put_dev_sector(sect);
		blkdev_put(bdev, FMODE_READ);
		return ret;
	} else {
		if (!intel_mip_base)
			return -ENODEV;
		if (offset + len > IPC_MIP_MAX_ADDR)
			return -EINVAL;
		rpmsg_global_lock();
		ret = read_mip(data, len, offset, issigned);
		rpmsg_global_unlock();
		return ret;
	}
}
/**************************************************************************
 * Handle Data
 *
 * Pull download packets of @pktsz bytes from the USB TTY into @buf and
 * write them to NAND page by page, tracking progress in the global
 * dm_ctx. Bad blocks are skipped via dev_find_safe_block(); writes past
 * dm_ctx.img_info.addr_boundary are silently dropped so a download can
 * never spill into the next partition. If a previous error left the
 * session in DM_STATUS_ERR_ONGOING, the remaining packets are drained
 * from USB and discarded.
 *
 * NOTE(review): res, i, spare_start/offset/len and first_page are never
 * used; blkdev_get() result is dereferenced without a NULL check —
 * presumably the boot device is guaranteed present here, confirm.
 **************************************************************************/
void handle_data (u32 pktsz, u8 * buf)
{
	bool res = TRUE;
	unsigned int i = 0;
	u32 starting_block = 0;
	u32 spare_start, spare_offset, spare_len;
	bool first_page = TRUE;
	bool need_erase_nand = TRUE;
	bool invalid_addr = FALSE;
	blkdev_t *blkdev;

	blkdev = blkdev_get(CFG_BOOT_DEV);

	DM_ENTRY_LOG ();

	/* error recovery: drain and discard every remaining packet so the
	 * host-side protocol stays in sync, then mark the error finished */
	if (dm_ctx.dm_status == DM_STATUS_ERR_ONGOING) {
		while (dm_ctx.curr_cnt <= dm_ctx.pkt_cnt) {
			mt_usbtty_getcn (pktsz, buf);
			dm_ctx.curr_cnt++;
		};
		dm_ctx.dm_status = DM_STATUS_ERR_FINISHED;
		return;
	}

	/* make sure the starting block is good */
	starting_block = g_boot_dev.dev_find_safe_block (dm_ctx.page_off);
	if (dm_ctx.page_off != starting_block) {
		dm_ctx.page_off = starting_block;
	}

	do {
		/* fill USB buffer */
		DM_TIME_BEGIN;
		mt_usbtty_getcn (pktsz, buf);
		DM_TIME_END_USB_READ;

		/* calculate check sum of received buffer */
#if (DM_CAL_CKSM_FROM_USB_BUFFER || DM_DBG_LOG)
#if DM_CAL_CKSM_FROM_USB_BUFFER
		DM_TIME_BEGIN;
		cal_chksum_per_pkt (buf, pktsz);
		DM_TIME_END_CHECKSM;
#endif
#endif
		/* check image boundary
		   always check image boundary at begining to ensure that
		   "won't write any data to next partition" */
		if (dm_ctx.page_off >= dm_ctx.img_info.addr_boundary) {
			//print ("current page_off (0%x) >= addr_boundary (0x%x)\n", dm_ctx.page_off, dm_ctx.img_info.addr_boundary);
			/* latched for the rest of the transfer: once past the
			 * boundary, every later packet is dropped too */
			invalid_addr = TRUE;
		}

		/* if addr is invalid, skip the nand writing process */
		if (invalid_addr == TRUE) {
			goto _next;
		}

		if (TRUE == need_erase_nand) {
			/* erase nand flash (first packet only) */
			handle_erase (&dm_ctx.img_info, pktsz);
			need_erase_nand = FALSE;
		}

		/* when the address is block alignment, check if this block is good */
		DM_TIME_BEGIN;
		if (dm_ctx.page_off % blkdev->erasesz == 0) {
			dm_ctx.page_off = g_boot_dev.dev_find_safe_block (dm_ctx.page_off);
		}
		DM_TIME_END_NAND_BAD_BLOCK;

		/* write nand flash */
		DM_TIME_BEGIN;
		g_boot_dev.dev_write_data (buf, dm_ctx.page_off) ;
		DM_TIME_END_NAND_WRITE;

_next:
		/* update the latest safe nand addr */
		g_dl_safe_start_addr = dm_ctx.page_off;

		/* increase must after flash data */
		dm_ctx.curr_cnt++;
		dm_ctx.curr_off += pktsz;
		dm_ctx.page_off += dm_ctx.page_size;
		//delay (1000);
	} while ((dm_ctx.curr_cnt <= dm_ctx.pkt_cnt) && (dm_ctx.dm_status == DM_STATUS_SECT_ONGING));

	dm_ctx.dm_status = DM_STATUS_SECT_FINISHED;
	return;
}
/*
 * intel_scu_ipc_write_umip() - write @len bytes into the UMIP area.
 *
 * On Cloverview the UMIP lives on the eMMC boot partition: the target
 * sector is patched in place, then the per-sector checksum table and the
 * UMIP prologue checksum in the header sector are updated. On other
 * platforms the write is forwarded to the SCU over rpmsg through the
 * memory-mapped MIP window (with read-modify-write for unaligned spans).
 *
 * @data:   source buffer of @len bytes.
 * @len:    number of bytes to write; limited to one sector on the eMMC path.
 * @offset: byte offset into the UMIP area.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * Fixes vs. original: blkdev_get() failure returned -ret (a positive
 * value), violating the negative-errno convention — now returns ret;
 * two occurrences of "&sect" had been mangled into the section-sign
 * character by an HTML entity round-trip — restored.
 */
int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
{
	int i, ret = 0, offset_align;
	int remainder, len_align = 0;
	u32 dptr, sptr, cmd;
	u8 cs, tbl_cs = 0, *buf = NULL;
	Sector sect;
	struct block_device *bdev;
	char *buffer = NULL;
	int *holderId = NULL;
	int sect_no;
	u8 checksum;

	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
		/* Opening the mmcblk0boot0 */
		bdev = get_emmc_bdev();
		if (bdev == NULL) {
			pr_err("%s: get_emmc failed!\n", __func__);
			return -ENODEV;
		}
		/* make sure the block device is open rw */
		ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, holderId);
		if (ret < 0) {
			pr_err("%s: blk_dev_get failed!\n", __func__);
			return ret;
		}
		/* get memmap of the UMIP header */
		sect_no = offset / SECTOR_SIZE;
		remainder = offset % SECTOR_SIZE;
		buffer = read_dev_sector(bdev,
				sect_no + UMIP_HEADER_HEADROOM_SECTOR, &sect);

		/* Shouldn't need to access UMIP sector 0/1 */
		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
			pr_err("invalid umip offset\n");
			ret = -EINVAL;
			goto bd_put;
		} else if (data == NULL || buffer == NULL) {
			pr_err("buffer is empty\n");
			ret = -ENODEV;
			goto bd_put;
		} else if (len > (SECTOR_SIZE - remainder)) {
			pr_err("too much data to write\n");
			ret = -EINVAL;
			goto bd_put;
		}

		/* patch the payload sector and push it to the device */
		lock_page(sect.v);
		memcpy(buffer + remainder, data, len);
		checksum = calc_checksum(buffer, SECTOR_SIZE);
		set_page_dirty(sect.v);
		unlock_page(sect.v);
		sync_blockdev(bdev);
		put_dev_sector(sect);

		/*
		 * Updating the checksum, sector 0 (starting from UMIP
		 * offset 0x08), we maintains 4 bytes for tracking each of
		 * sector changes individually. For example, the dword at
		 * offset 0x08 is used to checksum data integrity of sector
		 * number 2, and so on so forth. It's worthnoting that only
		 * the first byte in each 4 bytes stores checksum.
		 * For detail, please check CTP FAS UMIP header definition
		 */
		buffer = read_dev_sector(bdev,
				UMIP_HEADER_SECTOR + UMIP_HEADER_HEADROOM_SECTOR,
				&sect);
		if (buffer == NULL) {
			pr_err("buffer is empty\n");
			ret = -ENODEV;
			goto bd_put;
		}
		lock_page(sect.v);
		memcpy(buffer + 4 * (sect_no - UMIP_TOTAL_HEADER_SECTOR_NO) +
			UMIP_START_CHKSUM_ADDR, &checksum, 1/* one byte */);

		/* Change UMIP prologue chksum to zero */
		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = 0;

		for (i = 0; i < UMIP_TOTAL_CHKSUM_ENTRY; i++) {
			tbl_cs ^= *(u8 *)(buffer + 4 * i +
					UMIP_START_CHKSUM_ADDR);
		}

		/* Finish up with re-calcuating UMIP prologue checksum */
		cs = dword_to_byte_chksum(xorblock((u32 *)buffer,
					SECTOR_SIZE));

		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = tbl_cs ^ cs;
		set_page_dirty(sect.v);
		unlock_page(sect.v);
		sync_blockdev(bdev);
bd_put:
		if (buffer)
			put_dev_sector(sect);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return ret;
	} else {
		if (!intel_mip_base)
			return -ENODEV;

		if (offset + len > IPC_MIP_MAX_ADDR)
			return -EINVAL;

		rpmsg_global_lock();

		offset_align = offset & (~0x3);
		len_align = (len + (offset - offset_align) + 3) & (~0x3);

		if (len != len_align) {
			/* unaligned write: read-modify-write a dword-aligned
			 * span so the SCU only ever sees aligned data */
			buf = kzalloc(len_align, GFP_KERNEL);
			if (!buf) {
				pr_err("Alloc memory failed\n");
				ret = -ENOMEM;
				goto fail;
			}
			ret = read_mip(buf, len_align, offset_align, 0);
			if (ret)
				goto fail;
			memcpy(buf + offset - offset_align, data, len);
		} else {
			buf = data;
		}

		dptr = offset_align;
		sptr = len_align / 4;
		cmd = IPC_CMD_UMIP_WR << 12 | IPCMSG_MIP_ACCESS;
		memcpy(intel_mip_base, buf, len_align);
		ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL, NULL,
				0, 0, sptr, dptr);
fail:
		if (buf && len_align != len)
			kfree(buf);

		rpmsg_global_unlock();

		return ret;
	}
}