Example #1
0
#ifdef TARGET_I386
static
#endif
uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
{
    uint32_t size;
    uint64_t addr;
    nand_dev *dev;

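    /* Batch commands pass their parameters through a batch_data descriptor
     * in guest physical memory; fetch it and load its fields into the
     * controller state before dispatching below. */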
    if (cmd == NAND_CMD_WRITE_BATCH || cmd == NAND_CMD_READ_BATCH ||
        cmd == NAND_CMD_ERASE_BATCH) {
        struct batch_data bd;
        uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;

        cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
        s->dev = bd.dev;
        s->addr_low = bd.addr_low;
        s->addr_high = bd.addr_high;
        s->transfer_size = bd.transfer_size;
        s->data = bd.data;
    }
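    /* Combine the 32-bit address halves into a 64-bit offset and resolve the
     * requested device index against the registered NAND devices. */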
    addr = s->addr_low | ((uint64_t)s->addr_high << 32);
    size = s->transfer_size;
    if(s->dev >= nand_dev_count)
        return 0;
    dev = nand_devs + s->dev;

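    /* Each handler clamps the transfer to the device bounds and returns the
     * number of bytes actually processed, or 0 on error. */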
    switch(cmd) {
    case NAND_CMD_GET_DEV_NAME:
        if(size > dev->devname_len)
            size = dev->devname_len;
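        /* Under KVM, pull the current CPU state from the kernel before
         * cpu_memory_rw_debug() translates the guest virtual address. */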
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env, s->data, (uint8_t*)dev->devname, size, 1);
        return size;
    case NAND_CMD_READ_BATCH:
    case NAND_CMD_READ:
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_read_file(dev, s->data, addr, size);
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env, s->data, &dev->data[addr], size, 1);
        return size;
    case NAND_CMD_WRITE_BATCH:
    case NAND_CMD_WRITE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_write_file(dev, s->data, addr, size);
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env, s->data, &dev->data[addr], size, 0);
        return size;
    case NAND_CMD_ERASE_BATCH:
    case NAND_CMD_ERASE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_erase_file(dev, addr, size);
        memset(&dev->data[addr], 0xff, size);
        return size;
    case NAND_CMD_BLOCK_BAD_GET: // no bad block support
        return 0;
    case NAND_CMD_BLOCK_BAD_SET:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        return 0;
    default:
        cpu_abort(cpu_single_env, "nand_dev_do_cmd: Bad command %x\n", cmd);
        return 0;
    }
}
Example #2
0
#ifdef TARGET_I386 /* assumed guard; the opening #ifdef is cut off in this excerpt */
static
#endif
uint32_t nand_dev_do_cmd(nand_dev_state *s, uint32_t cmd)
{
    uint32_t size;
    uint64_t addr;
    nand_dev *dev;

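    /* Merge the low/high address registers into a 64-bit offset and look up
     * the requested device. */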
    addr = s->addr_low | ((uint64_t)s->addr_high << 32);
    size = s->transfer_size;
    if(s->dev >= nand_dev_count)
        return 0;
    dev = nand_devs + s->dev;

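    /* Handlers clamp the transfer to the device size and return the number of
     * bytes processed, or 0 on error. */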
    switch(cmd) {
    case NAND_CMD_GET_DEV_NAME:
        if(size > dev->devname_len)
            size = dev->devname_len;
        cpu_memory_rw_debug(cpu_single_env, s->data, dev->devname, size, 1);
        return size;
    case NAND_CMD_READ:
        if(addr >= dev->size)
            return 0;
        if(size + addr > dev->size)
            size = dev->size - addr;
        if(dev->fd >= 0)
            return nand_dev_read_file(dev, s->data, addr, size);
        cpu_memory_rw_debug(cpu_single_env, s->data, &dev->data[addr], size, 1);
        return size;
    case NAND_CMD_WRITE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->size)
            return 0;
        if(size + addr > dev->size)
            size = dev->size - addr;
        if(dev->fd >= 0)
            return nand_dev_write_file(dev, s->data, addr, size);
        cpu_memory_rw_debug(cpu_single_env, s->data, &dev->data[addr], size, 0);
        return size;
    case NAND_CMD_ERASE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->size)
            return 0;
        if(size + addr > dev->size)
            size = dev->size - addr;
        if(dev->fd >= 0)
            return nand_dev_erase_file(dev, addr, size);
        memset(&dev->data[addr], 0xff, size);
        return size;
    case NAND_CMD_BLOCK_BAD_GET: // no bad block support
        return 0;
    case NAND_CMD_BLOCK_BAD_SET:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        return 0;
    default:
        cpu_abort(cpu_single_env, "nand_dev_do_cmd: Bad command %x\n", cmd);
        return 0;
    }
}