Example #1
File: vm.c Project: grub4android/lk
/* mark the physical pages backing a range of virtual addresses as in use.
 * allocate the physical pages and throw them away */
static void mark_pages_in_use(vaddr_t va, size_t len)
{
    LTRACEF("va 0x%lx, len 0x%zx\n", va, len);

    struct list_node list;
    list_initialize(&list);

    /* make sure we are inclusive of all of the pages in the address range */
    len = PAGE_ALIGN(len + (va & (PAGE_SIZE - 1)));
    va = ROUNDDOWN(va, PAGE_SIZE);

    LTRACEF("aligned va 0x%lx, len 0x%zx\n", va, len);

    for (size_t offset = 0; offset < len; offset += PAGE_SIZE) {
        uint flags;
        paddr_t pa;

        status_t err = arch_mmu_query(va + offset, &pa, &flags);
        if (err >= 0) {
            //LTRACEF("va 0x%x, pa 0x%x, flags 0x%x, err %d\n", va + offset, pa, flags, err);

            /* allocate the range, throw the results away */
            pmm_alloc_range(pa, 1, &list);
        } else {
            panic("Could not find pa for va 0x%lx\n", va);
        }
    }
}
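
Note on the call contract shared by these examples: arch_mmu_query returns NO_ERROR (0) when a translation exists and a negative status_t otherwise. A minimal hedged sketch of that contract; the helper name is_va_mapped is hypothetical:

/* sketch: true if va has a translation in the current address space.
 * assumes the lk-style signature arch_mmu_query(vaddr_t, paddr_t *, uint *) */
static bool is_va_mapped(vaddr_t va)
{
    paddr_t pa;
    uint flags;

    return arch_mmu_query(va, &pa, &flags) == NO_ERROR;
}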
Example #2
File: arch.c Project: M1cha/khook
paddr_t khook_arch_virt2phys(khook_t* khook, kvaddr_t va) {
	paddr_t pa;
	if (arch_mmu_query(khook, va, &pa, NULL, NULL))
		return 0;

	return pa;
}
Example #3
File: vm.c Project: grub4android/lk
paddr_t kvaddr_to_paddr(void *ptr)
{
    status_t rc;
    paddr_t  pa;

    rc = arch_mmu_query((vaddr_t)ptr, &pa, NULL);
    if (rc)
        return (paddr_t) NULL;
    return pa;
}
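
A typical use of this helper is translating a kernel-mapped buffer before handing its address to hardware. A minimal sketch; dma_buf and dma_buf_phys are hypothetical names:

/* sketch: translate a kernel buffer for a device that needs a physical address */
static uint8_t dma_buf[512];

static paddr_t dma_buf_phys(void)
{
    paddr_t pa = kvaddr_to_paddr(dma_buf);
    if (pa == (paddr_t)NULL) {
        /* not mapped in the kernel address space */
        return 0;
    }
    return pa;
}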
Example #4
static int mem_test(int argc, const cmd_args *argv)
{
    if (argc < 2) {
        printf("not enough arguments\n");
usage:
        printf("usage: %s <length>\n", argv[0].str);
        return -1;
    }

    void *ptr;
    size_t len = argv[1].u;

#if WITH_KERNEL_VMM
    /* round len up to the next page boundary */
    len = PAGE_ALIGN(len);
    if (len == 0) {
        printf("invalid length\n");
        goto usage;
    }

    /* allocate a region to test in */
    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "memtest", len, &ptr, 0, 0, ARCH_MMU_FLAG_UNCACHED);
    if (err < 0) {
        printf("error %d allocating test region\n", err);
        return -1;
    }

    paddr_t pa;
    arch_mmu_query((vaddr_t)ptr, &pa, 0);
    printf("physical address 0x%lx\n", pa);
#else
    /* allocate from the heap */
    ptr = malloc(len);
    if (!ptr) {
        printf("error allocating test area from heap\n");
        return -1;
    }

#endif

    printf("got buffer at %p of length 0x%lx\n", ptr, len);

    /* run the tests */
    do_mem_tests(ptr, len);

#if WITH_KERNEL_VMM
    // XXX free memory region here
    printf("NOTE: leaked memory\n");
#else
    free(ptr);
#endif

    return 0;
}
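
The XXX above leaks the test region on the VMM path. Assuming lk's vmm_free_region API (a natural counterpart to the vmm_alloc_contiguous call above; verify against the tree in use), the missing cleanup would look roughly like:

/* sketch of the missing cleanup, under the assumption that
 * vmm_free_region(aspace, va) releases a region by its base address */
vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)ptr);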
Example #5
File: dcc.c Project: astarasikov/lk
void lkboot_dcc_init(void)
{
    paddr_t pa;
    __UNUSED status_t err;

    buffer_desc.version = PDCC_VERSION;

    err = arch_mmu_query((vaddr_t)htod_buffer, &pa, NULL);
    DEBUG_ASSERT(err == NO_ERROR);

    buffer_desc.htod_buffer_phys = pa;
    buffer_desc.htod_buffer_len = DCC_BUFLEN;

    err = arch_mmu_query((vaddr_t)dtoh_buffer, &pa, NULL);
    DEBUG_ASSERT(err == NO_ERROR);

    buffer_desc.dtoh_buffer_phys = pa;
    buffer_desc.dtoh_buffer_len = DCC_BUFLEN;

    err = arch_mmu_query((vaddr_t)&buffer_desc, &buffer_desc_phys, NULL);
    DEBUG_ASSERT(err == NO_ERROR);

    arch_clean_cache_range((vaddr_t)&buffer_desc, sizeof(buffer_desc));
}
Example #6
File: vm.c Project: grub4android/lk
static int cmd_vm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s phys2virt <address>\n", argv[0].str);
        printf("%s virt2phys <address>\n", argv[0].str);
        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
        printf("%s unmap <virt> <count>\n", argv[0].str);
        return ERR_GENERIC;
    }

    if (!strcmp(argv[1].str, "phys2virt")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = paddr_to_kvaddr(argv[2].u);
        printf("paddr_to_kvaddr returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "virt2phys")) {
        if (argc < 3) goto notenoughargs;

        paddr_t pa;
        uint flags;
        status_t err = arch_mmu_query(argv[2].u, &pa, &flags);
        printf("arch_mmu_query returns %d\n", err);
        if (err >= 0) {
            printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
        }
    } else if (!strcmp(argv[1].str, "map")) {
        if (argc < 6) goto notenoughargs;

        int err = arch_mmu_map(argv[3].u, argv[2].u, argv[4].u, argv[5].u);
        printf("arch_mmu_map returns %d\n", err);
    } else if (!strcmp(argv[1].str, "unmap")) {
        if (argc < 4) goto notenoughargs;

        int err = arch_mmu_unmap(argv[2].u, argv[3].u);
        printf("arch_mmu_unmap returns %d\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}
Example #7
static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id, void *ptr, size_t buf_len)
{
    status_t err;

    LTRACEF("gdev %p, resource_id %u, ptr %p, buf_len %zu\n", gdev, resource_id, ptr, buf_len);

    DEBUG_ASSERT(gdev);
    DEBUG_ASSERT(ptr);

    /* grab the lock so only one message is in flight at a time */
    mutex_acquire(&gdev->lock);

    /* construct the request */
    struct {
        struct virtio_gpu_resource_attach_backing req;
        struct virtio_gpu_mem_entry mem;
    } req;
    memset(&req, 0, sizeof(req));

    req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
    req.req.resource_id = resource_id;
    req.req.nr_entries = 1;

    paddr_t pa;
    err = arch_mmu_query((vaddr_t)ptr, &pa, NULL);
    DEBUG_ASSERT(err == NO_ERROR);
    req.mem.addr = pa;
    req.mem.length = buf_len;

    /* send the command and get a response */
    struct virtio_gpu_ctrl_hdr *res;
    err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
    DEBUG_ASSERT(err == NO_ERROR);

    /* see if we got a valid response */
    LTRACEF("response type 0x%x\n", res->type);
    err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;

    /* release the lock */
    mutex_release(&gdev->lock);

    return err;
}
Example #8
static int alloc_page_table(paddr_t *paddrp, uint page_size_shift)
{
    int ret;
    int count;
    size_t size = 1U << page_size_shift;
    void *vaddr;

    if (size >= PAGE_SIZE) {
        count = size / PAGE_SIZE;
        ret = pmm_alloc_contiguous(count, page_size_shift, paddrp, NULL, PMM_ARENA_FLAG_ANY);
        if (ret != count)
            return ERR_NO_MEMORY;
    } else {
        vaddr = heap_alloc(size, size);
        if (!vaddr)
            return ERR_NO_MEMORY;
        ret = arch_mmu_query((vaddr_t)vaddr, paddrp, NULL);
        if (ret) {
            heap_free(vaddr);
            return ret;
        }
    }
    return 0;
}
Example #9
status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features)
{
    LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);

    /* allocate a new block device */
    struct virtio_block_dev *bdev = malloc(sizeof(struct virtio_block_dev));
    if (!bdev)
        return ERR_NO_MEMORY;

    mutex_init(&bdev->lock);
    event_init(&bdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);

    bdev->dev = dev;
    dev->priv = bdev;

    bdev->blk_req = memalign(sizeof(struct virtio_blk_req), sizeof(struct virtio_blk_req));
#if WITH_KERNEL_VM
    arch_mmu_query((vaddr_t)bdev->blk_req, &bdev->blk_req_phys, NULL);
#else
    bdev->blk_freq_phys = (uint64_t)(uintptr_t)bdev->blk_req;
#endif
    LTRACEF("blk_req structure at %p (0x%lx phys)\n", bdev->blk_req, bdev->blk_req_phys);

#if WITH_KERNEL_VM
    arch_mmu_query((vaddr_t)&bdev->blk_response, &bdev->blk_response_phys, NULL);
#else
    bdev->blk_response_phys = (uint64_t)(uintptr_t)&bdev->blk_response;
#endif

    /* make sure the device is reset */
    virtio_reset_device(dev);

    volatile struct virtio_blk_config *config = (struct virtio_blk_config *)dev->config_ptr;

    LTRACEF("capacity 0x%llx\n", config->capacity);
    LTRACEF("size_max 0x%x\n", config->size_max);
    LTRACEF("seg_max  0x%x\n", config->seg_max);
    LTRACEF("blk_size 0x%x\n", config->blk_size);

    /* ack and set the driver status bit */
    virtio_status_acknowledge_driver(dev);

    // XXX check features bits and ack/nak them

    /* allocate a virtio ring */
    virtio_alloc_ring(dev, 0, 256);

    /* set our irq handler */
    dev->irq_driver_callback = &virtio_block_irq_driver_callback;

    /* set DRIVER_OK */
    virtio_status_driver_ok(dev);

    /* construct the block device */
    static uint8_t found_index = 0;
    char buf[16];
    snprintf(buf, sizeof(buf), "virtio%u", found_index++);
    bio_initialize_bdev(&bdev->bdev, buf,
                        config->blk_size, config->capacity,
                        0, NULL);

    /* override our block device hooks */
    bdev->bdev.read_block = &virtio_bdev_read_block;
    bdev->bdev.write_block = &virtio_bdev_write_block;

    bio_register_device(&bdev->bdev);

    printf("found virtio block device of size %lld\n", config->capacity * config->blk_size);

    return NO_ERROR;
}
Example #10
ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write)
{
    struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;

    uint16_t i;
    struct vring_desc *desc;
    paddr_t pa;
    vaddr_t va = (vaddr_t)buf;
    size_t total_len = len; /* len is consumed by the scatter-gather loop below */

    LTRACEF("dev %p, buf %p, offset 0x%llx, len %zu\n", dev, buf, offset, len);

    mutex_acquire(&bdev->lock);

    /* set up the request */
    bdev->blk_req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN;
    bdev->blk_req->ioprio = 0;
    bdev->blk_req->sector = offset / 512;
    LTRACEF("blk_req type %u ioprio %u sector %llu\n",
            bdev->blk_req->type, bdev->blk_req->ioprio, bdev->blk_req->sector);

    /* put together a transfer */
    desc = virtio_alloc_desc_chain(dev, 0, 3, &i);
    LTRACEF("after alloc chain desc %p, i %u\n", desc, i);

    // XXX not cache safe.
    // At the moment only tested on arm qemu, which doesn't emulate cache.

    /* set up the descriptor pointing to the head */
    desc->addr = bdev->blk_req_phys;
    desc->len = sizeof(struct virtio_blk_req);
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the buffer */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
#if WITH_KERNEL_VM
    /* translate the first buffer */
    arch_mmu_query(va, &pa, NULL);
    desc->addr = (uint64_t)pa;
    /* desc->len is filled in below */
#else
    desc->addr = (uint64_t)(uintptr_t)buf;
    desc->len = len;
#endif
    desc->flags |= write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */
    desc->flags |= VRING_DESC_F_NEXT;

#if WITH_KERNEL_VM
    /* see if we need to add more descriptors due to scatter gather */
    paddr_t next_pa = PAGE_ALIGN(pa + 1);
    desc->len = MIN(next_pa - pa, len);
    LTRACEF("first descriptor va 0x%lx desc->addr 0x%llx desc->len %u\n", va, desc->addr, desc->len);
    len -= desc->len;
    while (len > 0) {
        /* amount of source buffer handled by this iteration of the loop */
        size_t len_tohandle = MIN(len, PAGE_SIZE);

        /* translate the next page in the buffer */
        va = PAGE_ALIGN(va + 1);
        arch_mmu_query(va, &pa, NULL);
        LTRACEF("va now 0x%lx, pa 0x%lx, next_pa 0x%lx, remaining len %zu\n", va, pa, next_pa, len);

        /* is the new translated physical address contiguous to the last one? */
        if (next_pa == pa) {
            LTRACEF("extending last one by %zu bytes\n", len_tohandle);
            desc->len += len_tohandle;
        } else {
            uint16_t next_i = virtio_alloc_desc(dev, 0);
            struct vring_desc *next_desc = virtio_desc_index_to_desc(dev, 0, next_i);
            DEBUG_ASSERT(next_desc);

            LTRACEF("doesn't extend, need new desc, allocated desc %i (%p)\n", next_i, next_desc);

            /* fill this descriptor in and put it after the last one but before the response descriptor */
            next_desc->addr = (uint64_t)pa;
            next_desc->len = len_tohandle;
            next_desc->flags = write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */
            next_desc->flags |= VRING_DESC_F_NEXT;
            next_desc->next = desc->next;
            desc->next = next_i;

            desc = next_desc;
        }
        len -= len_tohandle;
        next_pa += PAGE_SIZE;
    }
#endif

    /* set up the descriptor pointing to the response */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
    desc->addr = bdev->blk_response_phys;
    desc->len = 1;
    desc->flags = VRING_DESC_F_WRITE;

    /* submit the transfer */
    virtio_submit_chain(dev, 0, i);

    /* kick it off */
    virtio_kick(dev, 0);

    /* wait for the transfer to complete */
    event_wait(&bdev->io_event);

    LTRACEF("status 0x%hhx\n", bdev->blk_response);

    mutex_release(&bdev->lock);

    /* len was consumed by the descriptor loop; return the original length */
    return total_len;
}
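
The first-descriptor length computation deserves a worked example: PAGE_ALIGN(pa + 1) rounds up to the next page boundary, so next_pa - pa is the number of bytes left in pa's page. A sketch assuming 4 KiB pages:

/* worked example, assuming PAGE_SIZE == 4096:
 * say pa == 0x1000f00 and len == 0x300 (768 bytes), then
 *   next_pa   = PAGE_ALIGN(0x1000f01) = 0x1001000
 *   desc->len = MIN(next_pa - pa, len) = MIN(0x100, 0x300) = 0x100
 * leaving 0x200 bytes to be queued against the next page's translation */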
Example #11
File: virtio.c Project: srodrig1/lk
status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
{
    LTRACEF("dev %p, index %u, len %u\n", dev, index, len);

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(len > 0 && ispow2(len));
    DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);

    if (len == 0 || !ispow2(len))
        return ERR_INVALID_ARGS;

    struct vring *ring = &dev->ring[index];

    /* allocate a ring */
    size_t size = vring_size(len, PAGE_SIZE);
    LTRACEF("need %zu bytes\n", size);

#if WITH_KERNEL_VM
    void *vptr;
    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (err < 0)
        return ERR_NO_MEMORY;

    LTRACEF("allocated virtio_ring at va %p\n", vptr);

    /* compute the physical address */
    paddr_t pa;
    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
    if (err < 0) {
        return ERR_NO_MEMORY;
    }

    LTRACEF("virtio_ring at pa 0x%lx\n", pa);
#else
    void *vptr = memalign(PAGE_SIZE, size);
    if (!vptr)
        return ERR_NO_MEMORY;

    LTRACEF("ptr %p\n", vptr);
    memset(vptr, 0, size);

    /* compute the physical address */
    paddr_t pa = (paddr_t)vptr;
#endif

    /* initialize the ring */
    vring_init(ring, len, vptr, PAGE_SIZE);
    dev->ring[index].free_list = 0xffff;
    dev->ring[index].free_count = 0;

    /* add all the descriptors to the free list */
    for (uint i = 0; i < len; i++) {
        virtio_free_desc(dev, index, i);
    }

    /* register the ring with the device */
    DEBUG_ASSERT(dev->mmio_config);
    dev->mmio_config->guest_page_size = PAGE_SIZE;
    dev->mmio_config->queue_sel = index;
    dev->mmio_config->queue_num = len;
    dev->mmio_config->queue_align = PAGE_SIZE;
    dev->mmio_config->queue_pfn = pa / PAGE_SIZE;

    /* mark the ring active */
    dev->active_rings_bitmap |= (1 << index);

    return NO_ERROR;
}
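
For a sense of scale: assuming the standard legacy vring layout (16-byte descriptors, 8-byte used-ring elements, a 2-byte event index at the end of each ring) and 4 KiB pages, a queue of len == 256 works out as follows:

/* worked sizing sketch for len == 256, PAGE_SIZE == 4096 (legacy layout):
 *   descriptors: 256 * 16                 = 4096 bytes
 *   avail ring:  2 + 2 + 256 * 2 + 2      =  518 bytes
 *   first part rounded up to a page       = 8192 bytes
 *   used ring:   2 + 2 + 256 * 8 + 2      = 2054 -> 4096 bytes
 *   vring_size(256, 4096)                 = 12288 bytes (3 pages) */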
Example #12
static int do_boot(lkb_t *lkb, size_t len, const char **result)
{
    LTRACEF("lkb %p, len %zu, result %p\n", lkb, len, result);

    void *buf;
    paddr_t buf_phys;

    if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "lkboot_iobuf",
        len, &buf, log2_uint(1024*1024), 0, ARCH_MMU_FLAG_UNCACHED) < 0) {
        *result = "not enough memory";
        return -1;
    }
    arch_mmu_query((vaddr_t)buf, &buf_phys, NULL);
    LTRACEF("iobuffer %p (phys 0x%lx)\n", buf, buf_phys);

    if (lkb_read(lkb, buf, len)) {
        *result = "io error";
        // XXX free buffer here
        return -1;
    }

    /* construct a boot argument list */
    const size_t bootargs_size = PAGE_SIZE;
#if 0
    void *args = (void *)((uintptr_t)lkb_iobuffer + lkb_iobuffer_size - bootargs_size);
    paddr_t args_phys = lkb_iobuffer_phys + lkb_iobuffer_size - bootargs_size;
#elif PLATFORM_ZYNQ
    /* grab the top page of sram */
    /* XXX do this better */
    paddr_t args_phys = SRAM_BASE + SRAM_SIZE - bootargs_size;
    void *args = paddr_to_kvaddr(args_phys);
#else
#error need better way
#endif
    LTRACEF("boot args %p, phys 0x%lx, len %zu\n", args, args_phys, bootargs_size);

    bootargs_start(args, bootargs_size);
    bootargs_add_command_line(args, bootargs_size, "what what");
    arch_clean_cache_range((vaddr_t)args, bootargs_size);

    ulong lk_args[4];
    bootargs_generate_lk_arg_values(args_phys, lk_args);

    const void *ptr;

    /* sniff it to see if it's a bootimage or a raw image */
    bootimage_t *bi;
    if (bootimage_open(buf, len, &bi) >= 0) {
        size_t len;

        /* it's a bootimage */
        TRACEF("detected bootimage\n");

        /* find the lk image */
        if (bootimage_get_file_section(bi, TYPE_LK, &ptr, &len) >= 0) {
            TRACEF("found lk section at %p\n", ptr);

            /* add the boot image to the argument list */
            size_t bootimage_size;
            bootimage_get_range(bi, NULL, &bootimage_size);

            bootargs_add_bootimage_pointer(args, bootargs_size, "pmem", buf_phys, bootimage_size);
        }
    } else {
        /* raw image, just chain load it directly */
        TRACEF("raw image, chainloading\n");

        ptr = buf;
    }

    /* start a boot thread to complete the startup */
    static struct chainload_args cl_args;

    cl_args.func = (void *)ptr;
    cl_args.args[0] = lk_args[0];
    cl_args.args[1] = lk_args[1];
    cl_args.args[2] = lk_args[2];
    cl_args.args[3] = lk_args[3];

    thread_resume(thread_create("boot", &chainload_thread, &cl_args,
        DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));

    return 0;
}
Example #13
static int cmd_display_mem(int argc, const cmd_args *argv)
{
    /* save the last address and len so we can continue where we left off */
    static unsigned long address;
    static size_t len;

    if (argc < 3 && len == 0) {
        printf("not enough arguments\n");
        printf("%s [address] [length]\n", argv[0].str);
        return -1;
    }

    int size;
    if (strcmp(argv[0].str, "dw") == 0) {
        size = 4;
    } else if (strcmp(argv[0].str, "dh") == 0) {
        size = 2;
    } else {
        size = 1;
    }

    if (argc >= 2) {
        address = argv[1].u;
    }
    if (argc >= 3) {
        len = argv[2].u;
    }

    unsigned long stop = address + len;
    int count = 0;

    if ((address & (size - 1)) != 0) {
        printf("unaligned address, cannot display\n");
        return -1;
    }

#if WITH_KERNEL_VM
    /* preflight the start address to see if it's mapped */
    if (arch_mmu_query((vaddr_t)address, NULL, NULL) < 0) {
        printf("ERROR: address 0x%lx is unmapped\n", address);
        return -1;
    }
#endif

    for ( ; address < stop; address += size) {
        if (count == 0)
            printf("0x%08lx: ", address);
        switch (size) {
        case 4:
            printf("%08x ", *(uint32_t *)address);
            break;
        case 2:
            printf("%04hx ", *(uint16_t *)address);
            break;
        case 1:
            printf("%02hhx ", *(uint8_t *)address);
            break;
        }
        count += size;
        if (count == 16) {
            printf("\n");
            count = 0;
        }
    }

    if (count != 0)
        printf("\n");

    return 0;
}
Example #14
ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write)
{
    struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;

    uint16_t i;
    struct vring_desc *desc;
    paddr_t pa;

    LTRACEF("dev %p, buf %p, offset 0x%llx, len %zu\n", dev, buf, offset, len);

    mutex_acquire(&bdev->lock);

    /* set up the request */
    bdev->blk_req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN;
    bdev->blk_req->ioprio = 0;
    bdev->blk_req->sector = offset / 512;
    LTRACEF("blk_req type %u ioprio %u sector %llu\n",
            bdev->blk_req->type, bdev->blk_req->ioprio, bdev->blk_req->sector);

    /* put together a transfer */
    desc = virtio_alloc_desc_chain(dev, 0, 3, &i);
    LTRACEF("after alloc chain desc %p, i %u\n", desc, i);

    // XXX not cache safe.
    // At the moment only tested on arm qemu, which doesn't emulate cache.

    /* set up the descriptor pointing to the head */
    desc->addr = bdev->blk_req_phys;
    desc->len = sizeof(struct virtio_blk_req);
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the buffer */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
#if WITH_KERNEL_VM
    // XXX handle bufs that cross page boundaries
    arch_mmu_query((vaddr_t)buf, &pa, NULL);
    desc->addr = (uint64_t)pa;
#else
    desc->addr = (uint64_t)(uintptr_t)buf;
#endif
    desc->len = len;
    desc->flags |= write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the response */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
    desc->addr = bdev->blk_response_phys;
    desc->len = 1;
    desc->flags = VRING_DESC_F_WRITE;

    /* submit the transfer */
    virtio_submit_chain(dev, 0, i);

    /* kick it off */
    virtio_kick(dev, 0);

    /* wait for the transfer to complete */
    event_wait(&bdev->io_event);

    LTRACEF("status 0x%hhx\n", bdev->blk_response);

    mutex_release(&bdev->lock);

    return len;
}
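
Unlike Example #10, this variant translates only the first page of the buffer (the XXX comment notes this), so it silently assumes the buffer never crosses a page boundary. A hedged guard that makes the assumption explicit, using ROUNDDOWN and PAGE_SIZE as seen in Example #1 (helper name hypothetical, len assumed nonzero):

/* sketch: true if [buf, buf + len) stays within a single page */
static bool buf_within_one_page(const void *buf, size_t len)
{
    return ROUNDDOWN((vaddr_t)buf, PAGE_SIZE) ==
           ROUNDDOWN((vaddr_t)buf + len - 1, PAGE_SIZE);
}

A DEBUG_ASSERT(buf_within_one_page(buf, len)) before building the buffer descriptor would catch violations in debug builds.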