/*===========================================================================*
 *				sef_cb_signal_handler			     *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
	/* In case of a termination signal, shut down this driver.
	 * Stop the device, and deallocate resources as proof of concept.
	 */
	int r;

	/* Only check for termination signal, ignore anything else. */
	if (signo != SIGTERM) return;

	atl2_stop();

	if ((r = sys_irqrmpolicy(&state.hook_id)) != OK)
		panic("unable to deregister IRQ: %d", r);

	free_contig(state.txd_base, ATL2_TXD_BUFSIZE);
	free_contig(state.txs_base, ATL2_TXS_COUNT * sizeof(u32_t));
	free_contig(state.rxd_base_u,
	    state.rxd_align + ATL2_RXD_COUNT * ATL2_RXD_SIZE);

	vm_unmap_phys(SELF, (void *) state.base, state.size);

	/* We cannot free the PCI device at this time. */

	exit(0);
}
static int virtio_net_alloc_bufs(void)
{
	data_vir = alloc_contig(PACKET_BUF_SZ, 0, &data_phys);

	if (!data_vir)
		return ENOMEM;

	hdrs_vir = alloc_contig(BUF_PACKETS * sizeof(hdrs_vir[0]),
	    0, &hdrs_phys);

	if (!hdrs_vir) {
		free_contig(data_vir, PACKET_BUF_SZ);
		return ENOMEM;
	}

	packets = malloc(BUF_PACKETS * sizeof(packets[0]));

	if (!packets) {
		free_contig(data_vir, PACKET_BUF_SZ);
		free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
		return ENOMEM;
	}

	memset(data_vir, 0, PACKET_BUF_SZ);
	memset(hdrs_vir, 0, BUF_PACKETS * sizeof(hdrs_vir[0]));
	memset(packets, 0, BUF_PACKETS * sizeof(packets[0]));

	return OK;
}
static void free_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);
	assert(q->vaddr != NULL);

	free_contig(q->vaddr, q->ring_size);
	q->vaddr = NULL;
	q->paddr = 0;

	/* Release the per-descriptor data array with the same size it was
	 * allocated with in alloc_phys_queue(), before clearing q->num.
	 */
	free_contig(q->data, sizeof(q->data[0]) * q->num);
	q->data = NULL;
	q->num = 0;
}
/*
 * The driver is terminating. Clean up.
 */
static void virtio_net_stop(void)
{
	dput(("Terminating"));

	free_contig(data_vir, PACKET_BUF_SZ);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free(packets);

	virtio_reset_device(net_dev);
	virtio_free_queues(net_dev);
	virtio_free_device(net_dev);
	net_dev = NULL;
}
static int init_indirect_desc_tables(struct virtio_device *dev)
{
	int i, j, r;
	struct indirect_desc_table *desc;

	dev->indirect = malloc(dev->num_indirect * sizeof(dev->indirect[0]));

	if (dev->indirect == NULL) {
		printf("%s: Could not allocate indirect tables\n", dev->name);
		return ENOMEM;
	}

	memset(dev->indirect, 0, dev->num_indirect * sizeof(dev->indirect[0]));

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];
		if ((r = init_indirect_desc_table(desc)) != OK) {
			/* error path */
			for (j = 0; j < i; j++) {
				desc = &dev->indirect[j];
				free_contig(desc->descs, desc->len);
			}

			free(dev->indirect);
			return r;
		}
	}

	return OK;
}
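/*
 * Illustration only: a minimal sketch of the per-table helper called in the
 * loop above. The real init_indirect_desc_table() may differ; the only
 * requirement visible here is that desc->descs and desc->len end up matching
 * the free_contig(desc->descs, desc->len) calls on the error path and in
 * virtio_free_device(). INDIRECT_TABLE_LEN and desc->paddr are assumed
 * names, not taken from this code.
 */
static int init_indirect_desc_table(struct indirect_desc_table *desc)
{
	desc->len = INDIRECT_TABLE_LEN;	/* assumed size constant */

	desc->descs = alloc_contig(desc->len, AC_ALIGN4K, &desc->paddr);
	if (desc->descs == NULL)
		return ENOMEM;

	memset(desc->descs, 0, desc->len);
	return OK;
}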
/*===========================================================================*
 *				flt_free				     *
 *===========================================================================*/
void flt_free(char *buf, size_t size, const char *sbuf)
{
	/* Free a buffer previously allocated with flt_malloc(). */

	if(buf != sbuf)
		free_contig(buf, size);
}
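/*
 * Illustration only: a plausible counterpart to flt_free() above, inferred
 * from its logic. Requests that fit in the caller-provided static buffer
 * 'sbuf' reuse it; anything larger gets a contiguous allocation, which is
 * what flt_free() later releases with free_contig(). The actual flt_malloc()
 * may differ, in particular in its error handling.
 */
char *flt_malloc(size_t size, char *sbuf, size_t ssize)
{
	char *p;

	if (size <= ssize)
		return sbuf;	/* the static buffer is large enough */

	if ((p = alloc_contig(size, 0, NULL)) == NULL)
		panic("out of memory: %d", (int) size);

	return p;
}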
static void sef_cb_signal_handler(int signo)
{
	if (signo != SIGTERM)
		return;

	dput(("Terminating"));

	free_contig(data_vir, PACKET_BUF_SZ);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free(packets);

	virtio_reset_device(net_dev);
	virtio_free_queues(net_dev);
	virtio_free_device(net_dev);
	net_dev = NULL;

	exit(1);
}
/*===========================================================================*
 *				buf_pool				     *
 *===========================================================================*/
PUBLIC void buf_pool(int new_nr_bufs)
{
	/* Initialize the buffer pool. */
	register struct buf *bp;

	assert(new_nr_bufs > 0);

	if(nr_bufs > 0) {
		assert(buf);
		(void) fs_sync();

		for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
			if(bp->bp) {
				assert(bp->b_bytes > 0);
				free_contig(bp->bp, bp->b_bytes);
			}
		}
	}

	if(buf)
		free(buf);

	if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
		panic("couldn't allocate buf list (%d)", new_nr_bufs);

	if(buf_hash)
		free(buf_hash);
	if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
		panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

	nr_bufs = new_nr_bufs;

	bufs_in_use = 0;
	front = &buf[0];
	rear = &buf[nr_bufs - 1];

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		bp->b_blocknr = NO_BLOCK;
		bp->b_dev = NO_DEV;
		bp->b_next = bp + 1;
		bp->b_prev = bp - 1;
		bp->bp = NULL;
		bp->b_bytes = 0;
	}
	buf[0].b_prev = NULL;
	buf[nr_bufs - 1].b_next = NULL;

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->b_hash = bp->b_next;
	buf_hash[0] = front;

	vm_forgetblocks();
}
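/*
 * Illustration only: where the bp->bp / bp->b_bytes pair freed by buf_pool()
 * above is expected to be filled in, namely when a cache slot is first given
 * backing memory. The helper name and the fs_block_size parameter are
 * assumptions for this sketch, not taken from this file.
 */
static void alloc_block_data(struct buf *bp, size_t fs_block_size)
{
	if (bp->bp == NULL) {
		assert(bp->b_bytes == 0);
		if (!(bp->bp = alloc_contig(fs_block_size, 0, NULL)))
			panic("couldn't allocate block data (%d)",
				(int) fs_block_size);
		bp->b_bytes = fs_block_size;
	}
}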
/*===========================================================================*
 *				sef_cb_signal_handler			     *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
	/* Terminate immediately upon receiving a SIGTERM. */
	if (signo != SIGTERM) return;

#if DEBUG
	printf("FBD: shutting down\n");
#endif

	/* Clean up resources. */
	free_contig(fbd_buf, BUF_SIZE);

	exit(0);
}
static int virtio_blk_alloc_requests(void)
{
	/* Allocate memory for request headers and status field */

	hdrs_vir = alloc_contig(VIRTIO_BLK_NUM_THREADS * sizeof(hdrs_vir[0]),
	    AC_ALIGN4K, &hdrs_phys);

	if (!hdrs_vir)
		return ENOMEM;

	status_vir = alloc_contig(VIRTIO_BLK_NUM_THREADS * sizeof(status_vir[0]),
	    AC_ALIGN4K, &status_phys);

	if (!status_vir) {
		free_contig(hdrs_vir, VIRTIO_BLK_NUM_THREADS * sizeof(hdrs_vir[0]));
		return ENOMEM;
	}

	return OK;
}
void virtio_free_device(struct virtio_device *dev)
{
	int i;
	struct indirect_desc_table *desc;

	assert(dev != NULL);

	assert(dev->num_indirect > 0);

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];
		free_contig(desc->descs, desc->len);
	}

	dev->num_indirect = 0;

	assert(dev->indirect != NULL);
	free(dev->indirect);
	dev->indirect = NULL;

	free(dev);
}
static int alloc_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* How much memory do we need? */
	q->ring_size = vring_size(q->num, PAGE_SIZE);

	q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr);

	if (q->vaddr == NULL)
		return ENOMEM;

	q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL);

	if (q->data == NULL) {
		free_contig(q->vaddr, q->ring_size);
		q->vaddr = NULL;
		q->paddr = 0;
		return ENOMEM;
	}

	return OK;
}
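/*
 * Illustration only: the layout that vring_size() above accounts for, per
 * the virtio ring specification: the descriptor table and the available
 * ring occupy the first part (padded up to 'align'), followed by the used
 * ring. This sketch assumes the basic layout without the optional
 * event-index fields; the library header's own vring_size() is
 * authoritative.
 */
static inline unsigned example_vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num +	/* descriptor table */
	    sizeof(u16_t) * (2 + num) +			/* available ring */
	    align - 1) & ~(align - 1)) +		/* pad to 'align' */
	    sizeof(u16_t) * 2 +				/* used ring header */
	    sizeof(struct vring_used_elem) * num;	/* used ring entries */
}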
static void virtio_blk_free_requests(void)
{
	free_contig(hdrs_vir, VIRTIO_BLK_NUM_THREADS * sizeof(hdrs_vir[0]));
	free_contig(status_vir, VIRTIO_BLK_NUM_THREADS * sizeof(status_vir[0]));
}
/*===========================================================================*
 *				fbd_transfer_copy			     *
 *===========================================================================*/
static ssize_t fbd_transfer_copy(int do_write, u64_t position,
	endpoint_t endpt, iovec_t *iov, unsigned int count, size_t size,
	int flags)
{
	/* Interpose on the request. */
	iovec_s_t iovec[NR_IOREQS];
	struct vscp_vec vscp_vec[SCPVEC_NR];
	cp_grant_id_t grant;
	size_t off, len;
	message m;
	char *ptr;
	int i, j, r;
	ssize_t rsize;

	assert(count > 0 && count <= SCPVEC_NR);

	if (size > BUF_SIZE) {
		printf("FBD: allocating memory for %zu bytes\n", size);

		ptr = alloc_contig(size, 0, NULL);

		assert(ptr != NULL);
	}
	else ptr = fbd_buf;

	/* For write operations, first copy in the data to write. */
	if (do_write) {
		for (i = off = 0; i < count; i++) {
			len = iov[i].iov_size;

			vscp_vec[i].v_from = endpt;
			vscp_vec[i].v_to = SELF;
			vscp_vec[i].v_gid = iov[i].iov_addr;
			vscp_vec[i].v_offset = 0;
			vscp_vec[i].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[i].v_bytes = len;

			off += len;
		}

		if ((r = sys_vsafecopy(vscp_vec, i)) != OK)
			panic("vsafecopy failed (%d)\n", r);

		/* Trigger write hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_WRITE);
	}

	/* Allocate grants for the data, in the same chunking as the original
	 * vector. This avoids performance fluctuations with bad hardware as
	 * observed with the filter driver.
	 */
	for (i = off = 0; i < count; i++) {
		len = iov[i].iov_size;

		iovec[i].iov_size = len;
		iovec[i].iov_grant = cpf_grant_direct(driver_endpt,
			(vir_bytes) (ptr + off), len,
			do_write ? CPF_READ : CPF_WRITE);
		assert(iovec[i].iov_grant != GRANT_INVALID);

		off += len;
	}

	grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
		count * sizeof(iovec[0]), CPF_READ);
	assert(grant != GRANT_INVALID);

	m.m_type = do_write ? BDEV_SCATTER : BDEV_GATHER;
	m.m_lbdev_lblockdriver_msg.minor = driver_minor;
	m.m_lbdev_lblockdriver_msg.count = count;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.flags = flags;
	m.m_lbdev_lblockdriver_msg.id = 0;
	m.m_lbdev_lblockdriver_msg.pos = position;

	if ((r = ipc_sendrec(driver_endpt, &m)) != OK)
		panic("ipc_sendrec to driver failed (%d)\n", r);

	if (m.m_type != BDEV_REPLY)
		panic("invalid reply from driver (%d)\n", m.m_type);

	cpf_revoke(grant);

	for (i = 0; i < count; i++)
		cpf_revoke(iovec[i].iov_grant);

	/* For read operations, finish by copying out the data read. */
	if (!do_write) {
		/* Trigger read hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_READ);

		/* Upon success, copy back whatever has been processed. */
		rsize = m.m_lblockdriver_lbdev_reply.status;

		for (i = j = off = 0; rsize > 0 && i < count; i++) {
			len = MIN(rsize, iov[i].iov_size);

			vscp_vec[j].v_from = SELF;
			vscp_vec[j].v_to = endpt;
			vscp_vec[j].v_gid = iov[i].iov_addr;
			vscp_vec[j].v_offset = 0;
			vscp_vec[j].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[j].v_bytes = len;

			off += len;
			rsize -= len;
			j++;
		}

		if (j > 0 && (r = sys_vsafecopy(vscp_vec, j)) != OK)
			panic("vsafecopy failed (%d)\n", r);
	}

	if (ptr != fbd_buf)
		free_contig(ptr, size);

	return m.m_lblockdriver_lbdev_reply.status;
}