/*
 * Copy a number of bytes from or to the caller, to or from the intermediate
 * buffer. If the given endpoint is SELF, a local memory copy must be made.
 */
static int
vnd_copy(iovec_s_t *iov, size_t iov_off, size_t bytes, endpoint_t endpt,
	int do_write)
{
	struct vscp_vec vvec[SCPVEC_NR];
	size_t done, step;
	int nvecs;
	char *local;

	assert(bytes > 0 && bytes <= VND_BUF_SIZE);

	nvecs = 0;

	for (done = 0; done < bytes; done += step) {
		/* Limit this step to what remains of the current element. */
		step = MIN(bytes - done, iov->iov_size - iov_off);

		if (endpt == SELF) {
			/* Local copy: the grant field holds an address. */
			local = (char *) iov->iov_grant + iov_off;

			if (do_write)
				memcpy(&state.buf[done], local, step);
			else
				memcpy(local, &state.buf[done], step);
		} else {
			/* Remote copy: queue up a vector entry instead. */
			assert(nvecs < SCPVEC_NR); /* SCPVEC_NR >= NR_IOREQS */

			vvec[nvecs].v_from = do_write ? endpt : SELF;
			vvec[nvecs].v_to = do_write ? SELF : endpt;
			vvec[nvecs].v_bytes = step;
			vvec[nvecs].v_gid = iov->iov_grant;
			vvec[nvecs].v_offset = iov_off;
			vvec[nvecs].v_addr = (vir_bytes) &state.buf[done];
			nvecs++;
		}

		/* Advance to the next vector element once exhausted. */
		iov_off += step;
		if (iov_off == iov->iov_size) {
			iov++;
			iov_off = 0;
		}
	}

	if (endpt == SELF)
		return OK;

	return sys_vsafecopy(vvec, nvecs);
}
/*
 * Copy data described by up to 'count' grants in a remote process's I/O
 * vector into a single local buffer of at most 'max' bytes. On success,
 * the total number of bytes copied is stored in 'copied' (if not NULL).
 *
 * Returns OK on success, ENOMEM if an iovec element does not fit in the
 * remaining buffer space, or the sys_vsafecopy() error code on failure.
 */
static int
sys_easy_vsafecopy_from(endpoint_t src_proc, iovec_s_t *iov, int count,
	vir_bytes dst, size_t max, size_t *copied)
{
	int i, r;
	size_t left = max;
	vir_bytes cur_off = 0;
	struct vscp_vec vv[NR_IOREQS];

	for (i = 0; i < count && left > 0; i++) {
		/* More data in iov than the buffer can hold; this should be
		 * manageable by the caller.
		 */
		if (iov[i].iov_size > left) {
			printf("sys_easy_vsafecopy_from: buf too small!\n");
			return ENOMEM;
		}

		vv[i].v_from = src_proc;
		vv[i].v_to = SELF;
		vv[i].v_gid = iov[i].iov_grant;
		vv[i].v_offset = 0;
		vv[i].v_addr = dst + cur_off;
		vv[i].v_bytes = iov[i].iov_size;

		left -= iov[i].iov_size;
		cur_off += iov[i].iov_size;
	}

	/* Now that we prepared the vscp_vec, we can call vsafecopy(). Pass
	 * only the 'i' entries actually filled in: the loop may terminate
	 * early once the buffer is full, leaving later entries of 'vv'
	 * uninitialized.
	 */
	if ((r = sys_vsafecopy(vv, i)) != OK) {
		printf("sys_vsafecopy: failed: (%d)\n", r);
		return r;
	}

	if (copied) *copied = cur_off;

	return OK;
}
/*===========================================================================*
 *				fbd_transfer_copy			     *
 *===========================================================================*/
static ssize_t fbd_transfer_copy(int do_write, u64_t position,
	endpoint_t endpt, iovec_t *iov, unsigned int count, size_t size,
	int flags)
{
	/* Interpose on the request: copy the data through a local buffer so
	 * that the I/O hooks can inspect and modify it, and forward the
	 * request to the real driver with grants for that local buffer.
	 * Returns the driver's reply status (bytes processed or an error).
	 */
	iovec_s_t iovec[NR_IOREQS];
	struct vscp_vec vscp_vec[SCPVEC_NR];
	cp_grant_id_t grant;
	size_t off, len;
	message m;
	char *ptr;
	int i, j, r;
	ssize_t rsize;

	assert(count > 0 && count <= SCPVEC_NR);

	/* Use the static buffer when the request fits; otherwise allocate a
	 * temporary contiguous buffer for the duration of the transfer.
	 */
	if (size > BUF_SIZE) {
		/* %zu: 'size' is a size_t; %d would be undefined behavior. */
		printf("FBD: allocating memory for %zu bytes\n", size);

		ptr = alloc_contig(size, 0, NULL);
		assert(ptr != NULL);
	}
	else ptr = fbd_buf;

	/* For write operations, first copy in the data to write. */
	if (do_write) {
		for (i = off = 0; i < count; i++) {
			len = iov[i].iov_size;

			vscp_vec[i].v_from = endpt;
			vscp_vec[i].v_to = SELF;
			vscp_vec[i].v_gid = iov[i].iov_addr;
			vscp_vec[i].v_offset = 0;
			vscp_vec[i].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[i].v_bytes = len;

			off += len;
		}

		if ((r = sys_vsafecopy(vscp_vec, i)) != OK)
			panic("vsafecopy failed (%d)\n", r);

		/* Trigger write hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_WRITE);
	}

	/* Allocate grants for the data, in the same chunking as the original
	 * vector. This avoids performance fluctuations with bad hardware as
	 * observed with the filter driver.
	 */
	for (i = off = 0; i < count; i++) {
		len = iov[i].iov_size;

		iovec[i].iov_size = len;
		iovec[i].iov_grant = cpf_grant_direct(driver_endpt,
			(vir_bytes) (ptr + off), len,
			do_write ? CPF_READ : CPF_WRITE);
		assert(iovec[i].iov_grant != GRANT_INVALID);

		off += len;
	}

	/* One indirect grant covering the rewritten vector itself. */
	grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
		count * sizeof(iovec[0]), CPF_READ);
	assert(grant != GRANT_INVALID);

	/* Forward the rewritten request to the real driver and wait. */
	m.m_type = do_write ? BDEV_SCATTER : BDEV_GATHER;
	m.m_lbdev_lblockdriver_msg.minor = driver_minor;
	m.m_lbdev_lblockdriver_msg.count = count;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.flags = flags;
	m.m_lbdev_lblockdriver_msg.id = 0;
	m.m_lbdev_lblockdriver_msg.pos = position;

	if ((r = ipc_sendrec(driver_endpt, &m)) != OK)
		panic("ipc_sendrec to driver failed (%d)\n", r);

	if (m.m_type != BDEV_REPLY)
		panic("invalid reply from driver (%d)\n", m.m_type);

	/* Revoke all grants before touching the buffer again. */
	cpf_revoke(grant);

	for (i = 0; i < count; i++)
		cpf_revoke(iovec[i].iov_grant);

	/* For read operations, finish by copying out the data read. */
	if (!do_write) {
		/* Trigger read hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_READ);

		/* Upon success, copy back whatever has been processed. */
		rsize = m.m_lblockdriver_lbdev_reply.status;

		for (i = j = off = 0; rsize > 0 && i < count; i++) {
			len = MIN(rsize, iov[i].iov_size);

			vscp_vec[j].v_from = SELF;
			vscp_vec[j].v_to = endpt;
			vscp_vec[j].v_gid = iov[i].iov_addr;
			vscp_vec[j].v_offset = 0;
			vscp_vec[j].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[j].v_bytes = len;

			off += len;
			rsize -= len;
			j++;
		}

		if (j > 0 && (r = sys_vsafecopy(vscp_vec, j)) != OK)
			panic("vsafecopy failed (%d)\n", r);
	}

	/* Release the temporary buffer, if one was allocated. */
	if (ptr != fbd_buf)
		free_contig(ptr, size);

	return m.m_lblockdriver_lbdev_reply.status;
}