static void driver_setup_read(struct nic * nic) { message m; debug_print("device /dev/%s", nic->name); //assert(nic->rx_pbuf == NULL); if (!(nic->rx_pbuf == NULL)) { panic("device /dev/%s rx_pbuf %p", nic->name, nic->rx_pbuf); } if (!(nic->rx_pbuf = pbuf_alloc(PBUF_RAW, ETH_MAX_PACK_SIZE + ETH_CRC_SIZE, PBUF_RAM))) panic("Cannot allocate rx pbuf"); if (cpf_setgrant_direct(nic->rx_iovec[0].iov_grant, nic->drv_ep, (vir_bytes) nic->rx_pbuf->payload, nic->rx_pbuf->len, CPF_WRITE) != OK) panic("Failed to set grant"); nic->rx_iovec[0].iov_size = nic->rx_pbuf->len; m.m_type = DL_READV_S; m.DL_COUNT = 1; m.DL_GRANT = nic->rx_iogrant; if (asynsend(nic->drv_ep, &m) != OK) panic("asynsend to the driver failed!"); }
/*****************************************************************************
 *    _ddekit_minix_queue_msg                                                *
 ****************************************************************************/
static void _ddekit_minix_queue_msg(struct ddekit_minix_msg_q *mq, message *m)
{
	/* Append message *m to the receive queue mq, or, when the queue is
	 * full, reject the message by replying EAGAIN to its sender.
	 */
	int full;

	/* Try to take a write slot; failure means the queue is full. */
	full = ddekit_sem_down_try(mq->msg_w_sem);

	if (full) {
		/* Our message queue is full... inform the sender. */
		int result;

		/* BUGFIX: corrected typos in the warning text
		 * ("Ommiting ingoing" -> "Omitting incoming"). */
		DDEBUG_MSG_WARN("Receive queue is full. Omitting incoming msg.\n");

		m->m_type = TASK_REPLY;
		m->REP_STATUS = EAGAIN;

		result = asynsend(m->m_source, m);
		if (result != 0) {
			ddekit_panic("unable to send reply to %d: %d\n",
			    m->m_source, result);
		}
	} else {
		/* queue the message */
		memcpy(&mq->messages[mq->msg_w_pos], m, sizeof(message));

		/* Advance the write position, wrapping around the ring. */
		if (++mq->msg_w_pos == MESSAGE_QUEUE_SIZE) {
			mq->msg_w_pos = 0;
		}

		DDEBUG_MSG_VERBOSE("ddekit_minix_queue_msg: queueing msg %x\n",
		    m->m_type);

		/* Wake up a reader waiting on the queue. */
		ddekit_sem_up(mq->msg_r_sem);
	}
}
int driver_tx(struct nic * nic) { struct packet_q * pkt; unsigned len; message m; int err; debug_print("device /dev/%s", nic->name); assert(nic->tx_buffer); pkt = driver_tx_head(nic); if (pkt == NULL) { debug_print("no packets enqueued"); return 0; } assert(pkt->buf_len <= nic->max_pkt_sz); if ((len = pkt->buf_len) < nic->min_pkt_sz) len = nic->min_pkt_sz; err = cpf_setgrant_direct(nic->tx_iovec[0].iov_grant, nic->drv_ep, (vir_bytes) pkt->buf, len, CPF_READ); debug_print("packet len %d", len); if (err != OK) panic("Failed to set grant"); nic->tx_iovec[0].iov_size = len; if (cpf_setgrant_direct(nic->tx_iogrant, nic->drv_ep, (vir_bytes) &nic->tx_iovec, sizeof(iovec_s_t), CPF_READ) != OK) panic("Failed to set grant"); m.m_type = DL_WRITEV_S; m.DL_COUNT = 1; m.DL_GRANT = nic->tx_iogrant; if (asynsend(nic->drv_ep, &m) != OK) panic("asynsend to the driver failed!"); nic->state = DRV_SENDING; debug_print("packet sent to driver"); return 1; }
/*===========================================================================*
 *				vfs_close				     *
 *===========================================================================*/
PUBLIC int vfs_close(struct vmproc *for_who, callback_t callback, int fd)
{
	/* Ask VFS to close file descriptor fd on behalf of for_who; the
	 * reply is delivered asynchronously via the registered callback.
	 * NOTE(review): m is static -- presumably asynsend() requires the
	 * message to stay valid until delivery; confirm against the IPC docs.
	 */
	static message m;
	int result;

	register_callback(for_who, callback, VM_VFS_REPLY_CLOSE);

	m.m_type = VM_VFS_CLOSE;
	m.VMVC_ENDPOINT = for_who->vm_endpoint;
	m.VMVC_FD = fd;

	result = asynsend(VFS_PROC_NR, &m);
	if (result != OK)
		vm_panic("vfs_close: asynsend failed", result);

	return result;
}
/*===========================================================================*
 *				vfs_open				     *
 *===========================================================================*/
PUBLIC int vfs_open(struct vmproc *for_who, callback_t callback,
	cp_grant_id_t filename_gid, int filename_len, int flags, int mode)
{
	/* Ask VFS to open a file (named via the grant filename_gid) on
	 * behalf of for_who; the reply is delivered asynchronously via the
	 * registered callback.
	 * NOTE(review): m is static -- presumably asynsend() requires the
	 * message to stay valid until delivery; confirm against the IPC docs.
	 */
	static message m;
	int result;

	register_callback(for_who, callback, VM_VFS_REPLY_OPEN);

	m.m_type = VM_VFS_OPEN;
	m.VMVO_NAME_GRANT = filename_gid;
	m.VMVO_NAME_LENGTH = filename_len;
	m.VMVO_FLAGS = flags;
	m.VMVO_MODE = mode;
	m.VMVO_ENDPOINT = for_who->vm_endpoint;

	result = asynsend(VFS_PROC_NR, &m);
	if (result != OK)
		vm_panic("vfs_open: asynsend failed", result);

	return result;
}
/*===========================================================================*
 *				sync_reply				     *
 *===========================================================================*/
static void sync_reply(message *m_ptr, int ipc_status, int reply)
{
	/* Reply to a message sent to the driver. The reply is written back
	 * into *m_ptr and sent to its original source; 'reply' becomes the
	 * REP_STATUS field.
	 */
	endpoint_t caller_e, user_e;
	int r;

	caller_e = m_ptr->m_source;
	user_e = m_ptr->USER_ENDPT;

	m_ptr->m_type = TASK_REPLY;
	m_ptr->REP_ENDPT = user_e;
	m_ptr->REP_STATUS = reply;

	/* If we would block sending the message, send it asynchronously. */
	if (IPC_STATUS_CALL(ipc_status) == SENDREC)
		r = sendnb(caller_e, m_ptr);
	else
		r = asynsend(caller_e, m_ptr);

	if (r != OK)
		/* BUGFIX: the diagnostic said "driver_reply" but this
		 * function is sync_reply; report the correct origin. */
		printf("sync_reply: unable to send reply to %d: %d\n",
			caller_e, r);
}
/*===========================================================================*
 *				async_reply				     *
 *===========================================================================*/
static void async_reply(message *mess, int r)
{
	/* Send a reply using the asynchronous character device protocol.
	 * The reply message type depends on the request type in *mess;
	 * 'r' is the status (byte count or error code) to report.
	 */
	message reply_mess;

	/* Do not reply with ERESTART in this protocol. The only possible
	 * caller, VFS, will find out through other means when we have
	 * restarted, and is not (fully) ready to deal with ERESTART errors.
	 */
	if (r == ERESTART)
		return;

	memset(&reply_mess, 0, sizeof(reply_mess));

	switch (mess->m_type) {
	case DEV_OPEN:
		reply_mess.m_type = DEV_OPEN_REPL;
		reply_mess.REP_ENDPT = mess->USER_ENDPT;
		reply_mess.REP_STATUS = r;
		break;

	case DEV_CLOSE:
		reply_mess.m_type = DEV_CLOSE_REPL;
		reply_mess.REP_ENDPT = mess->USER_ENDPT;
		reply_mess.REP_STATUS = r;
		break;

	case DEV_READ_S:
	case DEV_WRITE_S:
	case DEV_IOCTL_S:
		if (r == SUSPEND)
			printf("driver_task: reviving %d (%d) with SUSPEND\n",
				mess->m_source, mess->USER_ENDPT);

		reply_mess.m_type = DEV_REVIVE;
		reply_mess.REP_ENDPT = mess->USER_ENDPT;
		reply_mess.REP_IO_GRANT = (cp_grant_id_t) mess->IO_GRANT;
		reply_mess.REP_STATUS = r;
		break;

	case CANCEL:
		/* The original request should send a reply. */
		return;

	case DEV_SELECT:
		reply_mess.m_type = DEV_SEL_REPL1;
		reply_mess.DEV_MINOR = mess->DEVICE;
		reply_mess.DEV_SEL_OPS = r;
		break;

	default:
		reply_mess.m_type = TASK_REPLY;
		reply_mess.REP_ENDPT = mess->USER_ENDPT;
		/* Status is # of bytes transferred or error code. */
		reply_mess.REP_STATUS = r;
		break;
	}

	r = asynsend(mess->m_source, &reply_mess);
	if (r != OK)
		/* BUGFIX: the diagnostic said "asyn_reply" but this function
		 * is async_reply; report the correct function name. */
		printf("async_reply: unable to asynsend reply to %d: %d\n",
			mess->m_source, r);
}