//
// serial_write
//
// Writes up to 'count' bytes from 'buffer' to the serial port's transmit
// queue. Writers are serialized by tx_lock; each byte waits on tx_sem for
// free space in the fifo, bounded by the configured tx timeout. Returns
// the number of bytes actually queued, or -ETIMEOUT if the tx lock could
// not be acquired.
//
static int serial_write(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct serial_port *sp = (struct serial_port *) dev->privdata;
  unsigned int n;
  unsigned char *bufp;

  // Only one writer at a time; give up after the configured tx timeout.
  if (wait_for_object(&sp->tx_lock, sp->cfg.tx_timeout) < 0) return -ETIMEOUT;

  bufp = (unsigned char *) buffer;
  for (n = 0; n < count; n++) {
    // Wait until tx queue is not full
    if (wait_for_object(&sp->tx_sem, sp->cfg.tx_timeout) < 0) break;

    // Insert next char in transmit queue; interrupts disabled while
    // touching the fifo, which is shared with the interrupt path.
    cli();
    fifo_put(&sp->txq, *bufp++);
    sti();

    // If transmitter idle then restart transmission
    if (!sp->tx_busy) drain_tx_queue(sp);
  }

  release_mutex(&sp->tx_lock);

  // Fix: report the number of bytes actually queued. A tx timeout can end
  // the loop early; the original returned the full 'count' even then,
  // misreporting a partial write (serial_read correctly returns n).
  return n;
}
//
// ether_netif_add
//
// Binds an Ethernet packet device to a newly created network interface.
// If 'ipaddr' is the any-address, the interface configuration is obtained
// via DHCP, waiting up to 30 seconds for the binding to complete.
// Returns the new netif, or NULL on failure.
//
struct netif *ether_netif_add(char *name, char *devname, struct ip_addr *ipaddr, struct ip_addr *netmask, struct ip_addr *gw) {
  struct netif *netif;
  struct dhcp_state *state;
  dev_t devno;

  // Open device
  devno = dev_open(devname);
  if (devno == NODEV) return NULL;
  // NOTE(review): on the error returns below the opened device is never
  // closed -- confirm whether a matching device release is needed here.
  if (device(devno)->driver->type != DEV_TYPE_PACKET) return NULL;

  // Attach device to network interface
  netif = netif_add(name, ipaddr, netmask, gw);
  if (!netif) return NULL;
  netif->output = ether_output;
  netif->state = (void *) devno;  // device number stashed as opaque interface state
  dev_attach(devno, netif, ether_input);

  // Obtain network parameters using DHCP
  if (ip_addr_isany(ipaddr)) {
    state = dhcp_start(netif);
    if (state) {
      // Wait up to 30 seconds for the DHCP binding; on timeout the
      // interface is still returned, just without a configured address.
      if (wait_for_object(&state->binding_complete, 30000) < 0) {
        kprintf(KERN_WARNING "ether: timeout waiting for dhcp to complete on %s\n", name);
      }
    }
  }

  kprintf(KERN_INFO "%s: device %s addr %a mask %a gw %a\n", name, devname, &netif->ipaddr, &netif->netmask, &netif->gw);
  return netif;
}
//
// fetch_page
//
// Demand-loads the file-backed page containing 'addr'. Looks up and
// references the owning filemap object, locks it, and pulls the page in
// from the backing file if it is not already mapped. Returns 0 on
// success or a negative error code.
//
int fetch_page(void *addr) {
  struct filemap *fm;
  int rc;

  // olock() takes a reference on the filemap; every exit path below must
  // balance it with orel().
  fm = (struct filemap *) olock(virt2pfn(addr), OBJECT_FILEMAP);
  if (!fm) return -EBADF;

  rc = wait_for_object(fm, INFINITE);
  if (rc < 0) {
    orel(fm);
    return rc;
  }

  // Another thread may have faulted the page in while we waited for the
  // lock; only fetch if it is still unmapped.
  if (!page_mapped(addr)) {
    rc = fetch_file_page(fm, addr);
    if (rc < 0) {
      unlock_filemap(fm);
      orel(fm);
      return rc;
    }
  }

  rc = unlock_filemap(fm);
  if (rc < 0) {
    // Fix: release the object reference even when unlock fails; the
    // original returned here without orel(fm), leaking the reference
    // taken by olock().
    orel(fm);
    return rc;
  }

  orel(fm);
  return 0;
}
//
// enqueue
//
// Inserts 'msg' at the tail of the queue, blocking on the 'notfull'
// semaphore until a slot is free or 'timeout' expires. Returns 0 on
// success or -ETIMEOUT.
//
int enqueue(struct queue *q, void *msg, unsigned int timeout) {
  int rc;

  // Block until there is room in the queue (or the timeout expires).
  rc = wait_for_object(&q->notfull, timeout);
  if (rc < 0) return -ETIMEOUT;

  // Store the message at the tail slot and advance, wrapping at capacity.
  q->elems[q->in] = msg;
  q->in++;
  if (q->in == q->size) q->in = 0;

  // Signal consumers that a message is now available.
  release_sem(&q->notempty, 1);
  return 0;
}
//
// vmfree
//
// Decommits and/or releases a range of virtual memory. For file-mapped
// pages (PT_FILE) the owning filemap's page count is decremented; each
// filemap is locked while its pages are torn down and is freed outright
// once its page count drops to zero.
//
// 'addr'/'size' describe the range (rounded to page boundaries); 'type'
// is a mask of MEM_DECOMMIT and/or MEM_RELEASE. Returns 0 on success or
// a negative error code.
//
int vmfree(void *addr, unsigned long size, int type) {
  struct filemap *fm = NULL;  // currently locked filemap, batched across consecutive pages
  int pages = PAGES(size);
  int i, rc;
  char *vaddr;

  if (size == 0) return 0;
  addr = (void *) PAGEADDR(addr);
  if (!valid_range(addr, size)) return -EINVAL;

  if (type & (MEM_DECOMMIT | MEM_RELEASE)) {
    vaddr = (char *) addr;
    for (i = 0; i < pages; i++) {
      if (page_directory_mapped(vaddr)) {
        pte_t flags = get_page_flags(vaddr);
        unsigned long pfn = BTOP(virt2phys(vaddr));

        if (flags & PT_FILE) {
          // For file-backed pages the filemap handle lives either in the
          // page frame database (present pages) or directly in the PTE
          // frame field (non-present pages).
          handle_t h = (flags & PT_PRESENT) ? pfdb[pfn].owner : pfn;
          struct filemap *newfm = (struct filemap *) hlookup(h);

          // Only switch locks when the owning filemap changes, batching
          // lock/unlock work for runs of pages from the same file.
          if (newfm != fm) {
            if (fm) {
              if (fm->pages == 0) {
                rc = free_filemap(fm);
              } else {
                rc = unlock_filemap(fm);
              }
              if (rc < 0) return rc;
            }
            fm = newfm;
            rc = wait_for_object(fm, INFINITE);
            if (rc < 0) return rc;
            // NOTE(review): the early returns above leave the range only
            // partially freed; confirm callers tolerate partial teardown
            // on error.
          }

          fm->pages--;
          unmap_page(vaddr);
          if (flags & PT_PRESENT) free_pageframe(pfn);
        } else if (flags & PT_PRESENT) {
          // Anonymous committed page: just unmap and return the frame.
          unmap_page(vaddr);
          free_pageframe(pfn);
        }
      }
      vaddr += PAGESIZE;
    }
  }

  // Finish off the last locked filemap, freeing it if now empty.
  if (fm) {
    if (fm->pages == 0) {
      rc = free_filemap(fm);
    } else {
      rc = unlock_filemap(fm);
    }
    if (rc < 0) return rc;
  } else if (type & MEM_RELEASE) {
    // No file mappings were involved; return the range to the vmap.
    // NOTE(review): when file mappings WERE present (fm != NULL) the
    // rmap_free for MEM_RELEASE is skipped -- confirm this is intended.
    rmap_free(vmap, BTOP(addr), pages);
  }

  return 0;
}
//
// lock_fs
//
// Acquires the filesystem lock for operation 'fsop'. Operations the
// filesystem declares re-entrant need no serialization. Returns 0 on
// success or a negative error code.
//
int __inline lock_fs(struct fs *fs, int fsop) {
  // Re-entrant operations bypass locking entirely.
  if (fs->ops->reentrant & fsop) return 0;

  // Prefer the filesystem's own locking hook when provided; otherwise
  // fall back to the generic exclusive mutex with the VFS timeout.
  return fs->ops->lockfs ? fs->ops->lockfs(fs)
                         : wait_for_object(&fs->exclusive, VFS_LOCK_TIMEOUT);
}
//
// dequeue
//
// Removes and returns the message at the head of the queue, blocking on
// the 'notempty' semaphore until one arrives or 'timeout' expires.
// Returns NULL on timeout.
//
void *dequeue(struct queue *q, unsigned int timeout) {
  void *item;

  // Block until a message is present (or the timeout expires).
  if (wait_for_object(&q->notempty, timeout) < 0) return NULL;

  // Fetch from the head slot and advance, wrapping at capacity.
  item = q->elems[q->out];
  q->out++;
  if (q->out == q->size) q->out = 0;

  // Signal producers that a slot has been freed.
  release_sem(&q->notfull, 1);
  return item;
}
//
// serial_read
//
// Reads up to 'count' bytes from the serial port's receive queue into
// 'buffer'. Blocks for the first byte up to the configured rx timeout;
// subsequent bytes are taken only if already buffered. Returns the
// number of bytes read, or -ETIMEOUT if the rx lock could not be
// acquired.
//
static int serial_read(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct serial_port *sp = (struct serial_port *) dev->privdata;
  unsigned char *dst;
  unsigned int done;
  unsigned int tmo;

  // Only one reader at a time; give up after the configured rx timeout.
  if (wait_for_object(&sp->rx_lock, sp->cfg.rx_timeout) < 0) return -ETIMEOUT;

  dst = (unsigned char *) buffer;
  for (done = 0; done < count; done++) {
    // Wait for data on the first byte only; after that, return whatever
    // is already buffered instead of blocking again.
    tmo = (done == 0) ? sp->cfg.rx_timeout : 0;
    if (wait_for_object(&sp->rx_sem, tmo) < 0) break;

    // Pull the next byte from the receive fifo with interrupts disabled,
    // since the fifo is shared with the interrupt path.
    cli();
    dst[done] = fifo_get(&sp->rxq);
    sti();
  }

  release_mutex(&sp->rx_lock);
  return done;
}
//
// speedo_transmit
//
// Queues a packet for transmission on the Intel EtherExpress Pro/100
// (Speedo) NIC. The pbuf chain is first linearized into one contiguous
// buffer, then written into the next free slot of the Tx descriptor
// ring, and the chip's command unit is resumed. Returns 0 on success or
// a negative error code.
//
static int speedo_transmit(struct dev *dev, struct pbuf *p) {
  struct nic *sp = (struct nic *) dev->privdata;
  long ioaddr = sp->iobase;
  int entry;

  // The descriptor setup below uses a single buffer, so the frame must
  // be contiguous.
  p = pbuf_linearize(PBUF_RAW, p);
  if (!p) return -ENOMEM;

  // Wait for free entry in transmit ring
  if (wait_for_object(&sp->tx_sem, TX_TIMEOUT) < 0) {
    kprintf("%s: transmit timeout, drop packet\n", dev->name);
    sp->stats.tx_dropped++;
    // NOTE(review): the linearized pbuf is not freed on this path --
    // confirm ownership; this looks like a packet-buffer leak.
    return -ETIMEOUT;
  }

  // Caution: the write order is important here, set the base address
  // with the "ownership" bits last.

  // Calculate the Tx descriptor entry
  entry = sp->cur_tx % TX_RING_SIZE;

  sp->tx_pbuf[entry] = p;
  // TODO: be a little more clever about setting the interrupt bit
  sp->tx_ring[entry].status = CmdSuspend | CmdTx | CmdTxFlex;
  sp->cur_tx++;
  // Link this descriptor to the next ring slot (physical address).
  sp->tx_ring[entry].link = virt2phys(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
  sp->tx_ring[entry].tx_desc_addr = virt2phys(&sp->tx_ring[entry].tx_buf_addr0);

  // The data region is always in one buffer descriptor
  sp->tx_ring[entry].count = sp->tx_threshold;
  sp->tx_ring[entry].tx_buf_addr0 = virt2phys(p->payload);
  sp->tx_ring[entry].tx_buf_size0 = p->tot_len;

  // TODO: perhaps leave the interrupt bit set if the Tx queue is more
  // than half full. Argument against: we should be receiving packets
  // and scavenging the queue. Argument for: if so, it shouldn't matter.
  {
    struct descriptor *last_cmd = sp->last_cmd;
    sp->last_cmd = (struct descriptor *) &sp->tx_ring[entry];
    // Clear the suspend bit on the previously queued command so the chip
    // can advance into the new descriptor.
    clear_suspend(last_cmd);
  }

  // Kick the command unit back into action.
  wait_for_cmd_done(ioaddr + SCBCmd);
  outp(ioaddr + SCBCmd, CUResume);

  sp->trans_start = get_ticks();
  return 0;
}
//
// rtl8139_transmit
//
// Queues a packet for transmission on the RTL8139. Fragmented or
// non-dword-aligned pbufs are first copied into a per-slot alignment
// buffer; otherwise the chip DMAs directly from the pbuf payload.
// Returns 0 on success or a negative error code.
//
static int rtl8139_transmit(struct dev *dev, struct pbuf *p) {
  struct nic *tp = (struct nic *) dev->privdata;
  long ioaddr = tp->iobase;
  int entry;

  // Wait for free entry in transmit ring
  if (wait_for_object(&tp->tx_sem, TX_TIMEOUT) < 0) {
    kprintf(KERN_WARNING "%s: transmit timeout, drop packet\n", dev->name);
    tp->stats.tx_dropped++;
    return -ETIMEOUT;
  }

  // Calculate the next Tx descriptor entry
  entry = tp->cur_tx % NUM_TX_DESC;

  tp->tx_pbuf[entry] = p;
  if (p->next || ((unsigned long) (p->payload) & 3)) {
    struct pbuf *q;
    unsigned char *ptr;

    // Must use alignment buffer: flatten the whole chain into the
    // per-slot bounce buffer.
    q = p;
    ptr = tp->tx_buf[entry];
    while (q) {
      memcpy(ptr, q->payload, q->len);
      ptr += q->len;
      q = q->next;
    }

    outpd(ioaddr + TxAddr0 + entry * 4, virt2phys(tp->tx_buf[entry]));
  } else {
    // Single aligned fragment: DMA directly from the pbuf payload.
    outpd(ioaddr + TxAddr0 + entry * 4, virt2phys(p->payload));
  }

  // Note: the chip doesn't have auto-pad!
  // Writing the status register hands the slot to the hardware; pad
  // short frames up to the minimum Ethernet length in software.
  outpd(ioaddr + TxStatus0 + entry * 4, tp->tx_flag | (p->tot_len >= ETH_ZLEN ? p->tot_len : ETH_ZLEN));

  tp->trans_start = get_ticks();
  tp->cur_tx++;

  return 0;
}
//
// vmsync
//
// Writes dirty file-mapped pages in [addr, addr+size) back to their
// backing files. Only pages that are file-backed, present and dirty
// (PT_FILE | PT_PRESENT | PT_DIRTY all set) are flushed. Returns 0 on
// success or a negative error code.
//
int vmsync(void *addr, unsigned long size) {
  struct filemap *fm = NULL;  // currently locked filemap, batched across consecutive pages
  int pages = PAGES(size);
  int i, rc;
  char *vaddr;

  if (size == 0) return 0;
  addr = (void *) PAGEADDR(addr);
  if (!valid_range(addr, size)) return -EINVAL;

  vaddr = (char *) addr;
  for (i = 0; i < pages; i++) {
    if (page_directory_mapped(vaddr)) {
      pte_t flags = get_page_flags(vaddr);

      if ((flags & (PT_FILE | PT_PRESENT | PT_DIRTY)) == (PT_FILE | PT_PRESENT | PT_DIRTY)) {
        unsigned long pfn = BTOP(virt2phys(vaddr));
        struct filemap *newfm = (struct filemap *) hlookup(pfdb[pfn].owner);

        // Re-lock only when the owning filemap changes, batching
        // consecutive pages that belong to the same file.
        if (newfm != fm) {
          if (fm) {
            rc = unlock_filemap(fm);
            if (rc < 0) return rc;
          }
          fm = newfm;
          rc = wait_for_object(fm, INFINITE);
          if (rc < 0) return rc;
        }

        rc = save_file_page(fm, vaddr);
        // NOTE(review): returning here leaves fm locked; confirm whether
        // unlock_filemap should run on this error path.
        if (rc < 0) return rc;
      }
    }
    vaddr += PAGESIZE;
  }

  // Release the last locked filemap, if any.
  if (fm) {
    rc = unlock_filemap(fm);
    if (rc < 0) return rc;
  }

  return 0;
}
//
// serial_ioctl
//
// Dispatches serial device ioctls: line configuration get/set, event
// waiting, status query, DTR/RTS modem control and tx/rx fifo flushing.
// Returns 0 (or a wait result) on success, or a negative error code.
//
static int serial_ioctl(struct dev *dev, int cmd, void *args, size_t size) {
  struct serial_port *sp = (struct serial_port *) dev->privdata;
  struct serial_status *ss;

  switch (cmd) {
    case IOCTL_GETDEVSIZE:
      // Character device: no meaningful size.
      return 0;

    case IOCTL_GETBLKSIZE:
      // Byte-oriented device: block size is one byte.
      return 1;

    case IOCTL_SERIAL_SETCONFIG:
      if (!args || size != sizeof(struct serial_config)) return -EINVAL;
      memcpy(&sp->cfg, args, sizeof(struct serial_config));
      serial_config(sp);  // apply the new line parameters to the UART
      return 0;

    case IOCTL_SERIAL_GETCONFIG:
      if (!args || size != sizeof(struct serial_config)) return -EINVAL;
      memcpy(args, &sp->cfg, sizeof(struct serial_config));
      return 0;

    case IOCTL_SERIAL_WAITEVENT:
      // No args: wait forever. 4-byte arg: wait with caller's timeout.
      if (!args && size == 0) {
        return wait_for_object(&sp->event, INFINITE);
      } else if (args && size == 4) {
        return wait_for_object(&sp->event, *(unsigned int *) args);
      } else {
        return -EINVAL;
      }

    case IOCTL_SERIAL_STAT:
      if (!args || size != sizeof(struct serial_status)) return -EINVAL;
      ss = (struct serial_status *) args;
      // Reading the line status clears the accumulated error bits.
      ss->linestatus = sp->linestatus;
      sp->linestatus = 0;
      ss->modemstatus = inp((unsigned short) (sp->iobase + UART_MSR)) & 0xFF;
      ss->rx_queue_size = sp->rxq.count;
      ss->tx_queue_size = sp->txq.count;
      return 0;

    case IOCTL_SERIAL_DTR:
      // Raise/lower the DTR modem-control line (4-byte boolean arg).
      if (!args || size != 4) return -EINVAL;
      if (*(int *) args) {
        sp->mcr |= MCR_DTR;
      } else {
        sp->mcr &= ~MCR_DTR;
      }
      outp(sp->iobase + UART_MCR, sp->mcr);
      return 0;

    case IOCTL_SERIAL_RTS:
      // Raise/lower the RTS modem-control line (4-byte boolean arg).
      if (!args || size != 4) return -EINVAL;
      if (*(int *) args) {
        sp->mcr |= MCR_RTS;
      } else {
        sp->mcr &= ~MCR_RTS;
      }
      outp(sp->iobase + UART_MCR, sp->mcr);
      return 0;

    case IOCTL_SERIAL_FLUSH_TX_BUFFER:
      // Drop all pending output: clear the software fifo, reset the tx
      // semaphore to full capacity, and reset the hardware FIFO on a
      // 16550A. Interrupts are disabled so the ISR cannot interleave.
      cli();
      fifo_clear(&sp->txq);
      set_sem(&sp->tx_sem, QUEUE_SIZE);
      sp->tx_queue_rel = 0;
      if (sp->type == UART_16550A) outp(sp->iobase + UART_FCR, FCR_ENABLE | FCR_XMT_RST | FCR_TRIGGER_14);
      sti();
      return 0;

    case IOCTL_SERIAL_FLUSH_RX_BUFFER:
      // Drop all pending input: clear the software fifo, zero the rx
      // semaphore (nothing available), and reset the hardware FIFO on a
      // 16550A.
      cli();
      fifo_clear(&sp->rxq);
      set_sem(&sp->rx_sem, 0);
      sp->rx_queue_rel = 0;
      if (sp->type == UART_16550A) outp(sp->iobase + UART_FCR, FCR_ENABLE | FCR_RCV_RST | FCR_TRIGGER_14);
      sti();
      return 0;
  }

  return -ENOSYS;
}
//
// netif_ioctl_cfg
//
// Applies an interface configuration (struct ifcfg) to a network
// interface: up/down state, DHCP vs. static addressing, addresses and
// default-route selection. The resulting addresses are copied back into
// the caller's ifcfg. Returns 0 on success or a negative error code.
//
int netif_ioctl_cfg(void *data, size_t size) {
  struct netif *netif;
  struct ifcfg *ifcfg;
  struct dhcp_state *state;

  if (!data) return -EFAULT;
  if (size != sizeof(struct ifcfg)) return -EINVAL;
  ifcfg = (struct ifcfg *) data;

  netif = netif_find(ifcfg->name);
  if (!netif) return -ENXIO;

  // Check for interface going down. Fix: test the masked flag with != 0
  // rather than == 1, which is only correct if the flag happens to be
  // bit 0.
  if ((ifcfg->flags & IFCFG_UP) == 0 && (netif->flags & NETIF_UP) != 0) {
    // Release DHCP lease
    dhcp_stop(netif);
    netif->flags &= ~NETIF_UP;
  }

  // Update network interface configuration
  if (ifcfg->flags & IFCFG_DHCP) {
    netif->flags |= NETIF_DHCP;
  } else {
    netif->flags &= ~NETIF_DHCP;
  }

  netif->ipaddr.addr = ((struct sockaddr_in *) &ifcfg->addr)->sin_addr.s_addr;
  netif->netmask.addr = ((struct sockaddr_in *) &ifcfg->netmask)->sin_addr.s_addr;
  netif->gw.addr = ((struct sockaddr_in *) &ifcfg->gw)->sin_addr.s_addr;
  netif->broadcast.addr = ((struct sockaddr_in *) &ifcfg->broadcast)->sin_addr.s_addr;

  // Derive the directed broadcast address when none was supplied.
  if (netif->broadcast.addr == IP_ADDR_ANY) {
    netif->broadcast.addr = (netif->ipaddr.addr & netif->netmask.addr) | ~(netif->netmask.addr);
  }

  if (ifcfg->flags & IFCFG_DEFAULT) {
    netif_default = netif;
  } else if (netif == netif_default) {
    netif_default = NULL;
  }

  // Copy hwaddr into ifcfg as info
  memcpy(ifcfg->hwaddr, &netif->hwaddr, sizeof(struct eth_addr));

  // Check for interface coming up (same flag-test fix as above).
  if ((ifcfg->flags & IFCFG_UP) != 0 && (netif->flags & NETIF_UP) == 0) {
    netif->flags |= NETIF_UP;

    if (netif->flags & NETIF_DHCP) {
      // Obtain network parameters using DHCP and report the bound
      // addresses back to the caller.
      state = dhcp_start(netif);
      if (state) {
        // Fix: return -ETIMEOUT for consistency with the rest of this
        // file (the original returned -ETIMEDOUT here only).
        if (wait_for_object(&state->binding_complete, 30000) < 0) return -ETIMEOUT;
        ((struct sockaddr_in *) &ifcfg->addr)->sin_addr.s_addr = netif->ipaddr.addr;
        ((struct sockaddr_in *) &ifcfg->netmask)->sin_addr.s_addr = netif->netmask.addr;
        ((struct sockaddr_in *) &ifcfg->gw)->sin_addr.s_addr = netif->gw.addr;
        ((struct sockaddr_in *) &ifcfg->broadcast)->sin_addr.s_addr = netif->broadcast.addr;
      }
    }
  }

  return 0;
}