/* this function cycles through the ports table, deleting all the ports that are owned by the passed proc_id */
int port_delete_owned_ports(proc_id owner)
{
	int i;
	int count = 0;	// number of ports deleted

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;

	int_disable_interrupts();
	GRAB_PORT_LIST_LOCK();

	for(i=0; i<MAX_PORTS; i++) {
		// a slot is in use when its id is not -1
		if(ports[i].id != -1 && ports[i].owner == owner) {
			port_id id = ports[i].id;

			// port_delete() frees memory and deletes sems, which may block,
			// so drop the list lock and re-enable interrupts around the call;
			// the scan resumes from the same index afterwards
			RELEASE_PORT_LIST_LOCK();
			int_restore_interrupts();

			port_delete(id);
			count++;

			int_disable_interrupts();
			GRAB_PORT_LIST_LOCK();
		}
	}

	RELEASE_PORT_LIST_LOCK();
	int_restore_interrupts();

	return count;
}
/*
 * port_close: marks the port as closed so that subsequent writes are
 * refused (see the 'closed' check in port_write_etc); pending messages
 * can still be read.  Returns NO_ERROR or an ERR_* code.
 */
int port_close(port_id id)
{
	int slot;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	// port ids are allocated so that id % MAX_PORTS yields the table slot
	slot = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	// stale or recycled id: this slot no longer holds the requested port
	if (ports[slot].id != id) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		return ERR_INVALID_HANDLE;
	}

	// mark port to disable writing
	ports[slot].closed = true;

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	return NO_ERROR;
}
/*
 * port_count: reports how many messages are currently queued on the port
 * (the count of its read semaphore, clamped at zero).
 */
int32 port_count(port_id id)
{
	int idx;
	int msg_count;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	idx = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[idx]);

	if(ports[idx].id != id) {
		RELEASE_PORT_LOCK(ports[idx]);
		int_restore_interrupts();
		dprintf("port_count: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}

	sem_get_count(ports[idx].read_sem, &msg_count);
	// do not return negative numbers
	if (msg_count < 0)
		msg_count = 0;

	RELEASE_PORT_LOCK(ports[idx]);
	int_restore_interrupts();

	// return count of messages (sem_count)
	return msg_count;
}
/*
 * arch_thread_enter_uspace: performs the one-way transition of 'thread' into
 * user mode, jumping to 'entry' with argument 'args' on the user stack ending
 * at 'ustack_top'.  Does not return.
 */
void arch_thread_enter_uspace(struct thread *t, addr_t entry, void *args, addr_t ustack_top)
{
	// fix: message previously said "arch_thread_entry_uspace", which does
	// not match the actual function name and made log grepping misleading
	dprintf("arch_thread_enter_uspace: thread 0x%x, entry 0x%lx, args %p, ustack_top 0x%lx\n",
		t->id, entry, args, ustack_top);

	// make sure the fpu is in a good state
	asm("fninit");

	/*
	 * semi-hack: touch the user space stack first to page it in before we
	 * disable interrupts. We can't take a fault with interrupts off
	 */
	{
		int a = *(volatile long *)(ustack_top - 4);
		TOUCH(a);
	}

	int_disable_interrupts();

	// kernel stack for when this thread traps back into the kernel
	x86_64_set_kstack(t->kernel_stack_base + KSTACK_SIZE);

	// set the interrupt disable count to zero, since we'll have ints enabled as soon as we enter user space
	t->int_disable_level = 0;

	x86_64_enter_uspace(entry, args, ustack_top - 8);
}
/*
 * port_set_owner: transfers ownership of the port to the process 'proc'.
 */
int port_set_owner(port_id id, proc_id proc)
{
	int idx;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	idx = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[idx]);

	if(ports[idx].id != id) {
		RELEASE_PORT_LOCK(ports[idx]);
		int_restore_interrupts();
		dprintf("port_set_owner: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}

	// transfer ownership to other process
	ports[idx].owner = proc;

	// unlock port
	RELEASE_PORT_LOCK(ports[idx]);
	int_restore_interrupts();

	return NO_ERROR;
}
port_id port_find(const char *port_name) { int i; int ret_val = ERR_INVALID_HANDLE; if(ports_active == false) return ERR_PORT_NOT_ACTIVE; if(port_name == NULL) return ERR_INVALID_HANDLE; // lock list of ports int_disable_interrupts(); GRAB_PORT_LIST_LOCK(); // loop over list for(i=0; i<MAX_PORTS; i++) { // lock every individual port before comparing GRAB_PORT_LOCK(ports[i]); if(ports[i].id >= 0 && strcmp(port_name, ports[i].name) == 0) { ret_val = ports[i].id; RELEASE_PORT_LOCK(ports[i]); break; } RELEASE_PORT_LOCK(ports[i]); } RELEASE_PORT_LIST_LOCK(); int_restore_interrupts(); return ret_val; }
/*
 * port_delete: destroys a port.  The slot is invalidated under the port
 * lock; the actual freeing (queued cbufs, queue array, name, semaphores)
 * happens after the lock is dropped, since those operations may block.
 */
int port_delete(port_id id)
{
	int slot;
	sem_id r_sem, w_sem;
	int capacity;
	int i;
	char *old_name;
	struct port_msg *q;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	slot = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	if(ports[slot].id != id) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		dprintf("port_delete: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}

	/* mark port as invalid */
	ports[slot].id	 = -1;
	// snapshot everything we still need, then clear the slot's name so no
	// one can match it; resources are released after dropping the lock
	old_name = ports[slot].name;
	q = ports[slot].msg_queue;
	r_sem = ports[slot].read_sem;
	w_sem = ports[slot].write_sem;
	capacity = ports[slot].capacity;
	ports[slot].name = NULL;

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// delete the cbuf's that are left in the queue (if any)
	for (i=0; i<capacity; i++) {
		if (q[i].data_cbuf != NULL)
			cbuf_free_chain(q[i].data_cbuf);
	}

	kfree(q);
	kfree(old_name);

	// release the threads that were blocking on this port by deleting the sem
	// read_port() will see the ERR_SEM_DELETED acq_sem() return value, and act accordingly
	sem_delete(r_sem);
	sem_delete(w_sem);

	return NO_ERROR;
}
/*
 * port_get_next_port_info: iterator over the ports owned by 'proc'.
 * '*cookie' carries the scan position between calls: pass 0 to start and
 * feed back the value stored by the previous call to continue.  On success
 * fills 'info' and returns NO_ERROR; returns ERR_PORT_NOT_FOUND when no
 * further port owned by 'proc' exists.
 */
int port_get_next_port_info(proc_id proc, uint32 *cookie, struct port_info *info)
{
	int slot;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if (cookie == NULL)
		return ERR_INVALID_ARGS;

	// fix: the cookie is an integer, not a pointer — it was compared to NULL
	if (*cookie == 0) {
		// return first found
		slot = 0;
	} else {
		// start at index cookie, but check cookie against MAX_PORTS
		slot = *cookie;
		if (slot >= MAX_PORTS)
			return ERR_INVALID_HANDLE;
	}

	// spinlock
	int_disable_interrupts();
	GRAB_PORT_LIST_LOCK();

	info->id = -1; // used as found flag
	while (slot < MAX_PORTS) {
		GRAB_PORT_LOCK(ports[slot]);
		if (ports[slot].id != -1 && ports[slot].owner == proc) {
			// found one! copy the info
			info->id = ports[slot].id;
			info->owner = ports[slot].owner;
			// fix: strncpy with n == strlen(name) never wrote a NUL
			// terminator into info->name; strlcpy always terminates
			// and truncates to the buffer size
			strlcpy(info->name, ports[slot].name, SYS_MAX_OS_NAME_LEN);
			info->capacity = ports[slot].capacity;
			sem_get_count(ports[slot].read_sem, &info->queue_count);
			info->total_count = ports[slot].total_count;
			RELEASE_PORT_LOCK(ports[slot]);
			slot++;
			break;
		}
		RELEASE_PORT_LOCK(ports[slot]);
		slot++;
	}

	RELEASE_PORT_LIST_LOCK();
	int_restore_interrupts();

	if (info->id == -1)
		return ERR_PORT_NOT_FOUND;
	*cookie = slot;
	return NO_ERROR;
}
/*
 * arch_smp_send_broadcast_ici: sends a fixed-delivery inter-CPU interrupt
 * (vector 0xfd) to every processor except the calling one.
 */
void arch_smp_send_broadcast_ici(void)
{
	int icr;

	int_disable_interrupts();

	icr = apic_read(APIC_ICR1) & APIC_ICR1_WRITE_MASK;
	apic_write(APIC_ICR1, icr | 0xfd | APIC_ICR1_DELMODE_FIXED
		| APIC_ICR1_DESTMODE_PHYS | APIC_ICR1_DEST_ALL_BUT_SELF);

	int_restore_interrupts();
}
/*
 * rhine_xmit: transmit path for the VIA Rhine driver.
 * NOTE(review): the ENTIRE body — including the PANIC_UNIMPLEMENTED() call —
 * sits inside the outer "#if 0", so this function currently compiles to a
 * silent no-op that drops packets.  rhine_rx, by contrast, panics before its
 * "#if 0" block.  Confirm whether the panic was meant to stay live here too.
 */
void rhine_xmit(rhine *r, const char *ptr, ssize_t len)
{
#if 0
	PANIC_UNIMPLEMENTED();
#if 0
	int i;
#endif
//restart:
	sem_acquire(r->tx_sem, 1);
	mutex_lock(&r->lock);

#if 0
	dprintf("XMIT %d %x (%d)\n",r->txbn, ptr, len);
	dprintf("dumping packet:");
	for(i=0; i<len; i++) {
		if(i%8 == 0)
			dprintf("\n");
		dprintf("0x%02x ", ptr[i]);
	}
	dprintf("\n");
#endif

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

#if 0
	/* wait for clear-to-send */
	if(!(RTL_READ_32(r, RT_TXSTATUS0 + r->txbn*4) & RT_TX_HOST_OWNS)) {
		dprintf("rhine_xmit: no txbuf free\n");
		rhine_dumptxstate(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		sem_release(r->tx_sem, 1);
		goto restart;
	}
#endif

	// copy the frame into the per-descriptor tx buffer and pad to minimum size
	memcpy((void*)(r->txbuf + r->txbn * 0x800), ptr, len);
	if(len < ETHERNET_MIN_SIZE)
		len = ETHERNET_MIN_SIZE;

	// hand the buffer to the hardware and advance to the next of 4 tx slots
	RTL_WRITE_32(r, RT_TXSTATUS0 + r->txbn*4, len | 0x80000);
	if(++r->txbn >= 4)
		r->txbn = 0;

	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();
	mutex_unlock(&r->lock);
#endif
}
/*
 * arch_smp_send_ici: sends a fixed-delivery inter-CPU interrupt
 * (vector 0xfd) to a single target processor.
 */
void arch_smp_send_ici(int target_cpu)
{
	int icr;

	int_disable_interrupts();

	// program the destination apic id first ...
	icr = apic_read(APIC_ICR2) & APIC_ICR2_MASK;
	apic_write(APIC_ICR2, icr | cpu_apic_id[target_cpu] << 24);

	// ... then trigger the interrupt
	icr = apic_read(APIC_ICR1) & APIC_ICR1_WRITE_MASK;
	apic_write(APIC_ICR1, icr | 0xfd | APIC_ICR1_DELMODE_FIXED
		| APIC_ICR1_DESTMODE_PHYS | APIC_ICR1_DEST_FIELD);

	int_restore_interrupts();
}
int arch_smp_clear_apic_timer(void) { unsigned int config; if(apic == NULL) return -1; int_disable_interrupts(); config = apic_read(APIC_LVTT) | APIC_LVTT_M; // mask the timer apic_write(APIC_LVTT, config); apic_write(APIC_ICRT, 0); // zero out the timer int_restore_interrupts(); return 0; }
/*
 * rtl8169_xmit: queues one frame for transmission on the RTL8169.
 * Busy-retries (via 'restart') while the card still owns the next tx
 * descriptor.
 */
void rtl8169_xmit(rtl8169 *r, const char *ptr, ssize_t len)
{
	//int i;

#if debug_level_flow >= 3
	dprintf("rtl8169_xmit dumping packet:");
	hexdump(ptr, len, 0, 0);
#endif

restart:
	hal_sem_acquire(&r->tx_sem);
	// NOTE(review): locked with hal_mutex_lock but released below with
	// mutex_unlock — confirm both names refer to the same lock API
	hal_mutex_lock(&r->lock);

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

	/* look at the descriptor pointed to by tx_idx_free */
	if (r->txdesc[r->tx_idx_free].flags & RTL_DESC_OWN) {
		/* card owns this one, wait and try again later */
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		// NOTE(review): the tx_sem count consumed above is not given back
		// before retrying (the release is commented out), so each pass
		// through this path loses one count — confirm this is intended
//		sem_release(r->tx_sem, 1);
		goto restart;
	}

	/* queue it up */
	memcpy(TXBUF(r, r->tx_idx_free), ptr, len);
	if (len < 64)
		len = 64;	// pad to the minimum ethernet frame size

	r->txdesc[r->tx_idx_free].frame_len = len;
	// keep the end-of-ring bit, mark first+last segment, give it to the card
	r->txdesc[r->tx_idx_free].flags = (r->txdesc[r->tx_idx_free].flags & RTL_DESC_EOR) | RTL_DESC_FS | RTL_DESC_LS | RTL_DESC_OWN;
	inc_tx_idx_free(r);
	RTL_WRITE_8(r, REG_TPPOLL, (1<<6)); // something is on the normal queue

	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();
	mutex_unlock(&r->lock);
}
/*
 * arch_smp_set_apic_timer: arms the local apic timer to fire after
 * 'relative_timeout' microseconds, either one-shot or periodic depending
 * on 'type'.  Returns 0 on success, -1 when no apic is present.
 */
int arch_smp_set_apic_timer(bigtime_t relative_timeout, int type)
{
	unsigned int lvtt;
	unsigned int ticks;

	if(apic == NULL)
		return -1;

	if(relative_timeout < MIN_TIMEOUT)
		relative_timeout = MIN_TIMEOUT;

	// calculation should be ok, since it's going to be 64-bit
	ticks = ((relative_timeout * apic_timer_tics_per_sec) / 1000000);

	int_disable_interrupts();

	// quiesce the timer before reprogramming it
	lvtt = apic_read(APIC_LVTT) | APIC_LVTT_M; // mask the timer
	apic_write(APIC_LVTT, lvtt);
	apic_write(APIC_ICRT, 0); // zero out the timer

	lvtt = apic_read(APIC_LVTT) & ~APIC_LVTT_M; // unmask the timer
	if(type == HW_TIMER_ONESHOT)
		lvtt &= ~APIC_LVTT_TM; // clear the periodic bit
	else
		lvtt |= APIC_LVTT_TM; // periodic
	apic_write(APIC_LVTT, lvtt);

	dprintf("arch_smp_set_apic_timer: config 0x%x, timeout %Ld, tics/sec %d, tics %d\n",
		lvtt, relative_timeout, apic_timer_tics_per_sec, ticks);

	apic_write(APIC_ICRT, ticks); // start it up

	int_restore_interrupts();

	return 0;
}
/*
 * port_get_info: fills 'info' with a snapshot of the port's state (id,
 * owner, name, capacity, queued-message count, total message count).
 * Returns NO_ERROR or an ERR_* code.
 */
int port_get_info(port_id id, struct port_info *info)
{
	int slot;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if (info == NULL)
		return ERR_INVALID_ARGS;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	slot = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	if(ports[slot].id != id) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		dprintf("port_get_info: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}

	// fill a port_info struct with info
	info->id = ports[slot].id;
	info->owner = ports[slot].owner;
	// fix: strncpy with n == strlen(name) never wrote a NUL terminator into
	// info->name; strlcpy always terminates and truncates to the buffer size
	strlcpy(info->name, ports[slot].name, SYS_MAX_OS_NAME_LEN);
	info->capacity = ports[slot].capacity;
	sem_get_count(ports[slot].read_sem, &info->queue_count);
	info->total_count = ports[slot].total_count;

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// from our port_entry
	return NO_ERROR;
}
/*
 * rhine_rx: receive path for the VIA Rhine driver.
 * The first statement panics: the implementation below is retained under
 * "#if 0" and is never compiled.
 */
ssize_t rhine_rx(rhine *r, char *buf, ssize_t buf_len)
{
	PANIC_UNIMPLEMENTED();
#if 0
	rx_entry *entry;
	uint32 tail;
	uint16 len;
	int rc;
	bool release_sem = false;

//	dprintf("rhine_rx: entry\n");

	// caller must supply at least a full-MTU sized buffer
	if(buf_len < 1500)
		return -1;

restart:
	sem_acquire(r->rx_sem, 1);
	mutex_lock(&r->lock);

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

	tail = TAILREG_TO_TAIL(RTL_READ_16(r, RT_RXBUFTAIL));
//	dprintf("tailreg = 0x%x, actual tail 0x%x\n", RTL_READ_16(r, RT_RXBUFTAIL), tail);
	// ring empty: drop everything and wait for the next packet
	if(tail == RTL_READ_16(r, RT_RXBUFHEAD)) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	if(RTL_READ_8(r, RT_CHIPCMD) & RT_CMD_RX_BUF_EMPTY) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// grab another buffer
	entry = (rx_entry *)((uint8 *)r->rxbuf + tail);
//	dprintf("entry->status = 0x%x\n", entry->status);
//	dprintf("entry->len = 0x%x\n", entry->len);

	// see if it's an unfinished buffer
	if(entry->len == 0xfff0) {
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// figure the len that we need to copy
	len = entry->len - 4; // minus the crc

	// see if we got an error
	if((entry->status & RT_RX_STATUS_OK) == 0 || len > ETHERNET_MAX_SIZE) {
		// error, lets reset the card
		rhine_resetrx(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	// copy the buffer
	if(len > buf_len) {
		dprintf("rhine_rx: packet too large for buffer (len %d, buf_len %ld)\n", len, (long)buf_len);
		// skip the oversized packet by moving the tail up to the head
		RTL_WRITE_16(r, RT_RXBUFTAIL, TAILREG_TO_TAIL(RTL_READ_16(r, RT_RXBUFHEAD)));
		rc = ERR_TOO_BIG;
		release_sem = true;
		goto out;
	}
	// the 64K rx ring may wrap in the middle of a packet: copy in two pieces
	if(tail + len > 0xffff) {
//		dprintf("packet wraps around\n");
		memcpy(buf, (const void *)&entry->data[0], 0x10000 - (tail + 4));
		memcpy((uint8 *)buf + 0x10000 - (tail + 4), (const void *)r->rxbuf, len - (0x10000 - (tail + 4)));
	} else {
		memcpy(buf, (const void *)&entry->data[0], len);
	}
	rc = len;

	// calculate the new tail
	tail = ((tail + entry->len + 4 + 3) & ~3) % 0x10000;
//	dprintf("new tail at 0x%x, tailreg will say 0x%x\n", tail, TAIL_TO_TAILREG(tail));
	RTL_WRITE_16(r, RT_RXBUFTAIL, TAIL_TO_TAILREG(tail));

	if(tail != RTL_READ_16(r, RT_RXBUFHEAD)) {
		// we're at last one more packet behind
		release_sem = true;
	}

out:
	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();

	// more packets pending: wake the next reader
	if(release_sem)
		sem_release(r->rx_sem, 1);
	mutex_unlock(&r->lock);

#if 0
	{
		int i;
		dprintf("RX %x (%d)\n", buf, len);
		dprintf("dumping packet:");
		for(i=0; i<len; i++) {
			if(i%8 == 0)
				dprintf("\n");
			dprintf("0x%02x ", buf[i]);
		}
		dprintf("\n");
	}
#endif
	return rc;
#endif
}
/*
 * rtl8169_rx: copies the next received frame into 'buf' and returns its
 * length, or ERR_TOO_BIG when the frame does not fit, or -1 when buf_len
 * is below the 1500-byte minimum.  Busy-retries while the card still owns
 * the next rx descriptor.
 */
ssize_t rtl8169_rx(rtl8169 *r, char *buf, ssize_t buf_len)
{
	//uint32 tail;
	ssize_t len;
	int rc;
	bool release_sem = false;

	SHOW_FLOW0(3, "rtl8169_rx: entry\n");

	// caller must supply at least a full-MTU sized buffer
	if(buf_len < 1500)
		return -1;

restart:
	hal_sem_acquire(&r->rx_sem);
	mutex_lock(&r->lock);

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

	/* look at the descriptor pointed to by rx_idx_free */
	if (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) {
		/* for some reason it's owned by the card, wait for more packets */
		// NOTE(review): the rx_sem count consumed above is not given back
		// before retrying, so each pass through this path loses one count
		// — confirm this is intended
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		goto restart;
	}

	/* process this packet */
	len = r->rxdesc[r->rx_idx_free].frame_len & 0x3fff;
	SHOW_FLOW(3, "rtl8169_rx: desc idx %d: len %d\n", r->rx_idx_free, len);

	if (len > buf_len) {
		rc = ERR_TOO_BIG;
		release_sem = true;
		goto out;
	}

	memcpy(buf, RXBUF(r, r->rx_idx_free), len);
	rc = len;

#if debug_level_flow >= 3
	hexdump(RXBUF(r, r->rx_idx_free), len, 0, 0);
#endif

	/* stick it back in the free list */
	r->rxdesc[r->rx_idx_free].buffer_size = BUFSIZE_PER_FRAME;
	// keep the end-of-ring bit, hand ownership back to the card
	r->rxdesc[r->rx_idx_free].flags = (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_EOR) | RTL_DESC_OWN;
	inc_rx_idx_free(r);

	/* see if there are more packets pending */
	if ((r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) == 0)
		release_sem = true; // if so, release the rx sem so the next reader gets a shot

out:
	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();

	if(release_sem)
		hal_sem_release(&r->rx_sem);
	mutex_unlock(&r->lock);

	return rc;
}
/*
 * port_write_etc: appends a message (code + optional buffer) to the port's
 * queue, blocking while the queue is full (subject to 'flags'/'timeout').
 * Returns NO_ERROR, ERR_PORT_DELETED / ERR_PORT_TIMED_OUT / ERR_PORT_CLOSED,
 * or a memory/copy error code.
 */
int port_write_etc(port_id id, int32 msg_code, void *msg_buffer, size_t buffer_size, uint32 flags, bigtime_t timeout)
{
	int slot;
	int res;
	sem_id cached_semid;
	int h;
	cbuf* msg_store;
	int err;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	// mask irrelevant flags
	flags = flags & (PORT_FLAG_USE_USER_MEMCPY | PORT_FLAG_INTERRUPTABLE | PORT_FLAG_TIMEOUT);

	slot = id % MAX_PORTS;

	// check buffer_size
	if (buffer_size > PORT_MAX_MESSAGE_SIZE)
		return ERR_INVALID_ARGS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	if(ports[slot].id != id) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		dprintf("write_port_etc: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}
	if (ports[slot].closed) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		dprintf("write_port_etc: port %d closed\n", id);
		return ERR_PORT_CLOSED;
	}

	// store sem_id in local variable
	cached_semid = ports[slot].write_sem;

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// XXX -> possible race condition if port gets deleted (->sem deleted too),
	// and queue is full therefore sem_id is cached in local variable up here

	// get 1 entry from the queue, block if needed
	res = sem_acquire_etc(cached_semid, 1,
		flags & (SEM_FLAG_TIMEOUT | SEM_FLAG_INTERRUPTABLE), timeout, NULL);

	// XXX: possible race condition if port written by two threads...
	// both threads will write in 2 different slots allocated above, simultaneously
	// slot is a thread-local variable

	if (res == ERR_SEM_DELETED) {
		// somebody deleted the port
		return ERR_PORT_DELETED;
	}
	if (res == ERR_SEM_TIMED_OUT) {
		// timed out, or, if timeout=0, 'would block'
		return ERR_PORT_TIMED_OUT;
	}
	if (res != NO_ERROR) {
		dprintf("write_port_etc: res unknown error %d\n", res);
		return res;
	}

	if (buffer_size > 0) {
		msg_store = cbuf_get_chain(buffer_size);
		if (msg_store == NULL) {
			// fix: give the queue slot acquired above back before bailing
			// out, otherwise the port permanently loses one entry of capacity
			sem_release(cached_semid, 1);
			return ERR_NO_MEMORY;
		}
		if (flags & PORT_FLAG_USE_USER_MEMCPY) {
			// copy from user memory
			err = cbuf_user_memcpy_to_chain(msg_store, 0, msg_buffer, buffer_size);
		} else {
			// copy from kernel memory
			err = cbuf_memcpy_to_chain(msg_store, 0, msg_buffer, buffer_size);
		}
		if (err < 0) {
			// fix: this path previously leaked both the cbuf chain and the
			// acquired queue slot
			cbuf_free_chain(msg_store);
			sem_release(cached_semid, 1);
			return err; // memory exception
		}
	} else {
		msg_store = NULL;
	}

	// attach copied message to queue
	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	h = ports[slot].head;
	if (h < 0)
		panic("port %id: head < 0", ports[slot].id);
	if (h >= ports[slot].capacity)
		panic("port %id: head > cap %d", ports[slot].id, ports[slot].capacity);

	ports[slot].msg_queue[h].msg_code = msg_code;
	ports[slot].msg_queue[h].data_cbuf = msg_store;
	ports[slot].msg_queue[h].data_len = buffer_size;
	ports[slot].head = (ports[slot].head + 1) % ports[slot].capacity;
	ports[slot].total_count++;

	// store sem_id in local variable
	cached_semid = ports[slot].read_sem;

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// fix: removed two sem_get_count() debug leftovers here; they read
	// ports[slot] after the lock was dropped and ignored the results

	// release sem, allowing read (might reschedule)
	sem_release(cached_semid, 1);

	return NO_ERROR;
}
ssize_t port_read_etc(port_id id, int32 *msg_code, void *msg_buffer, size_t buffer_size, uint32 flags, bigtime_t timeout) { int slot; sem_id cached_semid; size_t siz; int res; int t; cbuf* msg_store; int32 code; int err; if(ports_active == false) return ERR_PORT_NOT_ACTIVE; if(id < 0) return ERR_INVALID_HANDLE; if(msg_code == NULL) return ERR_INVALID_ARGS; if((msg_buffer == NULL) && (buffer_size > 0)) return ERR_INVALID_ARGS; if (timeout < 0) return ERR_INVALID_ARGS; flags = flags & (PORT_FLAG_USE_USER_MEMCPY | PORT_FLAG_INTERRUPTABLE | PORT_FLAG_TIMEOUT); slot = id % MAX_PORTS; int_disable_interrupts(); GRAB_PORT_LOCK(ports[slot]); if(ports[slot].id != id) { RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); dprintf("read_port_etc: invalid port_id %d\n", id); return ERR_INVALID_HANDLE; } // store sem_id in local variable cached_semid = ports[slot].read_sem; // unlock port && enable ints/ RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); // XXX -> possible race condition if port gets deleted (->sem deleted too), therefore // sem_id is cached in local variable up here // get 1 entry from the queue, block if needed res = sem_acquire_etc(cached_semid, 1, flags, timeout, NULL); // XXX: possible race condition if port read by two threads... // both threads will read in 2 different slots allocated above, simultaneously // slot is a thread-local variable if (res == ERR_SEM_DELETED) { // somebody deleted the port return ERR_PORT_DELETED; } if (res == ERR_INTERRUPTED) { // XXX: somebody signaled the process the port belonged to, deleting the sem ? 
return ERR_INTERRUPTED; } if (res == ERR_SEM_TIMED_OUT) { // timed out, or, if timeout=0, 'would block' return ERR_PORT_TIMED_OUT; } if (res != NO_ERROR) { dprintf("write_port_etc: res unknown error %d\n", res); return res; } int_disable_interrupts(); GRAB_PORT_LOCK(ports[slot]); t = ports[slot].tail; if (t < 0) panic("port %id: tail < 0", ports[slot].id); if (t > ports[slot].capacity) panic("port %id: tail > cap %d", ports[slot].id, ports[slot].capacity); ports[slot].tail = (ports[slot].tail + 1) % ports[slot].capacity; msg_store = ports[slot].msg_queue[t].data_cbuf; code = ports[slot].msg_queue[t].msg_code; // mark queue entry unused ports[slot].msg_queue[t].data_cbuf = NULL; // check output buffer size siz = min(buffer_size, ports[slot].msg_queue[t].data_len); cached_semid = ports[slot].write_sem; RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); // copy message *msg_code = code; if (siz > 0) { if (flags & PORT_FLAG_USE_USER_MEMCPY) { if ((err = cbuf_user_memcpy_from_chain(msg_buffer, msg_store, 0, siz) < 0)) { // leave the port intact, for other threads that might not crash cbuf_free_chain(msg_store); sem_release(cached_semid, 1); return err; } } else cbuf_memcpy_from_chain(msg_buffer, msg_store, 0, siz); } // free the cbuf cbuf_free_chain(msg_store); // make one spot in queue available again for write sem_release(cached_semid, 1); return siz; }
/*
 * port_create: allocates a new port with room for 'queue_length' messages
 * and the given name (copied, truncated to SYS_MAX_OS_NAME_LEN).  Returns
 * the new port's id, or a negative ERR_* code on failure.
 */
port_id port_create(int32 queue_length, const char *name)
{
	int i;
	sem_id sem_r, sem_w;
	port_id retval;
	char *temp_name;
	int name_len;
	void *q;
	proc_id owner;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;

	if(name == NULL)
		name = "unnamed port";

	name_len = strlen(name) + 1;
	name_len = min(name_len, SYS_MAX_OS_NAME_LEN);

	temp_name = (char *)kmalloc(name_len);
	if(temp_name == NULL)
		return ERR_NO_MEMORY;
	strlcpy(temp_name, name, name_len);

	// check queue length
	if (queue_length < 1 || queue_length > MAX_QUEUE_LENGTH) {
		kfree(temp_name);
		return ERR_INVALID_ARGS;
	}

	// alloc a queue
	q = kmalloc( queue_length * sizeof(struct port_msg) );
	if (q == NULL) {
		kfree(temp_name); // dealloc name, too
		return ERR_NO_MEMORY;
	}

	// create sem_r with owner set to -1 (counts queued messages, starts empty)
	sem_r = sem_create_etc(0, temp_name, -1);
	if (sem_r < 0) {
		// cleanup
		kfree(temp_name);
		kfree(q);
		return sem_r;
	}

	// create sem_w (counts free queue slots, starts full)
	sem_w = sem_create_etc(queue_length, temp_name, -1);
	if (sem_w < 0) {
		// cleanup
		sem_delete(sem_r);
		kfree(temp_name);
		kfree(q);
		return sem_w;
	}

	owner = proc_get_current_proc_id();

	int_disable_interrupts();
	GRAB_PORT_LIST_LOCK();

	// find the first empty spot
	for(i=0; i<MAX_PORTS; i++) {
		if(ports[i].id == -1) {
			// make the port_id be a multiple of the slot it's in
			if(i >= next_port % MAX_PORTS) {
				next_port += i - next_port % MAX_PORTS;
			} else {
				next_port += MAX_PORTS - (next_port % MAX_PORTS - i);
			}
			ports[i].id = next_port++;

			// lock the port before giving up the list lock, so no one can
			// see it half-initialized
			ports[i].lock = 0;
			GRAB_PORT_LOCK(ports[i]);
			RELEASE_PORT_LIST_LOCK();

			ports[i].capacity = queue_length;
			ports[i].name = temp_name;

			// assign sem
			ports[i].read_sem = sem_r;
			ports[i].write_sem = sem_w;
			ports[i].msg_queue = q;
			ports[i].head = 0;
			ports[i].tail = 0;
			ports[i].total_count= 0;
			ports[i].owner = owner;
			retval = ports[i].id;
			RELEASE_PORT_LOCK(ports[i]);
			goto out;
		}
	}
	// not enough ports...
	RELEASE_PORT_LIST_LOCK();
	retval = ERR_PORT_OUT_OF_SLOTS;
	dprintf("port_create(): ERR_PORT_OUT_OF_SLOTS\n");

	// cleanup
	// fix: q and temp_name used to be freed twice on this path (once right
	// after releasing the list lock and again here) — free them exactly once
	sem_delete(sem_w);
	sem_delete(sem_r);
	kfree(temp_name);
	kfree(q);

out:
	int_restore_interrupts();
	return retval;
}
/*
 * port_buffer_size_etc: waits until a message is available (subject to
 * 'flags'/'timeout') and returns the payload length of the next message
 * that would be read, without consuming it (the read-sem count taken while
 * waiting is given back).  Returns an ERR_* code on failure.
 */
ssize_t port_buffer_size_etc(port_id id, uint32 flags, bigtime_t timeout)
{
	int slot;
	int res;
	int t;
	int len;

	if(ports_active == false)
		return ERR_PORT_NOT_ACTIVE;
	if(id < 0)
		return ERR_INVALID_HANDLE;

	slot = id % MAX_PORTS;

	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	if(ports[slot].id != id) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		// fix: message previously said "port_get_info" (copy-paste)
		dprintf("port_buffer_size_etc: invalid port_id %d\n", id);
		return ERR_INVALID_HANDLE;
	}
	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// block if no message,
	// if TIMEOUT flag set, block with timeout

	// XXX - is it a race condition to acquire a sem just after we
	// unlocked the port ?
	// XXX: call an acquire_sem which does the release lock, restore int & block the right way
	res = sem_acquire_etc(ports[slot].read_sem, 1,
		flags & (SEM_FLAG_TIMEOUT | SEM_FLAG_INTERRUPTABLE), timeout, NULL);

	// fix: the spinlock was previously grabbed without disabling interrupts
	// (and released without restoring them), unlike every other port function
	int_disable_interrupts();
	GRAB_PORT_LOCK(ports[slot]);

	if (res == ERR_SEM_DELETED) {
		// somebody deleted the port
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		return ERR_PORT_DELETED;
	}
	if (res == ERR_SEM_TIMED_OUT) {
		RELEASE_PORT_LOCK(ports[slot]);
		int_restore_interrupts();
		return ERR_PORT_TIMED_OUT;
	}

	// once message arrived, read its length
	// fix: the next message to be read sits at 'tail' (the read index),
	// not 'head' (the write index, which points at an empty slot)
	t = ports[slot].tail;
	if (t < 0)
		panic("port %id: tail < 0", ports[slot].id);
	if (t >= ports[slot].capacity)
		panic("port %id: tail > cap %d", ports[slot].id, ports[slot].capacity);
	len = ports[slot].msg_queue[t].data_len;

	// restore readsem
	sem_release(ports[slot].read_sem, 1);

	RELEASE_PORT_LOCK(ports[slot]);
	int_restore_interrupts();

	// return length of item at end of queue
	return len;
}