panic (char *format, ...) { u64 time; int count; va_list ap; ulong curstk; bool w = false; char *p, *pend; int cpunum = -1; static int panic_count = 0; static ulong panic_shell = 0; struct panic_pcpu_data_state *state, local_state; va_start (ap, format); if (currentcpu_available ()) cpunum = get_cpu_id (); if (cpunum >= 0) { spinlock_lock (&panic_lock); count = panic_count++; spinlock_unlock (&panic_lock); wait_for_other_cpu (cpunum); p = panicmsg_tmp; pend = panicmsg_tmp + sizeof panicmsg_tmp; if (panic_reboot) *p = '\0'; else snprintf (p, pend - p, "panic(CPU%d): ", cpunum); p += strlen (p); vsnprintf (p, pend - p, format, ap); if (*p != '\0') { printf ("%s\n", panicmsg_tmp); if (panicmsg[0] == '\0') snprintf (panicmsg, sizeof panicmsg, "%s", panicmsg_tmp); } asm_rdrsp (&curstk); if (count > 5 || curstk - (ulong)currentcpu->stackaddr < VMM_MINSTACKSIZE) { spinlock_lock (&panic_lock); paniccpu = -1; spinlock_unlock (&panic_lock); freeze (); } state = ¤tcpu->panic.state; } else { spinlock_lock (&panic_lock); count = panic_count++; printf ("panic: "); vprintf (format, ap); printf ("\n"); spinlock_unlock (&panic_lock); if (count) freeze (); state = &local_state; state->dump_vmm = false; state->backtrace = false; state->flag_dump_vm = false; } va_end (ap); if (!state->dump_vmm) { state->dump_vmm = true; catch_exception (cpunum, dump_vmm_control_regs); catch_exception (cpunum, dump_vmm_other_regs); state->dump_vmm = false; } if (!state->backtrace) { state->backtrace = true; catch_exception (cpunum, backtrace); state->backtrace = false; } if (cpunum >= 0 && current && !state->flag_dump_vm) { /* Guest state is printed only once. Because the * state will not change if panic will have been * called twice or more. 
*/ state->flag_dump_vm = true; printf ("Guest state and registers of cpu %d ------------\n", cpunum); catch_exception (cpunum, dump_vm_general_regs); catch_exception (cpunum, dump_vm_control_regs); catch_exception (cpunum, dump_vm_sregs); catch_exception (cpunum, dump_vm_other_regs); printf ("------------------------------------------------\n"); } if (cpunum < 0) freeze (); if (do_wakeup) { do_wakeup = false; w = true; } spinlock_lock (&panic_lock); paniccpu = -1; spinlock_unlock (&panic_lock); if (w) { sleep_set_timer_counter (); panic_wakeup_all (); } call_initfunc ("panic"); if (cpunum == 0) { usleep (1000000); /* wait for dump of other processors */ #ifndef TTY_SERIAL setkbdled (LED_NUMLOCK_BIT | LED_SCROLLLOCK_BIT | LED_CAPSLOCK_BIT); if (!uefi_booted) { disable_apic (); if (bios_area_saved) copy_bios_area (bios_area_panic, bios_area_orig); callrealmode_setvideomode (VIDEOMODE_80x25TEXT_16COLORS); if (bios_area_saved) copy_bios_area (NULL, bios_area_panic); if (panic_reboot) printf ("%s\n", panicmsg); } keyboard_reset (); usleep (250000); setkbdled (LED_SCROLLLOCK_BIT | LED_CAPSLOCK_BIT); #endif } else { /* setvideomode is expected to be done in 3 seconds */ time = get_time (); while (get_time () - time < 3000000); } if (asm_lock_ulong_swap (&panic_shell, 1)) { for (;;) reboot_test (); clihlt (); } if (panic_reboot) do_panic_reboot (); printf ("%s\n", panicmsg); call_panic_shell (); }
/*
 * udp_recv_netbuf()
 *	Handle one received UDP datagram: verify the optional checksum,
 *	demultiplex to a bound client by destination port, or reply with
 *	ICMP "port unreachable" when no client is bound.
 */
static void udp_recv_netbuf(void *clnt, struct netbuf *nb)
{
	struct ip_client *ic;
	struct udp_instance *ui;
	struct udp_client *uc;
	u16_t csum;
	struct ip_header *iph = (struct ip_header *)nb->nb_network;
	struct udp_header *udh = (struct udp_header *)nb->nb_transport;

	ic = (struct ip_client *)clnt;
	ui = (struct udp_instance *)ic->ic_instance;

	/*
	 * When we get here our application data is still hooked under the
	 * transport layer, so the first thing to do is untangle it.
	 */
	nb->nb_application = udh + 1;
	nb->nb_application_size = nb->nb_transport_size - sizeof(struct udp_header);
	nb->nb_transport_size = sizeof(struct udp_header);

	/*
	 * If we've been given a checksum then check it!  The pseudo-header
	 * sum covers our own address, the sender's address, protocol 0x11
	 * (UDP) and the UDP length; a correct datagram sums to zero.
	 */
	if (udh->uh_csum != 0) {
		csum = ipcsum_pseudo_partial(hton32(((struct ip_instance *)ic->ic_server->is_instance)->ii_addr),
					iph->ih_src_addr, 0x11, udh->uh_len);
		csum = ipcsum_partial(csum, nb->nb_transport, nb->nb_transport_size);
		csum = ipcsum(csum, nb->nb_application, nb->nb_application_size);
		if (csum != 0) {
			/* silently drop the corrupted datagram
			 * (nb presumably remains owned by the caller —
			 * TODO confirm against the caller's contract) */
			debug_print_pstr("\fudp recv: csum fail: ");
			debug_print16(csum);
			return;
		}
	}

	/*
	 * Look up the socket and see if we know anyone who will try and deal
	 * with it!  The client is referenced before the instance lock is
	 * dropped so it cannot vanish while its recv callback runs.
	 */
	spinlock_lock(&ui->ui_lock);
	uc = ui->ui_client_list;
	while (uc && (uc->uc_port != hton16(udh->uh_dest_port))) {
		uc = uc->uc_next;
	}
	if (uc) {
		udp_client_ref(uc);
		spinlock_unlock(&ui->ui_lock);
		uc->uc_recv(uc, nb);
		udp_client_deref(uc);
	} else {
		struct ip_header *iphc;
		struct netbuf *nbrep;
		struct ip_server *is;
		u32_t empty = 0;

		is = ui->ui_server;
		ip_server_ref(is);
		spinlock_unlock(&ui->ui_lock);

		/*
		 * Issue an ICMP destination unreachable message (port
		 * unreachable): the reply payload carries the original IP
		 * header plus the first 8 bytes of the transport header.
		 * NOTE(review): netbuf_alloc()/membuf_alloc() results are
		 * used unchecked — presumably they cannot fail or abort
		 * internally; confirm.
		 */
		nbrep = netbuf_alloc();
		nbrep->nb_application_membuf = membuf_alloc(nb->nb_network_size + 8, NULL);
		nbrep->nb_application = nbrep->nb_application_membuf;
		nbrep->nb_application_size = nb->nb_network_size + 8;
		iphc = (struct ip_header *)nbrep->nb_application;
		memcpy(iphc, iph, nb->nb_network_size);
		/* NOTE(review): (iphc + 1) assumes the copied header is
		 * exactly sizeof(struct ip_header); with IP options,
		 * nb_network_size is larger — verify. */
		memcpy((iphc + 1), nb->nb_transport, 8);
		is->is_send_icmp(is, iph->ih_src_addr, 0x03, 0x03, (u8_t *)(&empty), nbrep);
		ip_server_deref(is);
		netbuf_deref(nbrep);
	}
}
/** * @brief initialize host fram list * @param host struct uhci_host */ int init_hframelist(struct uhci_host *host) { struct usb_request_block *urb; u32 frid; phys32_t *frame_p; virt_t framelist_virt; phys_t framelist_phys; int n_skels; /* allocate a page for frame list */ alloc_page((void *)&framelist_virt, &framelist_phys); if (!framelist_phys) return -1; host->hframelist = framelist_phys; host->hframelist_virt = (phys32_t *)framelist_virt; spinlock_lock(&host->lock_hfl); /* create a TD for termination */ host->term_tdm = uhci_new_td_meta(host, NULL); if (!host->term_tdm) return -1; host->term_tdm->td->link = UHCI_TD_LINK_TE; host->term_tdm->td->status = 0U; host->term_tdm->td->token = UHCI_TD_TOKEN_DEVADDRESS(0x7f) | UHCI_TD_TOKEN_ENDPOINT(0) | UHCI_TD_TOKEN_PID_IN | uhci_td_explen(0); host->term_tdm->td->buffer = 0U; /* create skelton QHs */ for (n_skels = 0; n_skels<UHCI_NUM_SKELTYPES; n_skels++) { urb = create_urb(host); if (!urb) break; urb->address = URB_ADDRESS_SKELTON; URB_UHCI(urb)->qh = uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys); if (!URB_UHCI(urb)->qh) break; if (n_skels == 0) { URB_UHCI(urb)->tdm_head = host->term_tdm; URB_UHCI(urb)->qh->element = (phys32_t) URB_UHCI(urb)->tdm_head->td_phys; URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE; } else { URB_UHCI(urb)->qh->element = UHCI_QH_LINK_TE; URB_UHCI(urb)->qh->link = (phys32_t) URB_UHCI(host->host_skelton [n_skels - 1])->qh_phys | UHCI_QH_LINK_QH; urb->link_next = host->host_skelton[n_skels - 1]; } host->host_skelton[n_skels] = urb; } /* make link to a QH in each frame list entry according to intervals */ for (frid = 0U; frid < UHCI_NUM_FRAMES; frid++) { frame_p = (phys32_t *) (framelist_virt + frid * sizeof(phys32_t)); n_skels = __ffs((frid + 1) | (1 << (UHCI_NUM_SKELTYPES - 1))); *frame_p = (phys32_t) URB_UHCI(host->host_skelton[n_skels])->qh_phys | UHCI_FRAME_LINK_QH; } for (n_skels = 0; n_skels < 2; n_skels++) host->tailurb[n_skels] = host->host_skelton[0]; spinlock_unlock(&host->lock_hfl); return 
0; }
/* Acquire the global network-stack spinlock (net_spinlock); callers
 * presumably release it with a matching unlock elsewhere in this
 * module. */
void net_lock() { spinlock_lock(&net_spinlock); }
/** Control of the log from uspace
 *
 * KLOG_WRITE copies a user string into the kernel log at the given
 * level; KLOG_READ drains whole log entries (up to @size bytes) back
 * to userspace, skipping entries larger than PAGE_SIZE.
 *
 * @param operation KLOG_WRITE or KLOG_READ
 * @param buf       userspace buffer (source for write, sink for read)
 * @param size      buffer size, limited to PAGE_SIZE
 * @param level     log level for KLOG_WRITE (clamped to LVL_NOTE)
 * @return EOK / number of bytes copied, or a negative-style error code
 */
sysarg_t sys_klog(sysarg_t operation, void *buf, size_t size, sysarg_t level)
{
	char *data;
	int rc;

	if (size > PAGE_SIZE)
		return (sysarg_t) ELIMIT;

	switch (operation) {
	case KLOG_WRITE:
		data = (char *) malloc(size + 1, 0);
		if (!data)
			return (sysarg_t) ENOMEM;

		rc = copy_from_uspace(data, buf, size);
		if (rc) {
			free(data);
			return (sysarg_t) rc;
		}
		data[size] = 0;

		if (level >= LVL_LIMIT)
			level = LVL_NOTE;

		log(LF_USPACE, level, "%s", data);

		free(data);
		return EOK;
	case KLOG_READ:
		data = (char *) malloc(size, 0);
		if (!data)
			return (sysarg_t) ENOMEM;

		size_t entry_len = 0;
		size_t copied = 0;
		rc = EOK;

		spinlock_lock(&log_lock);

		while (next_for_uspace < log_used) {
			size_t pos = (log_start + next_for_uspace) % LOG_LENGTH;
			log_copy_from((uint8_t *) &entry_len, pos, sizeof(size_t));

			if (entry_len > PAGE_SIZE) {
				/*
				 * Since we limit data transfer
				 * to uspace to a maximum of PAGE_SIZE
				 * bytes, skip any entries larger
				 * than this limit to prevent
				 * userspace being stuck trying to
				 * read them.
				 */
				next_for_uspace += entry_len;
				continue;
			}

			if (size < copied + entry_len) {
				/* buffer full; only an error if nothing fit */
				if (copied == 0)
					rc = EOVERFLOW;
				break;
			}

			log_copy_from((uint8_t *) (data + copied), pos, entry_len);
			copied += entry_len;
			next_for_uspace += entry_len;
		}

		spinlock_unlock(&log_lock);

		if (rc != EOK) {
			free(data);
			return (sysarg_t) rc;
		}

		/* FIX: copy only the bytes actually filled.  The original
		 * copied `size` bytes, leaking up to size-copied bytes of
		 * uninitialized kernel heap to userspace. */
		rc = copy_to_uspace(buf, data, copied);
		free(data);

		if (rc != EOK)
			return (sysarg_t) rc;

		return copied;
	default:
		return (sysarg_t) ENOTSUP;
	}
}
/**
 * @brief check_advance - scan in-process URBs and collect completions
 * @param host struct uhci_host
 * @return the number of URBs that advanced during this scan
 */
int check_advance(struct uhci_host *host)
{
	struct usb_request_block *urb, *nexturb;
	int advance = 0, ret = 0;	/* NOTE(review): ret is written but never read */
	u16 usbsts = 0U;

	/* only one checker at a time; bail out if another is running */
	if (cmpxchgl(&host->incheck, 0U, 1U))
		return 0;

#if 0
	in16(host->iobase + UHCI_REG_USBSTS, &usbsts);
	if (usbsts)
		dprintft(2, "%04x: %s: usbsts = %04x\n",
			 host->iobase, __FUNCTION__, usbsts);
#endif /* 0 */
	urb = host->inproc_urbs;
	while (urb) {
		/* update urb->status */
		if (urb->status == URB_STATUS_RUN)
			check_urb_advance(host, urb, usbsts);

		switch (urb->status) {
		case URB_STATUS_UNLINKED:
			/* detach and free under the lock, then resume the
			 * walk from the saved successor */
			spinlock_lock(&host->lock_hfl);
			nexturb = urb->next;
			remove_urb(&host->inproc_urbs, urb);
			destroy_urb(host, urb);
			dprintft(3, "%04x: %s: urb(%p) destroyed.\n",
				 host->iobase, __FUNCTION__, urb);
			urb = nexturb;
			spinlock_unlock(&host->lock_hfl);
			continue;
		default:
			/* errors: report, dump, then treat as advanced */
			dprintft(2, "%04x: %s: got some errors(%s) "
				 "for urb(%p).\n",
				 host->iobase, __FUNCTION__,
				 uhci_error_status_string(urb->status),
				 urb);
			uhci_dump_all(3, host, urb);
			/* through */
		case URB_STATUS_ADVANCED:
			if (urb->callback)
				ret = (urb->callback)(host->hc, urb,
						      urb->cb_arg);
			advance++;
			break;
		case URB_STATUS_NAK:
			/* copy back the shadow buffers and let it run again */
			dprintft(2, "%04x: %s: got an NAK for urb(%p).\n",
				 host->iobase, __FUNCTION__, urb);
			if (urb->shadow)
				uhci_force_copyback(host, urb);
			urb->status = URB_STATUS_RUN;
			/* fallthrough: nothing more to do for a running urb */
		case URB_STATUS_RUN:
		case URB_STATUS_FINALIZED:
			break;
		}
		urb = urb->next;
	}
#if 0
	if (advance) {
		dprintft(3, "%s: USBSTS register cleared.\n",
			 __FUNCTION__);
		out16(host->iobase + UHCI_REG_USBSTS, usbsts);
	}
#endif
	/* release the single-checker latch */
	host->incheck = 0U;

	return advance;
}
/** * @brief activate urb * @param host struct uhci_host *host * @param urb struct usb_request_block */ u8 uhci_activate_urb(struct uhci_host *host, struct usb_request_block *urb) { u8 status, type; int n; type = (urb->endpoint) ? USB_EP_TRANSTYPE(urb->endpoint) : USB_ENDPOINT_TYPE_CONTROL; spinlock_lock(&host->lock_hfl); switch (type) { case USB_ENDPOINT_TYPE_INTERRUPT: n = __ffs(urb->endpoint->bInterval | (1 << (UHCI_NUM_SKELTYPES - 1))); /* MEMO: a new interrupt urb must be inserted just after a skelton anytime. */ urb->link_prev = host->host_skelton[n]; if (host->host_skelton[n] == host->tailurb[URB_TAIL_CONTROL]) host->tailurb[URB_TAIL_CONTROL] = urb; if (host->host_skelton[n] == host->tailurb[URB_TAIL_BULK]) host->tailurb[URB_TAIL_BULK] = urb; break; case USB_ENDPOINT_TYPE_CONTROL: urb->link_prev = host->tailurb[URB_TAIL_CONTROL]; if (host->tailurb[URB_TAIL_CONTROL] == host->tailurb[URB_TAIL_BULK]) host->tailurb[URB_TAIL_BULK] = urb; host->tailurb[URB_TAIL_CONTROL] = urb; break; case USB_ENDPOINT_TYPE_BULK: urb->link_prev = host->tailurb[URB_TAIL_BULK]; host->tailurb[URB_TAIL_BULK] = urb; break; case USB_ENDPOINT_TYPE_ISOCHRONOUS: default: printf("%s: transfer type(%02x) unsupported.\n", __FUNCTION__, type); status = urb->status; return status; } /* initialize qh_element_copy for detecting advance after NAK */ URB_UHCI(urb)->qh_element_copy = URB_UHCI(urb)->qh->element; /* urb link */ urb->link_next = urb->link_prev->link_next; urb->link_prev->link_next = urb; if (urb->link_next) { /* make a backward pointer */ urb->link_next->link_prev = urb; } else if (type == USB_ENDPOINT_TYPE_BULK) { if (host->fsbr) { dprintft(2, "%04x: %s: append it to the " "FSBR loopback.\n", host->iobase, __FUNCTION__); host->fsbr_loop_tail = urb; } else { dprintft(2, "%04x: %s: make a FSBR loopback.\n", host->iobase, __FUNCTION__); host->fsbr = 1; host->fsbr_loop_head = urb; host->fsbr_loop_tail = urb; } } /* record the current frame number */ URB_UHCI(urb)->frnum_issued = 
uhci_current_frame_number(host); /* qh link */ URB_UHCI(urb)->qh->link = URB_UHCI(urb->link_prev)->qh->link; if (host->fsbr_loop_tail) URB_UHCI(host->fsbr_loop_tail)->qh->link = (phys32_t) URB_UHCI(host->fsbr_loop_head)->qh_phys | UHCI_QH_LINK_QH; URB_UHCI(urb->link_prev)->qh->link = URB_UHCI(urb)->qh_phys | UHCI_QH_LINK_QH; urb->status = URB_STATUS_RUN; dprintft(3, "%s: The urb link is %p <- %p -> %p.\n", __FUNCTION__, urb->link_prev, urb, urb->link_next); status = urb->status; spinlock_unlock(&host->lock_hfl); return status; }
/* Store x into *v; the per-variable spinlock serializes this store
 * against other lock-taking accessors of the same atomic64_t. */
void atomic64_set(atomic64_t *v, uint64 x) { spinlock_lock(&v->spinlock); v->value = x; spinlock_unlock(&v->spinlock); }
static void enumerator_handler(RETRANSLATOR *pRetranslator, void *ctx) { DORTRANSAUTH ap; DORTRANSACK ackp; DORTRANSNAV np; unsigned char *uptr; size_t nNumRecord; if (pRetranslator->sock == -1) return; ENUMCONTEXT *pContext = (ENUMCONTEXT *)ctx; time_t now = time(NULL); if (FD_ISSET(pRetranslator->sock, &pContext->fdReadSet)) { #ifdef VERBOSE api_log_printf("[DORTRANS] socket #%d is readble\r\n", pRetranslator->sock); #endif unsigned char buf[8192]; int status = recv(pRetranslator->sock, (char *)buf, sizeof(buf), 0); if (status <= 0) { api_log_printf("[DORTRANS] socket #%d is closed\r\n", pRetranslator->sock); closesocket(pRetranslator->sock); pRetranslator->sock = -1; pRetranslator->connection_status = RETRANSLATOR_STATUS_INIT; return; } for (int i = 0; i < status; i++) { unsigned char ch = buf[i]; switch (pRetranslator->frame_state) { default: case DORTRANS_FRAMETAG1: if (ch == '~') { pRetranslator->frame_state = DORTRANS_FRAMETAG2; #ifdef VERBOSE api_log_printf("[DORTRANS] Tilda 1, socket #%d\r\n", pRetranslator->sock); #endif } break; case DORTRANS_FRAMETAG2: if (ch == '~') { pRetranslator->frame_state = DORTRANS_FRAMELEN; pRetranslator->frame_bytes_received = 2; pRetranslator->frame_crc = 0; pRetranslator->frame_len = 0; #ifdef VERBOSE api_log_printf("[DORTRANS] Tilda 2, socket #%d\r\n", pRetranslator->sock); #endif } else { pRetranslator->frame_state = DORTRANS_FRAMETAG1; } break; case DORTRANS_FRAMELEN: pRetranslator->frame_crc ^= ch; *(((unsigned char *)&pRetranslator->frame_len) + (pRetranslator->frame_bytes_received - 2)) = ch; pRetranslator->frame_bytes_received++; if (pRetranslator->frame_bytes_received == 6) { #ifdef VERBOSE api_log_printf("[DORTRANS] Frame len %u, socket #%d\r\n", pRetranslator->frame_len, pRetranslator->sock); #endif pRetranslator->frame_state = DORTRANS_FRAMERES; } break; case DORTRANS_FRAMERES: pRetranslator->frame_crc ^= ch; pRetranslator->frame_bytes_received++; if (pRetranslator->frame_bytes_received == 12) { 
pRetranslator->frame_state = (pRetranslator->frame_len > 0) ? DORTRANS_FRAMEBODY : DORTRANS_FRAMECRC; } break; case DORTRANS_FRAMEBODY: pRetranslator->frame_crc ^= ch; pRetranslator->frame_body[pRetranslator->frame_bytes_received - 12] = ch; pRetranslator->frame_bytes_received++; if (pRetranslator->frame_bytes_received == pRetranslator->frame_len - 1) { pRetranslator->frame_state = DORTRANS_FRAMECRC; } break; case DORTRANS_FRAMECRC: #ifdef VERBOSE api_log_printf("[DORTRANS] Frame crc 0x%02X, actual crc: 0x%02X, socket #%d\r\n", ch & 0xFF, pRetranslator->frame_crc & 0xFF, pRetranslator->sock); #endif pRetranslator->frame_state = DORTRANS_FRAMETAG1; if (ch == pRetranslator->frame_crc) { unsigned char *ptr = pRetranslator->frame_body; while (ptr < &pRetranslator->frame_body[pRetranslator->frame_bytes_received - 12]) { DORPACKETHEADER *ph = (DORPACKETHEADER *)ptr; ptr += ph->pack_len; #ifdef VERBOSE api_log_printf("[DORTRANS] Packet type %u, socket #%d\r\n", ph->pack_type, pRetranslator->sock); #endif size_t acks_count; unsigned int *acks; switch (ph->pack_type) { case 101: if (*(((unsigned char *)ph) + sizeof(DORPACKETHEADER)) == 0) { pRetranslator->status = DORTRANS_STATUS_ONLINE; pRetranslator->timeout = 0; RETRANSLATOR_RECORD rr; pRetranslator->records_list.insert(pRetranslator->records_list.begin(), rr); } else { api_log_printf("[DORTRANS] socket #%d auth failed, closing\r\n", pRetranslator->sock); pRetranslator->connection_status = RETRANSLATOR_STATUS_INIT; closesocket(pRetranslator->sock); pRetranslator->sock = -1; } break; case 0: acks_count = (ph->pack_len - sizeof(DORPACKETHEADER)) / 4; acks = (unsigned int *)((unsigned char *)ph + sizeof(DORPACKETHEADER)); for (size_t i = 0; i < acks_count; i++) { for (std::list<RETRANSLATOR_RECORD>::iterator record = pRetranslator->records_list.begin(); record != pRetranslator->records_list.end(); record++) { RETRANSLATOR_RECORD &rr = *record; if (rr.id == *acks) { pRetranslator->records_list.erase(record); break; } } 
acks++; } pRetranslator->status = DORTRANS_STATUS_ONLINE; pRetranslator->timeout = 0; api_log_printf("[DORTRANS] %u packets acked, socket #%d\r\n", acks_count, pRetranslator->sock); break; case 1: break; default: memset(&ackp, 0, sizeof(ackp)); ackp.frame_tag[0] = '~'; ackp.frame_tag[1] = '~'; ackp.frame_len = sizeof(ackp); ackp.pack_len = sizeof(ackp.pack_len) + sizeof(ackp.pack_num) + sizeof(ackp.pack_type) + sizeof(ackp.pack_reserved) + sizeof(ackp.pack_ack_num); ackp.pack_num = pRetranslator->pack_num++; ackp.pack_type = 0; ackp.pack_ack_num = ph->pack_num; uptr = (unsigned char *)&ackp; ackp.frame_crc = 0x00; for (int j = 0; j < sizeof(ackp) - 1; j++) ackp.frame_crc ^= *uptr++; api_log_printf("[DORTRANS] Send ack packet, socket #%d\r\n", pRetranslator->sock); send(pRetranslator->sock, (char *)&ackp, sizeof(ackp), 0); } } } break; } } } if (FD_ISSET(pRetranslator->sock, &pContext->fdWriteSet)) { #ifdef VERBOSE api_log_printf("[DORTRANS] socket #%d is writible\r\n", pRetranslator->sock); #endif switch (pRetranslator->connection_status) { case RETRANSLATOR_STATUS_INIT: api_log_printf("[DORTRANS] ERROR, socket #%d is in intial state\r\n", pRetranslator->sock); break; case RETRANSLATOR_STATUS_CONNECTING: api_log_printf("[DORTRANS] socket #%d connected\r\n", pRetranslator->sock); pRetranslator->connection_status = RETRANSLATOR_STATUS_CONNECTED; pRetranslator->status = DORTRANS_STATUS_AUTH; pRetranslator->timeout = now + 30; pRetranslator->last_send = now + 120; pRetranslator->pack_num = 0; case RETRANSLATOR_STATUS_CONNECTED: switch (pRetranslator->status) { case DORTRANS_STATUS_AUTH: memset(&ap, 0, sizeof(ap)); ap.frame_tag[0] = '~'; ap.frame_tag[1] = '~'; ap.frame_len = sizeof(ap); ap.pack_len = sizeof(ap.pack_len) + sizeof(ap.pack_num) + sizeof(ap.pack_type) + sizeof(ap.pack_reserved) + sizeof(ap.auth_code); ap.pack_num = pRetranslator->pack_num++; ap.pack_type = 1; ap.auth_code[0] = 0x57; ap.auth_code[1] = 0x61; ap.auth_code[2] = 0x1A; ap.auth_code[3] = 0xA3; 
ap.auth_code[4] = 0xB6; ap.auth_code[5] = 0x4A; ap.auth_code[6] = 0xB4; ap.auth_code[7] = 0x4B; ap.auth_code[8] = 0x80; ap.auth_code[9] = 0x47; ap.auth_code[10] = 0x8B; ap.auth_code[11] = 0x4B; ap.auth_code[12] = 0xAD; ap.auth_code[13] = 0xC0; ap.auth_code[14] = 0xD4; ap.auth_code[15] = 0x93; uptr = (unsigned char *)≈ ap.frame_crc = 0x00; for (size_t i = 0; i < sizeof(ap) - 1; i++) ap.frame_crc ^= *uptr++; send(pRetranslator->sock, (char *)&ap, sizeof(ap), 0); api_log_printf("[DORTRANS] socket #%d send auth request\r\n", pRetranslator->sock); pRetranslator->status = DORTRANS_STATUS_WAITACK; pRetranslator->timeout = now + 30; pRetranslator->last_send = now; break; case DORTRANS_STATUS_ONLINE: spinlock_lock(&pRetranslator->spinlock); if (pRetranslator->records_list.empty()) { spinlock_unlock(&pRetranslator->spinlock); break; } nNumRecord = 0; for (std::list<RETRANSLATOR_RECORD>::iterator record = pRetranslator->records_list.begin(); record != pRetranslator->records_list.end(); record++) { RETRANSLATOR_RECORD &rr = *record; np.p[nNumRecord].terminal_id = rr.nupe; np.p[nNumRecord].terminal_type = 733; np.p[nNumRecord].arrive_time = rr.t; np.p[nNumRecord].t = rr.t; np.p[nNumRecord].flags = 0x60; np.p[nNumRecord].lat = rr.latitude; np.p[nNumRecord].lon = rr.longitude; np.p[nNumRecord].speed = rr.speed / 10; np.p[nNumRecord].cog = rr.cog; np.p[nNumRecord].alt = rr.altitude; np.p[nNumRecord].nsat = 3; np.p[nNumRecord].mileage = 0; np.p[nNumRecord].flags2 = 0; np.p[nNumRecord].csq = 21; if ((rr.latitude != 0)&&(rr.longitude != 0)) np.p[nNumRecord].flags |= 0x80; if (rr.flags1 & RECORD_FLAG1_IGNITION) np.p[nNumRecord].flags |= 0x02; rr.id = pRetranslator->pack_num + nNumRecord; nNumRecord++; if (nNumRecord == DORTRANS_MAX_RECORDS) break; } spinlock_unlock(&pRetranslator->spinlock); if (nNumRecord > 0) { np.frame_tag[0] = '~'; np.frame_tag[1] = '~'; np.frame_len = 13; for (size_t i = 0; i < nNumRecord; i++) { np.p[i].pack_len = sizeof(np.p[i]); np.p[i].pack_num = 
pRetranslator->pack_num++; np.p[i].pack_type = 2; np.frame_len += np.p[i].pack_len; } uptr = (unsigned char *)&np; np.frame_crc = 0x00; for (size_t i = 0; i < np.frame_len - 1; i++) np.frame_crc ^= *uptr++; send(pRetranslator->sock, (char *)&np, np.frame_len - 1, 0); send(pRetranslator->sock, (char *)&np.frame_crc, 1, 0); pRetranslator->status = DORTRANS_STATUS_WAITACK; pRetranslator->timeout = now + 30; pRetranslator->last_send = now; api_log_printf("[DORTRANS] Send %u records, socket #%d\r\n", nNumRecord, pRetranslator->sock); } } break; } } }
/**
 * @brief deactivates the urb: unlink it from the host's QH schedule
 * @param host struct uhci_host
 * @param urb struct usb_request_block
 * @return resulting urb->status (URB_STATUS_UNLINKED on success)
 */
u8
uhci_deactivate_urb(struct uhci_host *host, struct usb_request_block *urb)
{
	u8 status, type;

	/* nothing to do if already unlinked */
	if (urb->status == URB_STATUS_UNLINKED)
		return urb->status;

	dprintft(5, "%s: The urb link is %p <- %p -> %p.\n",
		 __FUNCTION__, urb->link_prev, urb, urb->link_next);

	spinlock_lock(&host->lock_hfl);

	/* urb link: four cases depending on the urb's position in the
	 * FSBR (full-speed bandwidth reclamation) loopback */
	if ((urb == host->fsbr_loop_head) && (urb == host->fsbr_loop_tail)) {
		/* sole member: dissolve the loop entirely */
		dprintft(2, "%04x: %s: FSBR unlooped \n",
			 host->iobase, __FUNCTION__);
		host->fsbr = 0;
		host->fsbr_loop_head = host->fsbr_loop_tail =
			(struct usb_request_block *)NULL;
		/* qh */
		URB_UHCI(urb->link_prev)->qh->link = UHCI_QH_LINK_TE;
	} else if (urb == host->fsbr_loop_tail) {
		/* tail of a FSBR loopback */
		dprintft(2, "%04x: %s: the tail of a FSBR loopback\n",
			 host->iobase, __FUNCTION__);
		host->fsbr_loop_tail = urb->link_prev;
		/* qh: re-close the loop from the new tail to the head */
		URB_UHCI(host->fsbr_loop_tail)->qh->link =
			(phys32_t)URB_UHCI(host->fsbr_loop_head)->qh_phys |
			UHCI_QH_LINK_QH;
	} else if (host->fsbr_loop_head == urb) {
		/* head of a FSBR loopback */
		dprintft(2, "%04x: %s: the head of a FSBR loopback\n",
			 host->iobase, __FUNCTION__);
		host->fsbr_loop_head = urb->link_next;
		/* qh: loop now targets the new head; bypass the urb */
		URB_UHCI(host->fsbr_loop_tail)->qh->link =
			(phys32_t)URB_UHCI(host->fsbr_loop_head)->qh_phys |
			UHCI_QH_LINK_QH;
		URB_UHCI(urb->link_prev)->qh->link = URB_UHCI(urb)->qh->link;
	} else {
		/* qh: plain mid-chain removal */
		URB_UHCI(urb->link_prev)->qh->link = URB_UHCI(urb)->qh->link;
	}
	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;

	/* MEMO: There must exist urb->link_prev
	   because of the skelton. */
	urb->link_prev->link_next = urb->link_next;
	if (urb->link_next)
		urb->link_next->link_prev = urb->link_prev;

	urb->status = URB_STATUS_UNLINKED;

	/* pull back any tail pointer that referenced the removed urb */
	type = (urb->endpoint) ?
		USB_EP_TRANSTYPE(urb->endpoint) : USB_ENDPOINT_TYPE_CONTROL;
	switch (type) {
	case USB_ENDPOINT_TYPE_INTERRUPT:
		/* through */
	case USB_ENDPOINT_TYPE_CONTROL:
		if (host->tailurb[URB_TAIL_CONTROL] == urb)
			host->tailurb[URB_TAIL_CONTROL] = urb->link_prev;
		/* through */
	case USB_ENDPOINT_TYPE_BULK:
		if (host->tailurb[URB_TAIL_BULK] == urb)
			host->tailurb[URB_TAIL_BULK] = urb->link_prev;
		break;
	case USB_ENDPOINT_TYPE_ISOCHRONOUS:
	default:
		printf("%s: transfer type(%02x) unsupported.\n",
		       __FUNCTION__, type);
	}

	status = urb->status;
	spinlock_unlock(&host->lock_hfl);

	return status;
}
static void enumerator_select(RETRANSLATOR *pRetranslator, void *ctx) { unsigned long status; char port[12]; struct addrinfo sHints, *psAddrInfo, *p; ENUMCONTEXT *pContext = (ENUMCONTEXT *)ctx; time_t now = time(NULL); switch (pRetranslator->connection_status) { case RETRANSLATOR_STATUS_INIT: memset(&sHints, 0, sizeof(struct addrinfo)); sHints.ai_family = PF_UNSPEC; sHints.ai_socktype = SOCK_STREAM; sHints.ai_protocol = IPPROTO_TCP; sHints.ai_flags = AI_PASSIVE; sprintf(port, "%u", pRetranslator->port); status = getaddrinfo(pRetranslator->host.c_str(), port, &sHints, &psAddrInfo); if (status != 0) { api_log_printf("[DORTRANS] getaddrinfo failed for %s:%u\r\n", pRetranslator->host.c_str(), pRetranslator->port); break; } for (p = psAddrInfo; p; p = p->ai_next) { pRetranslator->sock = socket(p->ai_family, p->ai_socktype, p->ai_protocol); if (pRetranslator->sock < 0) { api_log_printf("[DORTRANS] errno #%d on creating socket\r\n", errno); continue; } status = 1; #ifdef _MSC_VER ioctlsocket(pRetranslator->sock, FIONBIO, &status); #else fcntl(pRetranslator->sock, F_SETFL, O_NONBLOCK); #endif api_log_printf("[DORTRANS] socket #%d connecting to %s:%u\r\n", pRetranslator->sock, pRetranslator->host.c_str(), pRetranslator->port); status = connect(pRetranslator->sock, p->ai_addr, p->ai_addrlen); if (status == 0) { pRetranslator->connection_status = RETRANSLATOR_STATUS_CONNECTED; pRetranslator->timeout = 0; pRetranslator->status = DORTRANS_STATUS_AUTH; api_log_printf("[DORTRANS] socket #%d connected immidiatly\r\n"); break; } else { int error; #ifdef _MSC_VER error = WSAGetLastError(); if (error == WSAEWOULDBLOCK) pRetranslator->connection_status = RETRANSLATOR_STATUS_CONNECTING; #else error = errno; if (error == EINPROGRESS) pRetranslator->connection_status = RETRANSLATOR_STATUS_CONNECTING; #endif if (pRetranslator->connection_status != RETRANSLATOR_STATUS_CONNECTING) { api_log_printf("[DORTRANS] error #%d on connecting to %s:%u\r\n", error, pRetranslator->host.c_str(), 
pRetranslator->port); closesocket(pRetranslator->sock); pRetranslator->sock = -1; } else { pRetranslator->timeout = now + 60; } break; } } freeaddrinfo(psAddrInfo); break; case RETRANSLATOR_STATUS_CONNECTING: if (pRetranslator->timeout <= now) { api_log_printf("[DORTRANS] socket #%d connect timeout, closing\r\n", pRetranslator->sock); pRetranslator->connection_status = RETRANSLATOR_STATUS_INIT; closesocket(pRetranslator->sock); pRetranslator->sock = -1; break; } FD_SET(pRetranslator->sock, &pContext->fdReadSet); FD_SET(pRetranslator->sock, &pContext->fdWriteSet); if (pRetranslator->sock > pContext->max_fd) pContext->max_fd = pRetranslator->sock; break; case RETRANSLATOR_STATUS_CONNECTED: FD_SET(pRetranslator->sock, &pContext->fdReadSet); switch (pRetranslator->status) { case DORTRANS_STATUS_WAITACK: if ((pRetranslator->timeout != 0)&&(pRetranslator->timeout <= now)) { api_log_printf("[DORTRANS] socket #%d ack timeout, closing\r\n", pRetranslator->sock); pRetranslator->connection_status = RETRANSLATOR_STATUS_INIT; closesocket(pRetranslator->sock); pRetranslator->sock = -1; break; } if (pRetranslator->sock > pContext->max_fd) pContext->max_fd = pRetranslator->sock; break; case DORTRANS_STATUS_ONLINE: spinlock_lock(&pRetranslator->spinlock); if (!pRetranslator->records_list.empty()) { FD_SET(pRetranslator->sock, &pContext->fdWriteSet); } spinlock_unlock(&pRetranslator->spinlock); if (pRetranslator->sock > pContext->max_fd) pContext->max_fd = pRetranslator->sock; if (pRetranslator->last_send + 120 < now) { DORTRANSPING dp; memset(&dp, 0, sizeof(dp)); dp.frame_tag[0] = '~'; dp.frame_tag[1] = '~'; dp.frame_len = sizeof(DORTRANSPING); dp.pack_len = sizeof(dp.pack_len) + sizeof(dp.pack_num) + sizeof(dp.pack_type) + sizeof(dp.pack_reserved); dp.pack_num = pRetranslator->pack_num++; dp.pack_type = 10; unsigned char *uptr = (unsigned char *)&dp; dp.frame_crc = 0x00; for (int j = 0; j < sizeof(dp) - 1; j++) dp.frame_crc ^= *uptr++; api_log_printf("[DORTRANS] Send ping packet, 
socket #%d\r\n", pRetranslator->sock); send(pRetranslator->sock, (char *)&dp, sizeof(dp), 0); pRetranslator->last_send = now; } break; } } }
/*
 * init_hub_device() - USB hook callback run on SetConfiguration()
 * replies; detects hub-class devices and registers a hook that fires
 * on ClearPortFeature() requests to that hub.
 * Always returns USB_HOOK_PASS (never blocks the transfer).
 */
static int
init_hub_device(struct usb_host *usbhc, struct usb_request_block *urb,
		void *arg)
{
	u8 devadr, cls;
	struct usb_device *dev;
	struct usb_device_handle *handler;
	/* setup-packet match pattern — presumably ClearFeature
	 * (bmRequestType 0x23, bRequest 0x01 => 0x0123 little-endian);
	 * confirm against the hook matcher's byte order. */
	static const struct usb_hook_pattern pat_clrpf = {
		.pid = USB_PID_SETUP,
		.mask = 0x000000000000ffffULL,
		.pattern = 0x000000000000000123ULL,
		.offset = 0,
		.next = NULL
	};

	devadr = urb->address;
	dev = urb->dev;

	/* an interface descriptor must exists */
	if (!dev || !dev->config || !dev->config->interface ||
	    !dev->config->interface->altsetting) {
		dprintft(1, "HUB(%02x): interface descriptor not found.\n",
			 devadr);
		return USB_HOOK_PASS;
	}

	/* only Hub devices interests (bInterfaceClass 0x09 == Hub) */
	cls = dev->config->interface->altsetting->bInterfaceClass;
	if (cls != 0x09)
		return USB_HOOK_PASS;

	dprintft(1, "HUB(%02x): A Hub Class device found\n", devadr);

	/* a handle already attached means this is likely a reset */
	if (dev->handle) {
		dprintft(1, "HUB(%02x): maybe reset.\n", devadr);
		return USB_HOOK_PASS;
	}

	/* NOTE(review): usb_new_dev_handle() result is dereferenced
	 * unchecked — presumably it cannot fail; confirm. */
	handler = usb_new_dev_handle (usbhc, dev);
	handler->remove = usbhub_remove;
	dev->handle = handler;

	/* notify whenever ClearPortFeature() issued. */
	spinlock_lock(&usbhc->lock_hk);
	usb_hook_register (usbhc, USB_HOOK_REPLY,
			   USB_HOOK_MATCH_DEV | USB_HOOK_MATCH_ENDP |
			   USB_HOOK_MATCH_DATA,
			   devadr, 0, &pat_clrpf,
			   usbhub_connect_changed, NULL, dev);
	spinlock_unlock(&usbhc->lock_hk);

	return USB_HOOK_PASS;
}

/*
 * usbhub_init_handle() - register the SetConfiguration() hook that
 * triggers hub-class detection (init_hub_device) on this host.
 */
void
usbhub_init_handle(struct usb_host *host)
{
	/* setup-packet pattern for SetConfiguration (bRequest 0x09) */
	static const struct usb_hook_pattern pat_setconf = {
		.pid = USB_PID_SETUP,
		.mask = 0x000000000000ffffULL,
		.pattern = 0x0000000000000900ULL,
		.offset = 0,
		.next = NULL
	};

	/* check a device class whenever SetConfigration() issued. */
	spinlock_lock(&host->lock_hk);
	usb_hook_register(host, USB_HOOK_REPLY,
			  USB_HOOK_MATCH_ENDP | USB_HOOK_MATCH_DATA,
			  0, 0, &pat_setconf, init_hub_device, NULL, NULL);
	spinlock_unlock(&host->lock_hk);
	printf("USB HUB Class handler registered.\n");

	return;
}

/*
 * hub_portdevice_register() - look up the hub that owns hub_port in
 * the host's device list and record it as dev's parent.
 */
void
hub_portdevice_register(struct usb_host *host,
			u64 hub_port, struct usb_device *dev)
{
	struct usb_device *hubdev;

	for (hubdev = host->device; hubdev; hubdev = hubdev->next)
		if (hub_port == hubdev->portno) {
			dev->parent = hubdev;
			dprintft(3, "HUB(%02x): HUB PORT(%d) device "
				 "checked and registered.\n",
				 hubdev->devnum,
				 (int)dev->portno & USB_PORT_MASK);
			break;
		}
	if (!hubdev)
		dprintft(1, "HUB(%02x): HUB device not found!?!?\n",
			 dev->devnum);

	return;
}
/* Remove a library from the global store tree, keyed by its name;
 * __io_storeLock serializes access to __io_storeLibraries. */
void io_storeRemoveLibrary(io_library_t *library) { spinlock_lock(&__io_storeLock); atree_remove(__io_storeLibraries, (void *)library->name); spinlock_unlock(&__io_storeLock); }
static ssize_t pcap_sg_read_pcap_pkt(int fd, struct pcap_pkthdr *hdr, uint8_t *packet, size_t len) { /* In contrast to writing, reading gets really ugly ... */ spinlock_lock(&lock); if (likely(avail - used >= sizeof(*hdr) && iov[c].iov_len - iov_used >= sizeof(*hdr))) { __memcpy_small(hdr, iov[c].iov_base + iov_used, sizeof(*hdr)); iov_used += sizeof(*hdr); used += sizeof(*hdr); } else { size_t remainder, offset = 0; if (avail - used < sizeof(*hdr)) return -ENOMEM; offset = iov[c].iov_len - iov_used; remainder = sizeof(*hdr) - offset; assert(offset + remainder == sizeof(*hdr)); __memcpy_small(hdr, iov[c].iov_base + iov_used, offset); used += offset; iov_used = 0; c++; if (c == IOVSIZ) { /* We need to refetch! */ c = 0; avail = readv(fd, iov, IOVSIZ); if (avail < 0) return -EIO; used = 0; } /* Now we copy the remainder and go on with business ... */ __memcpy_small(hdr, iov[c].iov_base + iov_used, remainder); iov_used += remainder; used += remainder; } if (likely(avail - used >= hdr->len && iov[c].iov_len - iov_used >= hdr->len)) { __memcpy(packet, iov[c].iov_base + iov_used, hdr->len); iov_used += hdr->len; used += hdr->len; } else { size_t remainder, offset = 0; if (avail - used < hdr->len) return -ENOMEM; offset = iov[c].iov_len - iov_used; remainder = hdr->len - offset; assert(offset + remainder == hdr->len); __memcpy(packet, iov[c].iov_base + iov_used, offset); used += offset; iov_used = 0; c++; if (c == IOVSIZ) { /* We need to refetch! */ c = 0; avail = readv(fd, iov, IOVSIZ); if (avail < 0) return -EIO; used = 0; } /* Now we copy the remainder and go on with business ... */ __memcpy(packet, iov[c].iov_base + iov_used, remainder); iov_used += remainder; used += remainder; } spinlock_unlock(&lock); if (unlikely(hdr->len == 0)) return -EINVAL; /* Bogus packet */ return sizeof(*hdr) + hdr->len; }
/**
 * @brief Submit a control message (SETUP / optional DATA / STATUS stages)
 *        as a UHCI transfer descriptor chain hung off a new queue head.
 * @param host     struct uhci_host *: the host controller
 * @param device   struct usb_device *: the target device (may be NULL)
 * @param endpoint u8: endpoint number to address
 * @param csetup   struct usb_ctrl_setup *: the 8-byte SETUP payload
 * @param callback int (*)(...): completion callback stored in the urb
 * @param arg      void *: opaque argument passed to @a callback
 * @param ioc      int: nonzero to request an interrupt-on-complete on
 *                 the status-stage TD
 * @return the activated urb, or NULL on any allocation/activation failure
 *         (partially built urbs are torn down via destroy_urb()).
 */
struct usb_request_block *
uhci_submit_control(struct uhci_host *host,
		    struct usb_device *device, u8 endpoint,
		    struct usb_ctrl_setup *csetup,
		    int (*callback)(struct usb_host *,
				    struct usb_request_block *, void *),
		    void *arg, int ioc)
{
	struct usb_request_block *urb;
	struct usb_endpoint_descriptor *epdesc;
	struct uhci_td_meta *tdm;
	struct usb_buffer_list *b;
	size_t pktsize;

	epdesc = usb_epdesc(device, endpoint);
	if (!epdesc) {
		dprintft(2, "%04x: %s: no endpoint(%d) found.\n",
			 host->iobase, __FUNCTION__, endpoint);
		return (struct usb_request_block *)NULL;
	}

	dprintft(5, "%s: epdesc->wMaxPacketSize = %d\n",
		 __FUNCTION__, epdesc->wMaxPacketSize);

	urb = create_urb(host);
	if (!urb)
		return (struct usb_request_block *)NULL;
	/* device fields are read under its lock throughout */
	if (device){
		spinlock_lock(&device->lock_dev);
		init_urb(urb, device->devnum, epdesc, callback, arg);
		spinlock_unlock(&device->lock_dev);
	}

	/* create a QH */
	URB_UHCI(urb)->qh = uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys);
	if (!URB_UHCI(urb)->qh)
		goto fail_submit_control;
	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;
	/* NOTE(review): pktsize is assigned but not used below — confirm
	 * it is leftover from an earlier revision. */
	pktsize = epdesc->wMaxPacketSize;

	/* SETUP TD */
	URB_UHCI(urb)->tdm_head = tdm = uhci_new_td_meta(host, NULL);
	if (!tdm)
		goto fail_submit_control;
	URB_UHCI(urb)->qh->element = URB_UHCI(urb)->qh_element_copy =
		tdm->td_phys;

	/* buffer holding the 8-byte SETUP packet, copied from csetup
	 * NOTE(review): zalloc_usb_buffer_list() result is not
	 * NULL-checked before b->len — confirm it cannot fail. */
	b = zalloc_usb_buffer_list();
	b->len = sizeof(*csetup);
	b->vadr = malloc_from_pool(host->pool, b->len, &b->padr);
	if (!b->vadr) {
		free(b);
		goto fail_submit_control;
	}
	urb->buffers = b;
	memcpy((void *)b->vadr, (void *)csetup, b->len);

	tdm->td->status = tdm->status_copy =
		UHCI_TD_STAT_AC | uhci_td_maxerr(3);
	if (device){
		spinlock_lock(&device->lock_dev);
		tdm->td->token = tdm->token_copy =
			uhci_td_explen(sizeof(*csetup)) |
			UHCI_TD_TOKEN_ENDPOINT(epdesc->bEndpointAddress) |
			UHCI_TD_TOKEN_DEVADDRESS(device->devnum) |
			UHCI_TD_TOKEN_PID_SETUP;
		spinlock_unlock(&device->lock_dev);
	}
	tdm->td->buffer = (phys32_t)b->padr;

	/* optional DATA stage */
	if (csetup->wLength > 0) {
		b = zalloc_usb_buffer_list();
		b->len = csetup->wLength;
		b->vadr = malloc_from_pool(host->pool, b->len, &b->padr);
		if (!b->vadr) {
			free(b);
			goto fail_submit_control;
		}
		/* prepend: buffer list is kept newest-first */
		b->next = urb->buffers;
		urb->buffers = b;

		if(device){
			spinlock_lock(&device->lock_dev);
			tdm = prepare_buffer_tds(host, (phys32_t)b->padr,
						 b->len, device->devnum,
						 epdesc,
						 UHCI_TD_STAT_AC |
						 UHCI_TD_STAT_SP |
						 uhci_td_maxerr(3));
			spinlock_unlock(&device->lock_dev);
		}
		if (!tdm)
			goto fail_submit_control;

		dprintft(5, "%s: tdm->td_phys = %llx\n",
			 __FUNCTION__, tdm->td_phys);
		URB_UHCI(urb)->tdm_head->next = tdm;
		URB_UHCI(urb)->tdm_head->td->link = tdm->td_phys;
	}

	/* The 1st toggle for SETUP must be 0. */
	uhci_fixup_toggles(URB_UHCI(urb)->tdm_head, epdesc->toggle);

	/* append one more TD for the status stage */
	for (tdm = URB_UHCI(urb)->tdm_head; tdm->next; tdm = tdm->next);
	tdm->next = uhci_new_td_meta(host, NULL);
	if (!tdm->next)
		goto fail_submit_control;

	tdm->next->td->link = UHCI_TD_LINK_TE;
	tdm->next->td->status = UHCI_TD_STAT_AC | uhci_td_maxerr(3);
	if (ioc)
		tdm->next->td->status |= UHCI_TD_STAT_IC;
	if (device){
		spinlock_lock(&device->lock_dev);
		tdm->next->td->token = uhci_td_explen(0) |
			UHCI_TD_TOKEN_ENDPOINT(epdesc->bEndpointAddress) |
			UHCI_TD_TOKEN_DEVADDRESS(device->devnum) |
			UHCI_TD_TOKEN_DT1_TOGGLE;
		spinlock_unlock(&device->lock_dev);
	}
	/* status stage runs in the opposite direction of the data stage */
	tdm->next->td->token |= (csetup->wLength > 0) ?
		UHCI_TD_TOKEN_PID_OUT : UHCI_TD_TOKEN_PID_IN;
	tdm->next->td->buffer = 0U;
	tdm->td->link = (phys32_t)tdm->next->td_phys;

	/* link the QH into the frame list */
	if (uhci_activate_urb(host, urb) != URB_STATUS_RUN)
		goto fail_submit_control;
	link_urb(&host->inproc_urbs, urb);

	return urb;
fail_submit_control:
	destroy_urb(host, urb);
	return (struct usb_request_block *)NULL;
}
/*
 * Enter a scheduler critical section by taking the run queue's spinlock.
 * Must be paired with the matching exit/unlock helper.
 *
 * Fix: declared with a proper C prototype "(void)" instead of the
 * old-style empty parameter list "()", which leaves the arguments
 * unchecked by the compiler (and is invalid as a prototype pre-C23).
 */
void sched_enter_critical(void)
{
	spinlock_lock(&run_queue->spinlock);
}
/**
 * @brief Submit an asynchronous (bulk/interrupt) urb as a TD chain on
 *        a freshly allocated queue head.
 * @param host     struct uhci_host *: the host controller
 * @param device   struct usb_device *: target device (may be NULL)
 * @param epdesc   struct usb_endpoint_descriptor *: target endpoint;
 *                 its toggle field is updated on success
 * @param data     void *: payload to send for OUT transfers (ignored
 *                 when size == 0)
 * @param size     u16: payload length in bytes
 * @param callback int (*)(...): completion callback stored in the urb
 * @param arg      void *: opaque argument passed to @a callback
 * @param ioc      int: nonzero to set interrupt-on-complete on the
 *                 first TD
 * @return the activated urb, or NULL on any allocation/activation
 *         failure (partial state is torn down via destroy_urb()).
 */
static struct usb_request_block *
uhci_submit_async(struct uhci_host *host, struct usb_device *device,
		  struct usb_endpoint_descriptor *epdesc, void *data,
		  u16 size,
		  int (*callback)(struct usb_host *,
				  struct usb_request_block *, void *),
		  void *arg, int ioc)
{
	struct usb_request_block *urb;
	size_t pktsize;

	urb = create_urb(host);
	if (!urb)
		return (struct usb_request_block *)NULL;
	/* device fields are read under its lock throughout */
	if (device){
		spinlock_lock(&device->lock_dev);
		init_urb(urb, device->devnum, epdesc, callback, arg);
		spinlock_unlock(&device->lock_dev);
	}

	/* create a QH */
	URB_UHCI(urb)->qh = uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys);
	if (!URB_UHCI(urb)->qh)
		goto fail_submit_async;
	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;
	/* NOTE(review): pktsize is assigned but not used below — confirm
	 * it is leftover from an earlier revision. */
	pktsize = epdesc->wMaxPacketSize;

	/* buffer and TD */
	if (size > 0) {
		struct usb_buffer_list *b;

		/* NOTE(review): zalloc_usb_buffer_list() result is not
		 * NULL-checked before b->len — confirm it cannot fail. */
		b = zalloc_usb_buffer_list();
		b->len = size;
		b->vadr = malloc_from_pool(host->pool, b->len, &b->padr);
		if (!b->vadr) {
			free(b);
			goto fail_submit_async;
		}

		/* copy data if OUT direction */
		if (!USB_EP_DIRECT(epdesc))
			memcpy((void *)b->vadr, data, b->len);

		urb->buffers = b;
	}
	if (device){
		spinlock_lock(&device->lock_dev);
		URB_UHCI(urb)->tdm_head =
			prepare_buffer_tds(host, (urb->buffers) ?
					   (phys32_t)urb->buffers->padr : 0U,
					   size, device->devnum, epdesc,
					   UHCI_TD_STAT_AC |
					   UHCI_TD_STAT_SP |
					   uhci_td_maxerr(3));
		spinlock_unlock(&device->lock_dev);
	}
	if (!URB_UHCI(urb)->tdm_head)
		goto fail_submit_async;

	/* link the TDs into the QH */
	URB_UHCI(urb)->qh->element = URB_UHCI(urb)->tdm_head->td_phys;

	/* set IOC */
	if (ioc)
		URB_UHCI(urb)->tdm_head->td->status |= UHCI_TD_STAT_IC;

	/* set up toggles in TDs */
	epdesc->toggle = uhci_fixup_toggles(URB_UHCI(urb)->tdm_head,
					    epdesc->toggle);

	/* link the QH into the frame list */
	if (uhci_activate_urb(host, urb) != URB_STATUS_RUN)
		goto fail_submit_async;
	link_urb(&host->inproc_urbs, urb);

	return urb;
fail_submit_async:
	destroy_urb(host, urb);
	return (struct usb_request_block *)NULL;
}
/**
 * @brief Wait for the in-flight GPMI DMA chain to complete.
 * @param u32usec    timeout in microseconds
 * @param chipSelect NAND chip-select; currently ignored — the channel
 *                   offset is commented out below (TODO confirm this
 *                   is intentional for single-chip configurations)
 * @return SUCCESS with the DMA's BAR return code on completion, or
 *         ERROR_DDI_NAND_GPMI_DMA_TIMEOUT on timeout
 */
int gpmi_wait_for_dma(uint32_t u32usec, uint32_t chipSelect)
{
	reg32_t r32ChipDmaNumber = NAND0_APBH_CH; // + chipSelect;
	bool bTimedOut = FALSE;
	int rtStatus = SUCCESS;

#if 1 //def RTOS_THREADX
	// Wait for the IRQ to unlock the spinlock.
	int lockResult = spinlock_lock(&g_gpmi.dmaInfo.irqSpinlock, u32usec);

	// Note that, in the RTOS_THREADX case, SEMA.PHORE register can easily be nonzero if the CPU is running fast.
	// (The DMA engine can trigger the ISR at the end of the DMA, before decrementing
	// the SEMA.PHORE count, thus creating a race condition that lets us find a
	// nonzero SEMA.PHORE value here.)
	//
	// Since SEMA.PHORE can still be nonzero, we cannot use it as
	// evidence of timeout. We have to rely on the RTOS timeout indicator.
	bTimedOut = (lockResult != 0); //( TX_SUCCESS != retCode_tx_semaphore );
#else // RTOS_THREADX not defined
	// NOTE(review): this polling branch references i32Sema, which has
	// no visible declaration here — it would not compile if this
	// branch were ever enabled. Confirm before flipping the #if.
	// Poll for DMA completion.
	{
		uint64_t u64StartTime;

		// Microsecond read - always read at start of transaction so that if
		// ThreadX times out, that time is included in the overall timeout time.
		u64StartTime = g_gpmi.dmaInfo.uStartDMATime;

		// End of DMA chain will decrement the hardware semaphore. Poll the hardware semaphore for
		// DMA completion.
		do {
			i32Sema = HW_APBH_CHn_SEMA_RD(r32ChipDmaNumber) & BM_APBH_CHn_SEMA_PHORE;
		} while ((i32Sema != 0) && ( (hw_profile_GetMicroseconds() - u64StartTime) < u32usec));
	}

	// Re-read the hardware semaphore in case a higher-priority thread caused the timeout between semaphore
	// and timeout test.
	i32Sema = HW_APBH_CHn_SEMA_RD(r32ChipDmaNumber) & BM_APBH_CHn_SEMA_PHORE;

	bTimedOut = (0 != i32Sema);
#endif // ifdef RTOS_THREADX

	//
	// If timeout: return error,
	// else: return BAR field from last DMA command
	//
	if ( bTimedOut ) {
		// The DMA has not completed within the alotted time.
		//
		// Clean up.

		//! @todo Since we don't know exactly what caused the timeout, it
		//! could be beneficial to also reset the GPMI block here.
		//! Note, however, that soft-resetting the GPMI block changes
		//! its register settings. Thus, it would also be necessary to
		//! re-initialize the GPMI settings completely. Otherwise the
		//! GPMI may not work.

		// NOTE(review): the channel-reset / ECC-cleanup sequence below
		// is intentionally disabled; kept for reference.
		// abort dma by resetting channel
		// BW_APBH_CHANNEL_CTRL_RESET_CHANNEL(1 << r32ChipDmaNumber);
		//
		// // Wait for the reset to complete
		// while ( HW_APBH_CHANNEL_CTRL.B.RESET_CHANNEL & (0x1 << r32ChipDmaNumber) )
		// {
		//     ;
		// }
		//
		// // Okay, this is important.
		// // When we read from the NAND using GPMI with ECC,
		// // there will be an ECC interrupt upon completion of the ECC correction.
		// // Thereafter, these actions must happen in sequence:
		// //     1. ECC status must be read.
		// //     2. ECC ISR must be reenabled.
		// //     3. ECC-completion must be cleared, which frees the ECC
		// //        block to process the next data.
		// // The status must be read before the ECC-completion is cleared, or
		// // the next ECC cycle will overwrite the status. In the case of a
		// // successful DMA and ECC, the code that reads the ECC status
		// // also performs steps 2 and 3.
		// //
		// // Q: What happens if the DMA times-out for some reason?
		// // A: Somebody may have to clean-up by using steps 2 and 3.
		// //    That somebody is us.
		//
		// // If there was an ECC-completion expected...
		// if (kNandGpmiDmaWaitMask_Ecc & g_gpmi.dmaInfo.u16DmaWaitMask)
		// {
		//     // ...then we have to clear the ECC-completion and the ECC circuit.
		//     // It is not necessary to reset the BCH block after an "uncorrectable" error.
		//     // In fact, due to a 378x chip bug it is not possible to reset the
		//     // BCH block after it has been used to transfer data.
		//
		//     // Clear the ECC-completion.
		//     gpmi_clear_ecc_isr_enable( );
		// }

		rtStatus = ERROR_DDI_NAND_GPMI_DMA_TIMEOUT;
	} else {
		// The DMA descriptor chain was set up with the alternate meaning
		// of the BAR register. Rather than containing an
		// address at this point, it contains a return-code that indicates whether
		// the "success" or "failure" part of the chain executed last.
		// So, here we get that return code.
		rtStatus = (int)BF_RDn(APBH_CHn_BAR, r32ChipDmaNumber, ADDRESS);
	}

	return rtStatus;
}
/*
 * Acquire exclusive access to the array by taking its embedded spinlock.
 * Callers release it via the matching unlock helper — TODO confirm the
 * counterpart's name (not visible in this chunk).
 */
void array_lock(array_t *array)
{
	spinlock_lock(&array->lock);
}