/*
 * Dispatch poll() results for the data sockets of thread slot h.
 *
 * pfds is the pollfd array that was handed to poll(); entry 0 is skipped
 * (presumably the thread's command/wakeup fd — handled elsewhere; verify
 * against the caller).  For each entry with pending revents, the matching
 * slot in ts[h].ps[] is looked up via the psi[] index map, the revents are
 * translated to SOCK_THREAD_FD_* flags, the already-serviced monitor flags
 * are removed from the poll set, and the thread's callback is invoked.
 *
 * Note: the loop bound is ts[h].poll_count, not the count argument; count
 * is only sanity-checked against it via asrt().
 */
static void process_data_sock(int h, struct pollfd *pfds, int count)
{
    asrt(count <= ts[h].poll_count);
    int i;
    for( i= 1; i < ts[h].poll_count; i++)
    {
        if(pfds[i].revents)
        {
            /* psi[] maps a pollfd index back to its ps[] slot index. */
            int ps_i = ts[h].psi[i];
            asrt(pfds[i].fd == ts[h].ps[ps_i].pfd.fd);
            uint32_t user_id = ts[h].ps[ps_i].user_id;
            int type = ts[h].ps[ps_i].type;
            int flags = 0;
            print_events(pfds[i].revents);
            /* Translate poll revents into the socket-thread flag set. */
            if(IS_READ(pfds[i].revents))
            {
                flags |= SOCK_THREAD_FD_RD;
            }
            if(IS_WRITE(pfds[i].revents))
            {
                flags |= SOCK_THREAD_FD_WR;
            }
            if(IS_EXCEPTION(pfds[i].revents))
            {
                flags |= SOCK_THREAD_FD_EXCEPTION;
                /* On an exception, remove the whole slot (all of its
                 * registered flags), not just the flags seen here. */
                remove_poll(h, &ts[h].ps[ps_i], ts[h].ps[ps_i].flags);
            }
            else if(flags)
                /* Remove only the monitor flags that were just processed;
                 * the callback is expected to re-arm them if needed. */
                remove_poll(h, &ts[h].ps[ps_i], flags);
            /* Notify the owner of this fd about the translated events. */
            if(flags)
                ts[h].callback(pfds[i].fd, type, flags, user_id);
        }
    }
}
//DispatchException,called by GeneralIntHandler to handle exception,include //system call. static VOID DispatchException(__COMMON_OBJECT* lpThis, LPVOID lpEsp, UCHAR ucVector) { __SYSTEM* lpSystem = (__SYSTEM*)lpThis; __INTERRUPT_OBJECT* lpIntObj = NULL; if(NULL == lpSystem) { return; } if(!IS_EXCEPTION(ucVector)) //Not a exception. { return; } //lpIntObj = lpSystem->lpInterruptVector[ucVector]; lpIntObj = lpSystem->InterruptSlotArray[ucVector].lpFirstIntObject; if(NULL == lpIntObj) //Null exception,call default exception handler. { DefaultExcepHandler(lpEsp,ucVector); //Update exception counter. lpSystem->InterruptSlotArray[ucVector].dwTotalInt ++; return; } //Call the exception handler now.For each exception,only one handler present. lpIntObj->InterruptHandler(lpEsp,lpIntObj->lpHandlerParam); lpSystem->InterruptSlotArray[ucVector].dwTotalInt ++; lpSystem->InterruptSlotArray[ucVector].dwSuccHandledInt ++; return; }
/*
 * GeneralIntHandler - common entry point for every interrupt/exception.
 *
 * Narrows the raw vector to a byte and routes it: exception vectors go to
 * System.DispatchException, everything else to System.DispatchInterrupt.
 */
VOID GeneralIntHandler(DWORD dwVector, LPVOID lpEsp)
{
	UCHAR ucVec = (BYTE)(dwVector);

	if(IS_EXCEPTION(ucVec))
	{
		/* Exception (includes the system-call vector). */
		System.DispatchException((__COMMON_OBJECT*)&System, lpEsp, ucVec);
	}
	else
	{
		/* Plain hardware/software interrupt. */
		System.DispatchInterrupt((__COMMON_OBJECT*)&System, lpEsp, ucVec);
	}
}
/*
 * Drain packets from the TAP driver fd and forward them over BNEP.
 *
 * Runs in BTU context; p_param carries the TAP fd cast into a pointer
 * (NOTE(review): int <-> void* round-trip — assumes the fd fits; TODO
 * confirm an intptr_t-sized cast on 64-bit builds).  At most PAN_POOL_MAX
 * packets are handled per invocation so BTU is not occupied too long and
 * the shared GKI pool is not exhausted.  A packet that could not be
 * delivered is kept in btpan_cb.congest_packet (with congest_packet_size
 * as the "pending" marker) and retried on the next pass.
 */
static void btu_exec_tap_fd_read(void *p_param) {
    struct pollfd ufd;
    int fd = (int)p_param;

    if (fd == -1 || fd != btpan_cb.tap_fd)
        return;

    // Don't occupy BTU context too long, avoid GKI buffer overruns and
    // give other profiles a chance to run by limiting the amount of memory
    // PAN can use from the shared pool buffer.
    for(int i = 0; i < PAN_POOL_MAX && btif_is_enabled() && btpan_cb.flow; i++) {
        BT_HDR *buffer = (BT_HDR *)GKI_getpoolbuf(PAN_POOL_ID);
        if (!buffer) {
            BTIF_TRACE_WARNING("%s unable to allocate buffer for packet.", __func__);
            break;
        }
        // Reserve PAN_MINIMUM_OFFSET of headroom; packet points at the
        // payload area inside the GKI buffer.
        buffer->offset = PAN_MINIMUM_OFFSET;
        buffer->len = GKI_get_buf_size(buffer) - sizeof(BT_HDR) - buffer->offset;

        UINT8 *packet = (UINT8 *)buffer + sizeof(BT_HDR) + buffer->offset;

        // If we don't have an undelivered packet left over, pull one from the TAP driver.
        // We save it in the congest_packet right away in case we can't deliver it in this
        // attempt.
        if (!btpan_cb.congest_packet_size) {
            ssize_t ret = read(fd, btpan_cb.congest_packet, sizeof(btpan_cb.congest_packet));
            switch (ret) {
                case -1:
                    BTIF_TRACE_ERROR("%s unable to read from driver: %s", __func__, strerror(errno));
                    GKI_freebuf(buffer);
                    return;

                case 0:
                    BTIF_TRACE_WARNING("%s end of file reached.", __func__);
                    GKI_freebuf(buffer);
                    return;

                default:
                    btpan_cb.congest_packet_size = ret;
                    break;
            }
        }

        // Copy the (possibly retried) packet into the GKI buffer, clipped
        // to whichever of packet size / buffer capacity is smaller.
        memcpy(packet, btpan_cb.congest_packet, MIN(btpan_cb.congest_packet_size, buffer->len));
        buffer->len = MIN(btpan_cb.congest_packet_size, buffer->len);

        if (buffer->len > sizeof(tETH_HDR) && should_forward((tETH_HDR *)packet)) {
            // Extract the ethernet header from the buffer since the PAN_WriteBuf inside
            // forward_bnep can't handle two pointers that point inside the same GKI buffer.
            tETH_HDR hdr;
            memcpy(&hdr, packet, sizeof(tETH_HDR));

            // Skip the ethernet header.
            buffer->len -= sizeof(tETH_HDR);
            buffer->offset += sizeof(tETH_HDR);
            // NOTE(review): forward_bnep appears to take ownership of
            // buffer; on FORWARD_CONGEST the saved congest_packet is kept
            // pending so it is retried next pass — confirm ownership
            // contract against forward_bnep.
            if (forward_bnep(&hdr, buffer) != FORWARD_CONGEST)
                btpan_cb.congest_packet_size = 0;
        } else {
            // Runt frame or filtered destination: drop it and free the buffer.
            BTIF_TRACE_WARNING("%s dropping packet of length %d", __func__, buffer->len);
            btpan_cb.congest_packet_size = 0;
            GKI_freebuf(buffer);
        }

        // Bail out of the loop if reading from the TAP fd would block.
        ufd.fd = fd;
        ufd.events = POLLIN;
        ufd.revents = 0;
        // Nothing more to read (or fd errored): re-arm the read watch on
        // the socket thread and yield BTU context.
        if(poll(&ufd, 1, 0) <= 0 || IS_EXCEPTION(ufd.revents)) {
            btsock_thread_add_fd(pan_pth, fd, 0, SOCK_THREAD_FD_RD, 0);
            return;
        }
    }
}