ISR(USART1_RX_vect, ISR_BLOCK) { uint8_t receivedByte; if (ringbuf_elements(&USARTtoUSB_Buffer) >= ringbuf_size(&USARTtoUSB_Buffer) - 1 ) return; receivedByte = UDR1; // TODO check for callback Code // set var if waiting for normal reply. // if not awaiting normal reply and receivedByte == 42 call callback. if(!jennic_in_programming_mode && opCode < 0){ if(receivedByte == 42){ callback(); return; } opCode = receivedByte; return; } // removed if condition cause we need the serial in even if theres no USB Connection // if (USB_DeviceState == DEVICE_STATE_Configured) { ringbuf_put(&USARTtoUSB_Buffer, receivedByte); // } }
/* Debug helper: dump the readable region of *rb (up to the ring's
 * full size) as an iovec pair. */
void
ringbuf_dbg(const struct ringbuf* rb)
{
    struct iovec chunks[2];

    ringbuf_readable_iov(rb, chunks, ringbuf_size(rb));
    iovec_dbg(chunks, ARRAYSIZE(chunks));
}
void CDC_In_Task() { /* Read bytes from the USB OUT endpoint and transmit to jennic if programming mode */ if ( ringbuf_elements(&USBtoUSART_Buffer) < ringbuf_size(&USBtoUSART_Buffer)-2 ) { // TODO check int16_t type int16_t ReceivedByte = CDC_Device_ReceiveByte(&VirtualSerial_CDC1_Interface); if ( !(ReceivedByte < 0) ) ringbuf_put(&USBtoUSART_Buffer, ReceivedByte); } }
/* How many bytes we want to flush to the fd for channel c.  Zero
 * unless c is a TO_FD channel with a live fd handle; otherwise the
 * buffered byte count, capped so the 32-bit bytes_written counter
 * cannot overflow. */
static size_t
channel_wanted_writesz(struct channel* c)
{
    bool flushable = (c->dir == CHANNEL_TO_FD) && (c->fdh != NULL);
    if (!flushable)
        return 0;

    size_t counter_headroom = UINT32_MAX - c->bytes_written;
    return XMIN(ringbuf_size(c->rb), counter_headroom);
}
/* Total contiguous allocation size for a gaussian filter over n
 * entries: state header + n typed slots + the ring-buffer backing
 * store for n elements. */
static size_t
gaussian_size(const size_t n)
{
    return sizeof(gaussian_state_t)
         + n * sizeof(gaussian_type_t)
         + ringbuf_size(n);
}
void CDC_Arduino_In_Task() { uint16_t bytes = CDC_Device_BytesReceived(&VirtualSerial_CDC0_Interface); while(bytes--){ /* Read bytes from the USB OUT endpoint and store it for the Arduino Serial Class */ if ( ringbuf_elements(&serialRx_Buffer) < ringbuf_size(&serialRx_Buffer)-2 ) { int16_t ReceivedByte = CDC_Device_ReceiveByte(&VirtualSerial_CDC0_Interface); if ( !(ReceivedByte < 0) ) ringbuf_put(&serialRx_Buffer, ReceivedByte); } else{ return; } } // end while }
void dbgch(const char* label, struct channel** ch, unsigned nrch) { SCOPED_RESLIST(rl_dbgch); unsigned chno; dbglock(); dbg("DBGCH[%s]", label); for (chno = 0; chno < nrch; ++chno) { struct channel* c = ch[chno]; struct pollfd p = channel_request_poll(ch[chno]); const char* pev; switch (p.events) { case POLLIN | POLLOUT: pev = "POLLIN,POLLOUT"; break; case POLLIN: pev = "POLLIN"; break; case POLLOUT: pev = "POLLOUT"; break; case 0: pev = "NONE"; break; default: pev = xaprintf("%xd", p.events); break; } assert(p.fd == -1 || p.fd == c->fdh->fd); dbg(" %-18s size:%-4zu room:%-4zu window:%-4d %s%-2s %p %s", xaprintf("ch[%d=%s]", chno, chname(chno)), ringbuf_size(c->rb), ringbuf_room(c->rb), c->window, (c->dir == CHANNEL_FROM_FD ? "<" : ">"), ((c->fdh != NULL) ? xaprintf("%d", c->fdh->fd) : (c->sent_eof ? "!!" : "!?")), c, pev); } }
/* Return the pollfd this channel wants the event loop to wait on:
 * POLLIN while there is room to read from the fd, POLLOUT while there
 * are buffered bytes to flush to it, or {-1, 0, 0} when idle. */
struct pollfd
channel_request_poll(struct channel* c)
{
    if (channel_wanted_readsz(c))
        return (struct pollfd){c->fdh->fd, POLLIN, 0};

    if (channel_wanted_writesz(c))
        return (struct pollfd){c->fdh->fd, POLLOUT, 0};

    return (struct pollfd){-1, 0, 0};
}

/* Queue iov[0..nio) for writing to a TO_FD channel.  When the ring
 * buffer is empty (and no encoding hack is active) a direct writev(2)
 * is attempted first; whatever the kernel did not accept is then
 * buffered.  Data is silently discarded if the stream is closed. */
void
channel_write(struct channel* c, const struct iovec* iov, unsigned nio)
{
    assert(c->dir == CHANNEL_TO_FD);

    if (c->fdh == NULL)
        return; // If the stream is closed, just discard

    /* Direct write is only legal when nothing is already buffered,
     * otherwise bytes would be reordered. */
    bool try_direct = !c->always_buffer && ringbuf_size(c->rb) == 0;
    size_t directwrsz = 0;
    size_t totalsz;

    /* The adb hack rewrites bytes on the way out, so it must go
     * through the buffered path. */
    if (c->adb_encoding_hack)
        try_direct = false;

    // If writing directly would make us overflow the write counter,
    // fall back to buffered IO.
    if (try_direct) {
        totalsz = iovec_sum(iov, nio);
        if (c->track_bytes_written
            && UINT32_MAX - c->bytes_written < totalsz)
        {
            try_direct = false;
        }
    }

    if (try_direct) {
        // If writev fails, just fall back to buffering path
        directwrsz = XMAX(writev(c->fdh->fd, iov, nio), 0);
        if (c->track_bytes_written)
            c->bytes_written += directwrsz;
    }

    /* Buffer whatever the direct write (if any) did not consume;
     * `skip` walks the remaining directwrsz credit across iovecs. */
    for (unsigned i = 0; i < nio; ++i) {
        size_t skip = XMIN(iov[i].iov_len, directwrsz);
        directwrsz -= skip;
        char* b = (char*)iov[i].iov_base + skip;
        size_t blen = iov[i].iov_len - skip;
        ringbuf_copy_in(c->rb, b, blen);
        ringbuf_note_added(c->rb, blen);
    }
}

// Begin channel shutdown process.  Closure is not complete until
// channel_dead_p(c) returns true.
/* Begin channel shutdown.  Destroys the fd handle immediately when it
 * is safe (FROM_FD channels, or TO_FD channels with nothing left to
 * flush); otherwise only marks the close as pending so poll_channel_1
 * can drain the buffer first.  Closure is complete only once
 * channel_dead_p(c) returns true. */
void
channel_close(struct channel* c)
{
    c->pending_close = true;

    if (c->fdh != NULL
        && ((c->dir == CHANNEL_TO_FD && ringbuf_size(c->rb) == 0)
            || c->dir == CHANNEL_FROM_FD))
    {
        fdh_destroy(c->fdh);
        c->fdh = NULL;
    }
}

/* One poll iteration for a single channel (runs under catch_error via
 * channel_poll).  Reads from the fd into the ring buffer and/or
 * flushes the ring buffer to the fd, maintaining the flow-control
 * window and the 32-bit write counter as configured. */
static void
poll_channel_1(void* arg)
{
    struct channel* c = arg;
    size_t sz;

    if ((sz = channel_wanted_readsz(c)) > 0) {
        size_t nr_read;
        if (c->adb_encoding_hack)
            nr_read = channel_read_adb_hack(c, sz);
        else
            nr_read = channel_read_1(c, sz);

        assert(nr_read <= c->window);
        if (c->track_window)
            c->window -= nr_read;

        /* A zero-byte read means EOF on the fd. */
        if (nr_read == 0)
            channel_close(c);
    }

    if ((sz = channel_wanted_writesz(c)) > 0) {
        size_t nr_written;
        if (c->adb_encoding_hack)
            nr_written = channel_write_adb_hack(c, sz);
        else
            nr_written = channel_write_1(c, sz);

        assert(nr_written <= UINT32_MAX - c->bytes_written);
        if (c->track_bytes_written)
            c->bytes_written += nr_written;

        /* A pending close completes once the buffer drains. */
        if (c->pending_close && ringbuf_size(c->rb) == 0)
            channel_close(c);
    }
}

/* A channel is fully dead once its fd is gone, its buffer is empty,
 * and EOF has been sent to the peer. */
bool
channel_dead_p(struct channel* c)
{
    return (c->fdh == NULL
            && ringbuf_size(c->rb) == 0
            && c->sent_eof == true);
}

/* Poll one channel; any error other than EINTR is fatal for the
 * channel: buffered output is purged, the channel is closed, and the
 * errno is recorded in c->err. */
void
channel_poll(struct channel* c)
{
    struct errinfo ei = { .want_msg = false };

    if (catch_error(poll_channel_1, c, &ei) && ei.err != EINTR) {
        if (c->dir == CHANNEL_TO_FD) {
            // Error writing to fd, so purge buffered bytes we'll
            // never write.  By purging, we also make the stream
            // appear writable (because now there's space available),
            // but any writes will actually go into a black hole.
            // This way, if somebody's blocked on being able to write
            // to this stream, he'll get unblocked.  This behavior is
            // important when c is TO_PEER and lets us complete an
            // orderly shutdown, flushing any data we've buffered,
            // without adding special logic all over the place to
            // account for this situation.
            ringbuf_note_removed(c->rb, ringbuf_size(c->rb));
        }
        channel_close(c);
        c->err = ei.err;
    }
}
/*
 * Contiki process that reassembles XBee API frames from the rxbuf ring
 * buffer and hands complete RX payloads up to the RDC layer via
 * packetbuf.
 *
 * Parser states (rcvState):
 *   start    - wait for the XBEE_DELIMITER byte
 *   length   - two length bytes (big-endian) into msgLen
 *   api_id   - must be XBEE_API_RX, otherwise resync to start
 *   metadata - skip addressing/RSSI bytes (still checksummed)
 *   data     - copy payload bytes into buf[]
 *   checksum - verify the running sum equals 0xFF; deliver on success
 *
 * NOTE(review): xbee_getByte() appears to deposit the byte into the
 * file-scope variable `c` -- confirm against its definition.
 */
PROCESS_THREAD(xbee_process, ev, data)
{
    static char buf[XBEE_PACKET_SIZE];
    static int ptr = 0;                      /* write index into buf[] */
    static int rcvState = xbee_state_start;  /* current parser state */
    static int rcvCnt = 0;                   /* bytes consumed in this frame */
    static unsigned char ourChksum = 0;      /* running frame checksum */
    static unsigned int msgLen = 0;          /* declared frame length */

    PROCESS_BEGIN();

    //PRINTF("xbee_process: started\n");
    while (1) {
        //PRINTF("xbee_process: calling receiver callback\n");

        /* This might look ugly since I can't use switch statements
         * inside a protothread; a state machine is used to handle
         * fragmented packets from the XBee module (rare but can occur). */
        if (rcvState == xbee_state_start) {
            xbee_getByte();
            if (c == XBEE_DELIMITER) {
                rcvCnt++;
                rcvState = xbee_state_length;
            }
        }

        if (rcvState == xbee_state_length) {
            if (rcvCnt < 2) {
                xbee_getByte();
                msgLen = (unsigned char)c << 8;  /* high length byte */
                rcvCnt++;
            } else {
                xbee_getByte();
                msgLen += (unsigned char)c;      /* low length byte */
                rcvCnt++;
                rcvState = xbee_state_api_id;
            }
        }

        if (rcvState == xbee_state_api_id) {
            xbee_getByte();
            if (c == XBEE_API_RX) {
                rcvCnt++;
                ourChksum += c;
                rcvState = xbee_state_metadata;
            } else {
                /* Not an RX frame: reset and resync on the next delimiter. */
                ptr = 0;
                rcvCnt = 0;
                ourChksum = 0;
                msgLen = 0;
                rcvState = xbee_state_start;
            }
        }

        if (rcvState == xbee_state_metadata) {
            /* ignore rf packet metadata for now */
            while (ringbuf_size(&rxbuf) > 0 && rcvCnt < 8) {
                xbee_getByte(); // src address high
                ourChksum += c;
                rcvCnt++;
            }
            if (rcvCnt == 8) {
                rcvState = xbee_state_data;
            }
        }

        if (rcvState == xbee_state_data) {
            /* rcvCnt - 3 skips the delimiter and two length bytes,
             * which are not counted in msgLen. */
            while (ringbuf_size(&rxbuf) > 0 && ((rcvCnt - 3) < msgLen)) {
                xbee_getByte(); // data byte
                ourChksum += c;
                buf[ptr++] = c;
                rcvCnt++;
            }
            if ((rcvCnt - 3) == msgLen) {
                rcvState = xbee_state_checksum;
            }
        }

        if (rcvState == xbee_state_checksum) {
            xbee_getByte();
            ourChksum += c;
            if (ourChksum == 0xFF) /* Checksum is ok */ {
                packetbuf_clear();
                //packetbuf_set_attr(PACKETBUF_ATTR_TIMESTAMP, last_packet_timestamp);
                memcpy(packetbuf_dataptr(), buf, ptr);
                packetbuf_set_datalen(ptr);
                NETSTACK_RDC.input();  /* hand the payload up the stack */
            } else {
                ourChksum = 0;
            }
            // cleanup
            ptr = 0;
            rcvCnt = 0;
            ourChksum = 0;
            msgLen = 0;
            rcvState = xbee_state_start;
        }
    }

    PROCESS_END();
}
/*
 * Character-device ioctl entry point for the IIS328DQ accelerometer
 * driver.  Handles sensor/accel control commands locally and defers
 * anything unrecognized to CDev::ioctl().
 *
 * filp - open file handle; cmd - ioctl command; arg - command-specific
 * argument.  Returns OK, a positive result value, or a negative errno.
 */
int IIS328DQ::ioctl(struct file *filp, int cmd, unsigned long arg)
{
    switch (cmd) {
    case SENSORIOCSPOLLRATE: {
        switch (arg) {

        /* switching to manual polling */
        case SENSOR_POLLRATE_MANUAL:
            stop();
            _call_interval = 0;
            return OK;

        /* external signalling not supported */
        case SENSOR_POLLRATE_EXTERNAL:

        /* zero would be bad */
        case 0:
            return -EINVAL;

        /* set default/max polling rate */
        case SENSOR_POLLRATE_MAX:
        case SENSOR_POLLRATE_DEFAULT:
            /* re-enter with the concrete default rate */
            return ioctl(filp, SENSORIOCSPOLLRATE, IIS328DQ_DEFAULT_RATE);

        /* adjust to a legal polling interval in Hz */
        default: {
            /* do we need to start internal polling? */
            bool want_start = (_call_interval == 0);

            /* convert hz to hrt interval via microseconds */
            unsigned ticks = 1000000 / arg;

            /* check against maximum sane rate (1000 us = 1 kHz) */
            if (ticks < 1000)
                return -EINVAL;

            /* update interval for next measurement */
            /* XXX this is a bit shady, but no other way to adjust... */
            _call_interval = ticks;

            /* adjust filters: keep the current cutoff, recompute for
             * the new sample rate */
            float cutoff_freq_hz = _accel_filter_x.get_cutoff_freq();
            float sample_rate = 1.0e6f/ticks;
            set_driver_lowpass_filter(sample_rate, cutoff_freq_hz);

            /* if we need to start the poll state machine, do it */
            if (want_start)
                start();

            return OK;
        }
        }
    }

    case SENSORIOCGPOLLRATE:
        /* report the polling rate in Hz, or MANUAL when stopped */
        if (_call_interval == 0)
            return SENSOR_POLLRATE_MANUAL;
        return 1000000 / _call_interval;

    case SENSORIOCSQUEUEDEPTH: {
        /* lower bound is mandatory, upper bound is a sanity check */
        if ((arg < 1) || (arg > 100))
            return -EINVAL;

        /* resize under interrupt lock so the report queue cannot be
         * touched mid-resize */
        irqstate_t flags = irqsave();
        if (!ringbuf_resize(_reports, arg)) {
            irqrestore(flags);
            return -ENOMEM;
        }
        irqrestore(flags);
        return OK;
    }

    case SENSORIOCGQUEUEDEPTH:
        return ringbuf_size(_reports);

    case SENSORIOCRESET:
        reset();
        return OK;

    case ACCELIOCSSAMPLERATE:
        return set_samplerate(arg, _accel_onchip_filter_bandwidth);

    case ACCELIOCGSAMPLERATE:
        return _accel_samplerate;

    case ACCELIOCSLOWPASS: {
        // set the software lowpass cut-off in Hz
        float cutoff_freq_hz = arg;
        float sample_rate = 1.0e6f / _call_interval;
        set_driver_lowpass_filter(sample_rate, cutoff_freq_hz);
        return OK;
    }

    case ACCELIOCGLOWPASS:
        return static_cast<int>(_accel_filter_x.get_cutoff_freq());

    case ACCELIOCSSCALE:
        /* copy scale in */
        memcpy(&_accel_scale, (struct accel_scale *) arg, sizeof(_accel_scale));
        return OK;

    case ACCELIOCGSCALE:
        /* copy scale out */
        memcpy((struct accel_scale *) arg, &_accel_scale, sizeof(_accel_scale));
        return OK;

    case ACCELIOCSRANGE:
        /* arg should be in dps accel */
        return set_range(arg);

    case ACCELIOCGRANGE:
        /* convert to m/s^2 and return rounded in G */
        return (unsigned long)((_accel_range_m_s2)/IIS328DQ_ONE_G + 0.5f);

    case ACCELIOCSELFTEST:
        return self_test();

    case ACCELIOCSHWLOWPASS:
        return set_samplerate(_accel_samplerate, arg);

    case ACCELIOCGHWLOWPASS:
        return _accel_onchip_filter_bandwidth; // assigned in set_samplerate()

    default:
        /* give it to the superclass */
        return CDev::ioctl(filp, cmd, arg);
    }
}