/*
 * Append a framed message to the connection's send buffer and switch the
 * poll job to also wait for writability.
 * Frame layout: [SZ_SZMSG payload size][SZ_MSGID message id][payload].
 * The whole operation runs under the connection mutex.
 */
void write_trigger_poll(rpc_comm_t *comm, poll_job_t *pjob, buf_t *buf,
                        msgid_t msgid, uint8_t *data, size_t sz_data) {
    apr_thread_mutex_lock(comm->mx);

    /* Grow the write buffer if the full frame would not fit. */
    buf_readjust(buf, sz_data + SZ_SZMSG + SZ_MSGID);

    LOG_TRACE("add message to sending buffer, message size: %d, message type: %x",
              (int32_t)sz_data, (int32_t)msgid);

    /* Frame header: payload size first... */
    size_t want = SZ_SZMSG;
    SAFE_ASSERT(buf_write(buf, (uint8_t*)&sz_data, want) == want);

    /* ...then the message id (zero is reserved and therefore invalid). */
    SAFE_ASSERT(msgid != 0);
    want = SZ_MSGID;
    SAFE_ASSERT(buf_write(buf, (uint8_t*)&msgid, want) == want);

    /* Finally the payload itself. */
    want = sz_data;
    SAFE_ASSERT(buf_write(buf, (uint8_t*)data, want) == want);

    /* Ask the poller to also report write readiness so the frame drains. */
    poll_mgr_update_job(pjob->mgr, pjob, APR_POLLIN | APR_POLLOUT);

    apr_thread_mutex_unlock(comm->mx);
}
/* Render the response status line and headers of `request` into `buf`
 * (a preallocated Python str used as a raw byte scratch buffer).
 * Returns the number of bytes written.
 * NOTE(review): no bounds checking here — assumes the caller sized
 * `buf` large enough for the full header block. */
static size_t wsgi_getheaders(Request* request, PyObject* buf) {
    char* out = PyString_AS_STRING(buf);
    Py_ssize_t i;

    /* Append `len` raw bytes from `src`, advancing the write cursor. */
#define buf_write(src, len) \
    do { \
        size_t n_ = (len); \
        memcpy(out, (src), n_); \
        out += n_; \
    } while(0)
    /* Append a NUL-terminated string (without its NUL). */
#define buf_write2(src) buf_write(src, strlen(src))

    /* Status line, e.g. "HTTP/1.1 200 OK". */
    buf_write2("HTTP/1.1 ");
    buf_write(PyString_AS_STRING(request->status),
              PyString_GET_SIZE(request->status));

    /* One "\r\nField: value" pair per (field, value) tuple. */
    for(i = 0; i < PyList_GET_SIZE(request->headers); ++i) {
        PyObject* tuple = PyList_GET_ITEM(request->headers, i);
        PyObject* field = PyTuple_GET_ITEM(tuple, 0);
        PyObject* value = PyTuple_GET_ITEM(tuple, 1);
        buf_write2("\r\n");
        buf_write(PyString_AS_STRING(field), PyString_GET_SIZE(field));
        buf_write2(": ");
        buf_write(PyString_AS_STRING(value), PyString_GET_SIZE(value));
    }

    if(request->state.chunked_response)
        buf_write2("\r\nTransfer-Encoding: chunked");

    /* Blank line terminating the header block. */
    buf_write2("\r\n\r\n");
    return out - PyString_AS_STRING(buf);
}
/*
 * Append a CHFS "data" node to the image: a chfs_flash_data_node header
 * followed by `len` payload bytes belonging to `node` at file offset
 * `ofs`, padded out to a word boundary.  Zero-length writes are skipped.
 */
void write_data(fsinfo_t *fsopts, fsnode *node, unsigned char *buf,
    size_t len, uint32_t ofs)
{
	struct chfs_flash_data_node fdata;
	memset(&fdata, 0, sizeof(fdata));
	if (len == 0) {
		return;
	}

	/* Make sure header + payload do not straddle an eraseblock. */
	pad_block_if_less_than(fsopts, sizeof(fdata) + len);

	fdata.magic = htole16(CHFS_FS_MAGIC_BITMASK);
	fdata.type = htole16(CHFS_NODETYPE_DATA);
	fdata.length = htole32(CHFS_PAD(sizeof(fdata) + len));
	/* hdr_crc covers the common node header minus its own CRC field. */
	fdata.hdr_crc = htole32(crc32(0, (uint8_t *)&fdata,
	    CHFS_NODE_HDR_SIZE - 4));
	fdata.vno = htole64(node->inode->ino);
	fdata.data_length = htole32(len);
	fdata.offset = htole32(ofs);
	fdata.data_crc = htole32(crc32(0, (uint8_t *)buf, len));
	/* node_crc must be computed last: it covers all fields above. */
	fdata.node_crc = htole32(crc32(0, (uint8_t *)&fdata,
	    sizeof(fdata) - 4));

	buf_write(fsopts, &fdata, sizeof(fdata));
	buf_write(fsopts, buf, len);
	padword(fsopts);
}
/*
 * Append a CHFS dirent node for `node` to the image (header followed by
 * the entry name, padded to a word boundary).
 *
 * Fixes vs. the original:
 *  - it allocated strlen(node->name) bytes and copied the name WITHOUT
 *    its NUL terminator, then called strlen() on that unterminated
 *    buffer — an out-of-bounds read / undefined behavior;
 *  - `name` was never freed (leak);
 *  - the little-endian-swapped fdirent.nsize was used as a host byte
 *    count for crc32() and buf_write(), which is wrong on big-endian
 *    hosts.  The host-order length `namelen` is used instead.
 */
void write_dirent(fsinfo_t *fsopts, fsnode *node)
{
	struct chfs_flash_dirent_node fdirent;
	char *name;
	size_t namelen;

	namelen = strlen(node->name);
	/* +1 so the copy stays a valid C string. */
	name = emalloc(namelen + 1);
	memcpy(name, node->name, namelen + 1);

	memset(&fdirent, 0, sizeof(fdirent));
	fdirent.magic = htole16(CHFS_FS_MAGIC_BITMASK);
	fdirent.type = htole16(CHFS_NODETYPE_DIRENT);
	fdirent.length = htole32(CHFS_PAD(sizeof(fdirent) + namelen));
	/* hdr_crc covers the common node header minus its own CRC field. */
	fdirent.hdr_crc = htole32(crc32(0, (uint8_t *)&fdirent,
	    CHFS_NODE_HDR_SIZE - 4));
	fdirent.vno = htole64(node->inode->ino);
	/* The root node is its own parent. */
	if (node->parent != NULL) {
		fdirent.pvno = htole64(node->parent->inode->ino);
	} else {
		fdirent.pvno = htole64(node->inode->ino);
	}
	fdirent.version = htole64(version++);
	fdirent.mctime = 0;
	fdirent.nsize = htole32(namelen);
	fdirent.dtype = htole32(IFTOCHT(node->type & S_IFMT));
	fdirent.name_crc = htole32(crc32(0, (uint8_t *)name, namelen));
	/* node_crc last: it covers all fields above. */
	fdirent.node_crc = htole32(crc32(0, (uint8_t *)&fdirent,
	    sizeof(fdirent) - 4));

	pad_block_if_less_than(fsopts, sizeof(fdirent) + namelen);
	buf_write(fsopts, &fdirent, sizeof(fdirent));
	buf_write(fsopts, name, namelen);
	padword(fsopts);
	free(name);
}
END_TEST START_TEST(test_integer) { #define OVERSIZE ":19223372036854775807\r\n" #define INVALID1 ":123lOl456\r\n" #define INVALID2 ":\r\n" struct element el_c, el_p; int ret; struct int_pair { char *serialized; uint64_t num; } pairs[3] = { {":-1\r\n", -1}, {":9223372036854775807\r\n", 9223372036854775807}, {":128\r\n", 128} }; test_reset(); for (int i = 0; i < 3; i++) { size_t len = strlen(pairs[i].serialized); buf_reset(buf); el_c.type = ELEM_INT; el_c.num = pairs[i].num; ret = compose_element(&buf, &el_c); ck_assert(ret == len); ck_assert_int_eq(cc_bcmp(buf->rpos, pairs[i].serialized, len), 0); el_p.type = ELEM_UNKNOWN; ret = parse_element(&el_p, buf); ck_assert_int_eq(ret, PARSE_OK); ck_assert(buf->rpos == buf->wpos); ck_assert(el_p.type == ELEM_INT); ck_assert(el_p.num == pairs[i].num); } buf_reset(buf); buf_write(buf, OVERSIZE, sizeof(OVERSIZE) - 1); ret = parse_element(&el_p, buf); ck_assert_int_eq(ret, PARSE_EOVERSIZE); buf_reset(buf); buf_write(buf, INVALID1, sizeof(INVALID1) - 1); ret = parse_element(&el_p, buf); ck_assert_int_eq(ret, PARSE_EINVALID); buf_reset(buf); buf_write(buf, INVALID2, sizeof(INVALID2) - 1); ret = parse_element(&el_p, buf); ck_assert_int_eq(ret, PARSE_EINVALID); #undef INVALID2 #undef INVALID1 #undef OVERSIZE }
/*
 * Send the NUL-terminated string `src` followed by CRLF over socket `sd`.
 * Returns the result of send_line().
 */
static bool
send_line_crlf (socket_descriptor_t sd, const char *src)
{
  bool ret;

  /* +3: room for '\r', '\n' and the terminating NUL. */
  struct buffer buf = alloc_buf (strlen (src) + 3);
  ASSERT (buf_write (&buf, src, strlen (src)));
  /* Deliberately 3, not 2: copies the string literal's NUL as well so
     BSTR() below yields a proper C string. */
  ASSERT (buf_write (&buf, "\r\n", 3));
  ret = send_line (sd, BSTR (&buf));
  free_buf (&buf);
  return ret;
}
/* ADC end-of-conversion interrupt handler: latch the completed sample
 * for the current sequence channel, feed it to the smoothing filter or
 * the sample ring buffer, and stop acquisition once enough samples have
 * been collected (unless free-running).
 * NOTE(review): this block appears truncated — the braces do not
 * balance (the function's closing brace / channel-restart logic, cf.
 * ADC_IRQHandler elsewhere in this file, is not visible here).
 * Reproduced byte-identically; confirm against the full source. */
static void adc_int_handler()
{
  elua_adc_dev_state *d = adc_get_dev_state( 0 );
  elua_adc_ch_state *s = d->ch_state[ d->seq_ctr ];
  u32 tmp, dreg_t;

  tmp = AD0STAT; // Clear interrupt flag
  //AD0INTEN = 0; // Disable generating interrupts
  dreg_t = *( PREG )adc_dr[ s->id ];

  // Bit 31 is the DONE flag of the data register.
  if ( dreg_t & ( 1UL << 31 ) )
  {
    // 10-bit result lives in bits 6..15.
    d->sample_buf[ d->seq_ctr ] = ( u16 )( ( dreg_t >> 6 ) & 0x3FF );
    AD0CR &= 0xF8FFFF00; // stop ADC, disable channels
    s->value_fresh = 1;

    // Either feed the smoothing filter or queue the raw sample.
    if ( s->logsmoothlen > 0 && s->smooth_ready == 0)
      adc_smooth_data( s->id );
#if defined( BUF_ENABLE_ADC )
    else if ( s->reqsamples > 1 )
    {
      buf_write( BUF_ID_ADC, s->id, ( t_buf_data* )s->value_ptr );
      s->value_fresh = 0;
    }
#endif
    // Stop when enough samples are in and we're not free-running.
    if ( adc_samples_available( s->id ) >= s->reqsamples && s->freerunning == 0 )
    {
      platform_adc_stop( s->id );
    }
  }
/* Pad the image with 0xFF filler bytes up to the next 4-byte boundary. */
static void padword(fsinfo_t *fsopts)
{
	size_t rem = img_ofs % 4;

	if (rem != 0)
		buf_write(fsopts, ffbuf, 4 - rem);
}
/*
 * Append a CHFS vnode (inode) record for `node` to the image, padded to
 * a word boundary.
 *
 * Fix: the original stored st_uid into the gid field and st_gid into
 * the uid field — the assignments were swapped.  (This also changes
 * node_crc, which is computed over the corrected fields.)
 */
void write_vnode(fsinfo_t *fsopts, fsnode *node)
{
	struct chfs_flash_vnode fvnode;
	memset(&fvnode, 0, sizeof(fvnode));

	fvnode.magic = htole16(CHFS_FS_MAGIC_BITMASK);
	fvnode.type = htole16(CHFS_NODETYPE_VNODE);
	fvnode.length = htole32(CHFS_PAD(sizeof(fvnode)));
	/* hdr_crc covers the common node header minus its own CRC field. */
	fvnode.hdr_crc = htole32(crc32(0, (uint8_t *)&fvnode,
	    CHFS_NODE_HDR_SIZE - 4));
	fvnode.vno = htole64(node->inode->ino);
	fvnode.version = htole64(version++);
	fvnode.mode = htole32(node->inode->st.st_mode);
	fvnode.dn_size = htole32(node->inode->st.st_size);
	fvnode.atime = htole32(node->inode->st.st_atime);
	fvnode.ctime = htole32(node->inode->st.st_ctime);
	fvnode.mtime = htole32(node->inode->st.st_mtime);
	fvnode.gid = htole32(node->inode->st.st_gid);
	fvnode.uid = htole32(node->inode->st.st_uid);
	/* node_crc last: it covers everything above. */
	fvnode.node_crc = htole32(crc32(0, (uint8_t *)&fvnode,
	    sizeof(fvnode) - 4));

	pad_block_if_less_than(fsopts, sizeof(fvnode));
	buf_write(fsopts, &fvnode, sizeof(fvnode));
	padword(fsopts);
}
/*
 * Flush and tear down a keyring handle.
 * If the ring was modified, optionally conventionally-encrypt it,
 * armor it when the file format asks for it, and write it back to its
 * file.  Always releases the lock and frees all owned buffers,
 * including the handle itself.
 * Returns 0 on success; -1 if the file could not be opened, otherwise
 * buf_write()'s result for the write-back.
 */
int pgpdb_close(KEYRING *keydb)
{
	int err = 0;
	if (keydb->modified) {
		FILE *f;
#ifdef DEBUG
		/* Only a writer handle may carry modifications. */
		assert(keydb->writer);
#endif
		/* Encrypt in place before serializing, if a passphrase is set. */
		if (keydb->encryptkey && keydb->encryptkey->length)
			pgp_encrypt(PGP_NCONVENTIONAL | PGP_NOARMOR, keydb->db,
				    keydb->encryptkey, NULL, NULL, NULL, NULL);
		assert(keydb->type == PGP_TYPE_PRIVATE ||
		       keydb->type == PGP_TYPE_PUBLIC);
		/* ASCII-armor with the armor type matching the key type. */
		if (keydb->filetype == ARMORED)
			pgp_armor(keydb->db, keydb->type == PGP_TYPE_PUBLIC ?
				  PGP_ARMOR_KEY : PGP_ARMOR_SECKEY);
		/* filetype == -1 means "no backing file": nothing to write. */
		if (keydb->filetype == -1 ||
		    (f = mix_openfile(keydb->filename,
				      keydb->filetype == ARMORED ? "w" : "wb")) == NULL)
			err = -1;
		else {
			err = buf_write(keydb->db, f);
			fclose(f);
		}
	}
	/* Teardown happens regardless of write success. */
	if (keydb->lock)
		unlockfile(keydb->lock);
	if (keydb->encryptkey)
		buf_free(keydb->encryptkey);
	buf_free(keydb->db);
	free(keydb);
	return (err);
}
/*
 * Re-initialize `dest` with the same headroom offset as `src` and copy
 * src's readable bytes into it.  Returns false if either step fails.
 */
bool
buf_assign (struct buffer *dest, const struct buffer *src)
{
  bool ok = buf_init (dest, src->offset);
  if (ok)
    ok = buf_write (dest, BPTR (src), BLEN (src));
  return ok;
}
bool crypto_pem_encode(const char *name, struct buffer *dst, const struct buffer *src, struct gc_arena *gc) { bool ret = false; BIO *bio = BIO_new(BIO_s_mem()); if (!bio || !PEM_write_bio(bio, name, "", BPTR(src), BLEN(src))) { ret = false; goto cleanup; } BUF_MEM *bptr; BIO_get_mem_ptr(bio, &bptr); *dst = alloc_buf_gc(bptr->length, gc); ASSERT(buf_write(dst, bptr->data, bptr->length)); ret = true; cleanup: if (!BIO_free(bio)) { ret = false;; } return ret; }
/*
 * Write one eraseblock header at the current image position: a full
 * page holding the erase-counter header, then a full page holding the
 * NAND- or NOR-specific header.  Unused page bytes are 0xFF.
 *
 * Fix: MINSIZE expands to a MAX() of size macros whose type is not
 * guaranteed to be size_t, but it was passed to errx() for a %zu
 * conversion — a format/argument mismatch is undefined behavior.  The
 * argument is now cast to size_t; the helper macro is also #undef'd
 * after use.
 */
void write_eb_header(fsinfo_t *fsopts)
{
	chfs_opt_t *opts;
	struct chfs_eb_hdr ebhdr;
	char *buf;

	opts = fsopts->fs_specific;

#define MINSIZE MAX(MAX(CHFS_EB_EC_HDR_SIZE, CHFS_EB_HDR_NOR_SIZE), \
    CHFS_EB_HDR_NAND_SIZE)
	if ((uint32_t)opts->pagesize < MINSIZE)
		errx(EXIT_FAILURE, "pagesize cannot be less than %zu",
		    (size_t)MINSIZE);

	buf = emalloc(opts->pagesize);

	/* Page 1: erase-counter header, rest of the page zero-filled
	 * then 0xFF-filled past the header. */
	ebhdr.ec_hdr.magic = htole32(CHFS_MAGIC_BITMASK);
	ebhdr.ec_hdr.erase_cnt = htole32(1);
	/* crc_ec covers the 4 bytes starting at offset 8 of ec_hdr. */
	ebhdr.ec_hdr.crc_ec = htole32(crc32(0, (uint8_t *)&ebhdr.ec_hdr + 8, 4));
	memcpy(buf, &ebhdr.ec_hdr, CHFS_EB_EC_HDR_SIZE);
	memset(buf + CHFS_EB_EC_HDR_SIZE, 0xFF,
	    opts->pagesize - CHFS_EB_EC_HDR_SIZE);
	buf_write(fsopts, buf, opts->pagesize);

	/* Page 2: media-specific header; the rest of the page stays 0xFF. */
	memset(buf, 0xFF, opts->pagesize);
	if (opts->mediatype == TYPE_NAND) {
		ebhdr.u.nand_hdr.lid = htole32(lebnumber++);
		ebhdr.u.nand_hdr.serial = htole64(++(max_serial));
		ebhdr.u.nand_hdr.crc = htole32(crc32(0,
		    (uint8_t *)&ebhdr.u.nand_hdr + 4,
		    CHFS_EB_HDR_NAND_SIZE - 4));
		memcpy(buf, &ebhdr.u.nand_hdr, CHFS_EB_HDR_NAND_SIZE);
	} else {
		ebhdr.u.nor_hdr.lid = htole32(lebnumber++);
		ebhdr.u.nor_hdr.crc = htole32(crc32(0,
		    (uint8_t *)&ebhdr.u.nor_hdr + 4,
		    CHFS_EB_HDR_NOR_SIZE - 4));
		memcpy(buf, &ebhdr.u.nor_hdr, CHFS_EB_HDR_NOR_SIZE);
	}
	buf_write(fsopts, buf, opts->pagesize);
	free(buf);
#undef MINSIZE
}
/* Serialize the status line and headers of `request` into `buf` (a
 * preallocated Python str used as a byte scratch buffer) and return the
 * number of bytes written.
 * NOTE(review): assumes `buf` was sized large enough by the caller —
 * no bounds are checked here. */
static size_t wsgi_getheaders(Request* request, PyObject* buf) {
  char* bufp = PyString_AS_STRING(buf);

  /* Append `len` raw bytes from `src`, advancing the write cursor. */
  #define buf_write(src, len) \
    do { \
      size_t n = len; \
      const char* s = src;  \
      while(n--) *bufp++ = *s++; \
    } while(0)
  /* Append a NUL-terminated string (without its NUL). */
  #define buf_write2(src) buf_write(src, strlen(src))

  /* First line, e.g. "HTTP/1.1 200 Ok" */
  buf_write2("HTTP/1.1 ");
  buf_write(PyString_AS_STRING(request->status),
            PyString_GET_SIZE(request->status));

  /* Headers, from the `request->headers` mapping.
   * [("Header1", "value1"), ("Header2", "value2")]
   * --> "Header1: value1\r\nHeader2: value2" */
  for(Py_ssize_t i=0; i<PyList_GET_SIZE(request->headers); ++i) {
    PyObject *tuple = PyList_GET_ITEM(request->headers, i);
    PyObject *field = PyTuple_GET_ITEM(tuple, 0),
             *value = PyTuple_GET_ITEM(tuple, 1);
    buf_write2("\r\n");
    buf_write(PyString_AS_STRING(field), PyString_GET_SIZE(field));
    buf_write2(": ");
    buf_write(PyString_AS_STRING(value), PyString_GET_SIZE(value));
  }

  /* See `wsgi_call_application`: connection persistence decides whether
   * a chunked body is allowed. */
  if(request->state.keep_alive) {
    buf_write2("\r\nConnection: Keep-Alive");
    if(request->state.chunked_response) {
      buf_write2("\r\nTransfer-Encoding: chunked");
    }
  } else {
    buf_write2("\r\nConnection: close");
  }

  /* Blank line terminating the header block. */
  buf_write2("\r\n\r\n");
  return bufp - PyString_AS_STRING(buf);
}
/* Fill the image with 0xFF filler up to the next eraseblock boundary,
 * writing at most sizeof(ffbuf) bytes per call to buf_write(). */
void padblock(fsinfo_t *fsopts)
{
	chfs_opt_t *opts = fsopts->fs_specific;

	for (;;) {
		size_t gap = img_ofs % opts->eraseblock;

		if (gap == 0)
			break;
		buf_write(fsopts, ffbuf,
		    MIN(sizeof(ffbuf), opts->eraseblock - gap));
	}
}
/* given a key and key_type, write key to buffer */ bool write_key (const struct key *key, const struct key_type *kt, struct buffer *buf) { ASSERT (kt->cipher_length <= MAX_CIPHER_KEY_LENGTH && kt->hmac_length <= MAX_HMAC_KEY_LENGTH); if (!buf_write (buf, &kt->cipher_length, 1)) return false; if (!buf_write (buf, &kt->hmac_length, 1)) return false; if (!buf_write (buf, key->cipher, kt->cipher_length)) return false; if (!buf_write (buf, key->hmac, kt->hmac_length)) return false; return true; }
/*
 * Build a tls-crypt-v2 wrapped client key (WKc) into `wkc`:
 * an HMAC tag over (total length || client keys || metadata), followed
 * by the encrypted keys+metadata (IV derived from the tag), followed by
 * the length field itself.  Returns false on failure.
 */
static bool
tls_crypt_v2_wrap_client_key(struct buffer *wkc,
                             const struct key2 *src_key,
                             const struct buffer *src_metadata,
                             struct key_ctx *server_key, struct gc_arena *gc)
{
    cipher_ctx_t *cipher_ctx = server_key->cipher;
    /* Extra block of headroom: OpenSSL may write up to one additional
     * cipher block during encryption. */
    struct buffer work = alloc_buf_gc(TLS_CRYPT_V2_MAX_WKC_LEN
                                      + cipher_ctx_block_size(cipher_ctx), gc);

    /* Calculate auth tag and synthetic IV */
    uint8_t *tag = buf_write_alloc(&work, TLS_CRYPT_TAG_SIZE);
    if (!tag)
    {
        msg(M_WARN, "ERROR: could not write tag");
        return false;
    }
    /* Total WKc length: keys + metadata + tag + the length field. */
    uint16_t net_len = htons(sizeof(src_key->keys) + BLEN(src_metadata)
                             + TLS_CRYPT_V2_TAG_SIZE + sizeof(uint16_t));
    hmac_ctx_t *hmac_ctx = server_key->hmac;
    hmac_ctx_reset(hmac_ctx);
    /* The tag authenticates the length, the keys and the metadata. */
    hmac_ctx_update(hmac_ctx, (void *)&net_len, sizeof(net_len));
    hmac_ctx_update(hmac_ctx, (void *)src_key->keys, sizeof(src_key->keys));
    hmac_ctx_update(hmac_ctx, BPTR(src_metadata), BLEN(src_metadata));
    hmac_ctx_final(hmac_ctx, tag);

    dmsg(D_CRYPTO_DEBUG, "TLS-CRYPT WRAP TAG: %s",
         format_hex(tag, TLS_CRYPT_TAG_SIZE, 0, gc));

    /* Use the 128 most significant bits of the tag as IV */
    ASSERT(cipher_ctx_reset(cipher_ctx, tag));

    /* Overflow check (OpenSSL requires an extra block in the dst buffer) */
    if (buf_forward_capacity(&work) < (sizeof(src_key->keys)
                                       + BLEN(src_metadata) + sizeof(net_len)
                                       + cipher_ctx_block_size(cipher_ctx)))
    {
        msg(M_WARN, "ERROR: could not crypt: insufficient space in dst");
        return false;
    }

    /* Encrypt keys, then metadata, appending ciphertext after the tag. */
    int outlen = 0;
    ASSERT(cipher_ctx_update(cipher_ctx, BEND(&work), &outlen,
                             (void *)src_key->keys, sizeof(src_key->keys)));
    ASSERT(buf_inc_len(&work, outlen));
    ASSERT(cipher_ctx_update(cipher_ctx, BEND(&work), &outlen,
                             BPTR(src_metadata), BLEN(src_metadata)));
    ASSERT(buf_inc_len(&work, outlen));
    ASSERT(cipher_ctx_final(cipher_ctx, BEND(&work), &outlen));
    ASSERT(buf_inc_len(&work, outlen));

    /* Trailer: the WKc length field itself. */
    ASSERT(buf_write(&work, &net_len, sizeof(net_len)));

    return buf_copy(wkc, &work);
}
TEST write_handles_failed_flush(void) { char b[] = "hello$"; enum status s = buf_init(&h, buffer, sizeof(buffer), term, TERM_LEN, test_bad_write, flush_buffer); ASSERT_EQ(status_ok, s); ASSERT_EQ(status_err, buf_write(&h, b, sizeof(b))); PASS(); }
/* Format the current function-call stack as "f1 -> f2 -> f3" and return
 * it as a NUL-terminated string owned by a freshly made buffer. */
static char *get_caller_list() {
    Buffer *b = make_buffer();
    int count = vec_len(functions);

    for (int idx = 0; idx < count; idx++) {
        if (idx > 0)
            buf_printf(b, " -> ");
        buf_printf(b, "%s", vec_get(functions, idx));
    }
    /* Terminate so buf_body() yields a valid C string. */
    buf_write(b, '\0');
    return buf_body(b);
}
END_TEST START_TEST(test_unfin_token) { char *token[13] = { "+hello ", "-err", "-err\r", ":5", ":5\r", "$5", "$5\r", "$5\r\n", "$5\r\nabc", "$5\r\nabcde\r", "*5", "*5\r", }; char *pos; size_t len; for (int i = 0; i < 10; i++) { struct element el; len = strlen(token[i]); buf_reset(buf); buf_write(buf, token[i], len); pos = buf->rpos; ck_assert_int_eq(parse_element(&el, buf), PARSE_EUNFIN); ck_assert(buf->rpos == pos); } for (int i = 10; i < 12; i++) { int64_t nelem; len = strlen(token[i]); buf_reset(buf); buf_write(buf, token[i], len); pos = buf->rpos; ck_assert_int_eq(token_array_nelem(&nelem, buf), PARSE_EUNFIN); ck_assert(buf->rpos == pos); } }
/* USART1 RX interrupt: move one received byte into the console UART
 * ring buffer. */
void USART1_IRQHandler(void)
{
  if( USART_GetITStatus(USART1, USART_IT_RXNE) != RESET )
  {
    /* Read one byte from the receive data register */
    int c = USART_ReceiveData(USART1);

    buf_write( BUF_ID_UART, CON_UART_ID, ( t_buf_data* )&c );
  }
}
/* Emit a register record describing the physical region [pa, pa+size)
 * at the current dump offset, then advance the running offset. */
static int
reg_write(struct dumperinfo *di, vm_paddr_t pa, vm_size_t size)
{
	struct sparc64_dump_reg r = {
		.dr_pa = pa,
		.dr_size = size,
		.dr_offs = dumppos,
	};

	dumppos += size;
	return (buf_write(di, (char *)&r, sizeof(r)));
}
/* Shared USART RX interrupt body: push one received byte from the given
 * USART into its ring buffer. */
static void all_usart_irqhandler( int usart_id )
{
  if( USART_GetITStatus( usart[ usart_id ], USART_IT_RXNE ) != RESET )
  {
    /* Read one byte from the receive data register */
    int c = USART_ReceiveData( usart[ usart_id ] );

    buf_write( BUF_ID_UART, usart_id, ( t_buf_data* )&c );
  }
}
/* Serialize the status line and headers of `request` into `buf` and
 * return the number of bytes written.  HTTP/1.0 clients need explicit
 * Connection headers: chunked transfer is unavailable pre-1.1 and
 * keep-alive is opt-in.
 * NOTE(review): assumes `buf` was sized large enough by the caller —
 * no bounds are checked here. */
static size_t wsgi_getheaders(Request* request, PyObject* buf) {
  char* bufp = PyString_AS_STRING(buf);
  /* Nonzero when the client speaks at least HTTP/1.1. */
  int have_http11 = (request->parser.parser.http_major > 0 &&
                     request->parser.parser.http_minor > 0);

  /* Append `len` raw bytes from `src`, advancing the write cursor. */
  #define buf_write(src, len) \
    do { \
      size_t n = len; \
      const char* s = src;  \
      while(n--) *bufp++ = *s++; \
    } while(0)
  /* Append a NUL-terminated string (without its NUL). */
  #define buf_write2(src) buf_write(src, strlen(src))

  /* Status line, e.g. "HTTP/1.1 200 OK". */
  buf_write2("HTTP/1.1 ");
  buf_write(PyString_AS_STRING(request->status),
            PyString_GET_SIZE(request->status));

  /* One "\r\nField: value" pair per header tuple. */
  for(Py_ssize_t i=0; i<PyList_GET_SIZE(request->headers); ++i) {
    PyObject *tuple = PyList_GET_ITEM(request->headers, i);
    PyObject *field = PyTuple_GET_ITEM(tuple, 0),
             *value = PyTuple_GET_ITEM(tuple, 1);
    buf_write2("\r\n");
    buf_write(PyString_AS_STRING(field), PyString_GET_SIZE(field));
    buf_write2(": ");
    buf_write(PyString_AS_STRING(value), PyString_GET_SIZE(value));
  }

  if(!have_http11) {
    if(request->state.chunked_response)
      /* Can't do chunked with HTTP 1.0 */
      buf_write2("\r\nConnection: close");
    else if (request->state.keep_alive)
      /* Need to be explicit with HTTP 1.0 */
      buf_write2("\r\nConnection: Keep-Alive");
  } else if(request->state.chunked_response)
    buf_write2("\r\nTransfer-Encoding: chunked");

  /* Blank line terminating the header block. */
  buf_write2("\r\n\r\n");
  return bufp - PyString_AS_STRING(buf);
}
/* Write up to `max` outstanding ACK IDs from `ack` into `buf` (at the
 * front when `prepend`), then remove the written entries from `ack`.
 * Wire format: count byte, then each packet ID in network order, then —
 * only if the count is nonzero — the session ID.
 * Returns false if the buffer has no room for the ACK block. */
bool
reliable_ack_write(struct reliable_ack *ack,
                   struct buffer *buf,
                   const struct session_id *sid, int max, bool prepend)
{
    int i, j;
    uint8_t n;
    struct buffer sub;

    /* Acknowledge at most `max` packet IDs in this pass. */
    n = ack->len;
    if (n > max)
    {
        n = max;
    }
    /* Carve out exactly the space the ACK block needs. */
    sub = buf_sub(buf, ACK_SIZE(n), prepend);
    if (!BDEF(&sub))
    {
        goto error;
    }
    ASSERT(buf_write(&sub, &n, sizeof(n)));
    /* Packet IDs go out in network byte order. */
    for (i = 0; i < n; ++i)
    {
        packet_id_type pid = ack->packet_id[i];
        packet_id_type net_pid = htonpid(pid);
        ASSERT(buf_write(&sub, &net_pid, sizeof(net_pid)));
        dmsg(D_REL_DEBUG, "ACK write ID " packet_id_format " (ack->len=%d, n=%d)",
             (packet_id_print_type)pid, ack->len, n);
    }
    if (n)
    {
        /* A non-empty ACK always carries the session ID. */
        ASSERT(session_id_defined(sid));
        ASSERT(session_id_write(sid, &sub));
        /* Compact: shift the unsent IDs down over the acknowledged ones. */
        for (i = 0, j = n; j < ack->len; )
            ack->packet_id[i++] = ack->packet_id[j++];
        ack->len = i;
    }

    return true;

error:
    return false;
}
// Handle ADC interrupts // NOTE: This could probably be less complicated... void ADC_IRQHandler(void) { elua_adc_dev_state *d = adc_get_dev_state( 0 ); elua_adc_ch_state *s = d->ch_state[ d->seq_ctr ]; //int i; // Disable sampling & current sequence channel ADC_StartCmd( LPC_ADC, 0 ); ADC_ChannelCmd( LPC_ADC, s->id, DISABLE ); ADC_IntConfig( LPC_ADC, s->id, DISABLE ); if ( ADC_ChannelGetStatus( LPC_ADC, s->id, ADC_DATA_DONE ) ) { d->sample_buf[ d->seq_ctr ] = ( u16 )ADC_ChannelGetData( LPC_ADC, s->id ); s->value_fresh = 1; if ( s->logsmoothlen > 0 && s->smooth_ready == 0) adc_smooth_data( s->id ); #if defined( BUF_ENABLE_ADC ) else if ( s->reqsamples > 1 ) { buf_write( BUF_ID_ADC, s->id, ( t_buf_data* )s->value_ptr ); s->value_fresh = 0; } #endif if ( adc_samples_available( s->id ) >= s->reqsamples && s->freerunning == 0 ) platform_adc_stop( s->id ); } // Set up for next channel acquisition if we're still running if( d->running == 1 ) { // Prep next channel in sequence, if applicable if( d->seq_ctr < ( d->seq_len - 1 ) ) d->seq_ctr++; else if( d->seq_ctr == ( d->seq_len - 1 ) ) { adc_update_dev_sequence( 0 ); d->seq_ctr = 0; // reset sequence counter if on last sequence entry } ADC_ChannelCmd( LPC_ADC, d->ch_state[ d->seq_ctr ]->id, ENABLE ); ADC_IntConfig( LPC_ADC, d->ch_state[ d->seq_ctr ]->id, ENABLE ); if( d->clocked == 1 && d->seq_ctr == 0 ) // always use clock for first in clocked sequence ADC_StartCmd( LPC_ADC, adc_trig[ d->timer_id ] ); // Start next conversion if unclocked or if clocked and sequence index > 0 if( ( d->clocked == 1 && d->seq_ctr > 0 ) || d->clocked == 0 ) ADC_StartCmd( LPC_ADC, ADC_START_NOW ); } }
/* Prompt the user for a file name and append `message` to that file.
 * A file that cannot be opened is silently skipped (best effort). */
void savemsg(BUFFER *message)
{
    char savename[PATHMAX];
    FILE *f;

    askfilename(savename);
    if ((f = fopen(savename, "a")) != NULL) {
        buf_write(message, f);
        fclose(f);
    }
}
// Common RX path for all UARTs: route an incoming byte either through
// the serial-multiplexer protocol (when BUILD_SERMUX and the byte came
// from the mux'd physical port) or straight into the UART ring buffer.
static void cmn_rx_handler( int usart_id, u8 data )
{
#ifdef BUILD_SERMUX
  if( usart_id == SERMUX_PHYS_ID )
  {
    if( data != SERMUX_ESCAPE_CHAR )
    {
      // A service-ID byte selects the virtual UART for subsequent data.
      if( ( data >= SERMUX_SERVICE_ID_FIRST ) && data < ( SERMUX_SERVICE_ID_FIRST + SERMUX_NUM_VUART ) )
        uart_service_id_in = data;
      else if( ( data == SERMUX_FORCE_SID_CHAR ) && ( uart_last_sent != -1 ) )
      {
        // Retransmit service ID and last char
        platform_s_uart_send( SERMUX_PHYS_ID, uart_service_id_out );
        if( uart_last_sent & SERMUX_ESC_MASK )
          platform_s_uart_send( SERMUX_PHYS_ID, SERMUX_ESCAPE_CHAR );
        platform_s_uart_send( SERMUX_PHYS_ID, uart_last_sent & 0xFF );
        uart_last_sent = -1;
      }
      else
      {
        // Check for an escaped char
        if( uart_got_esc )
        {
          // Undo the escape transformation applied by the sender.
          data ^= SERMUX_ESCAPE_XOR_MASK;
          uart_got_esc = 0;
        }
        if( uart_service_id_in == -1 ) // request full restransmit if needed
          platform_s_uart_send( SERMUX_PHYS_ID, SERMUX_FORCE_SID_CHAR );
        else
          buf_write( BUF_ID_UART, uart_service_id_in, ( t_buf_data* )&data );
      }
    }
    else
      // Escape char: remember it; the next byte must be un-escaped.
      uart_got_esc = 1;
  }
  else
#endif // #ifdef BUILD_SERMUX
  // Non-multiplexed port: buffer the byte directly.
  buf_write( BUF_ID_UART, usart_id, ( t_buf_data* )&data );
}
/*
 * Read from data to device: copy up to `num` bytes from `data` into the
 * telnet write buffer.  Returns the number of bytes actually buffered
 * (0 when `num` is 0 or `data` is NULL).  `addr` is accepted but not
 * used by this implementation.
 */
uint8_t telnet_send(uint64_t addr, uint8_t *data, uint8_t num)
{
	uint8_t written = 0;

	if (num > 0 && data != NULL)
		written = buf_write(&telnet_data.writebuf, data, num);

	return written;
}
/* Console UART interrupt: acknowledge the interrupt, then drain every
 * pending character into the UART ring buffer. */
void UARTIntHandler()
{
  u32 status = MAP_UARTIntStatus( uart_base[ CON_UART_ID ], true );

  MAP_UARTIntClear( uart_base[ CON_UART_ID ], status );
  while( MAP_UARTCharsAvail( uart_base[ CON_UART_ID ] ) )
  {
    int c = MAP_UARTCharGetNonBlocking( uart_base[ CON_UART_ID ] );

    buf_write( BUF_ID_UART, CON_UART_ID, ( t_buf_data* )&c );
  }
}