/* Step through all remailers and look up each one's PGP key ID.
 *
 * IN:  remailer[] - remailer list (entries 1..n-1 are used; index 0 is
 *                   skipped, matching the caller's convention)
 *      n          - number of entries in remailer[]
 * OUT: keyid[i]   - low four bytes of the remailer's 8-byte key ID packed
 *                   into an int, or 0 if the remailer has no PGP key
 * Returns 0.
 */
int pgp_rkeylist(REMAILER remailer[], int keyid[], int n)
{
  BUFFER *userid;
  BUFFER *id;
  int i;

  userid = buf_new();
  id = buf_new();
  for (i = 1; i < n; i++) {
    buf_clear(userid);
    /* key lookup is by "<address>" user ID */
    buf_setf(userid, "<%s>", remailer[i].addr);
    keyid[i] = 0;
    if (remailer[i].flags.pgp) {
      buf_clear(id);
      pgpdb_getkey(PK_VERIFY, PGP_ANY, NULL, NULL, NULL, NULL,
                   userid, NULL, id, NULL, NULL);
      if (id->length == 8) {
        /* pack bytes 4..7 of the 8-byte key ID into an int (big-endian) */
        keyid[i] = (((((id->data[4] << 8) + id->data[5]) << 8) +
                     id->data[6]) << 8) + id->data[7];
      }
    }
  }
  buf_free(userid);
  buf_free(id);  /* fix: id was previously leaked (only userid was freed) */
  return (0);
}
/* Unit test: buf_clear() must leave both a populated buffer and an
 * already-empty buffer in the empty state. */
void case_buf_clear()
{
  struct buf *populated = buf("test");
  struct buf *untouched = buf(NULL);

  /* precondition: one buffer holds data, the other does not */
  assert(!buf_isempty(populated));
  assert(buf_isempty(untouched));

  buf_clear(populated);
  buf_clear(untouched);

  /* postcondition: both are empty */
  assert(buf_isempty(populated));
  assert(buf_isempty(untouched));

  buf_free(populated);
  buf_free(untouched);
}
int pgp_latestkeys(BUFFER* outtxt, int algo)
/* returns our latest key from pgpkey.txt in the buffer outtxt
 * with pgp key header, ascii armored
 *
 * Can probably be extended to do this for all keys if we pass
 * the keyring file and the userid
 *
 * IN: algo: PGP_ANY, PGP_ES_RSA, PGP_E_ELG, PGP_S_DSA
 * OUT: outtxt
 * Returns 0 if a key was written to outtxt, -1 otherwise.
 */
{
  int err = -1;
  long expires_found = 0, expires;
  BUFFER *key, *userid, *tmptxt;
  KEYRING *keys;

  key = buf_new();
  userid = buf_new();
  buf_sets(userid, REMAILERNAME);  /* restrict scan to our own user ID */
  tmptxt = buf_new();
  keys = pgpdb_open(PGPKEY, NULL, 0, PGP_TYPE_PUBLIC);
  if (keys != NULL) {
    /* Walk every key in the ring; keep the one with the latest expiry
     * (a key that never expires, expires == 0, always wins). */
    while (pgpdb_getnext(keys, key, NULL, userid) != -1) {
      buf_clear(tmptxt);
      if (pgp_makekeyheader(PGP_PUBKEY, key, tmptxt, NULL, algo) == 0) {
        buf_rewind(key);
        /* NOTE(review): pgp_getkey's return value is ignored here; if it
         * fails, `expires` may be stale from the previous iteration —
         * confirm pgp_getkey always sets it. */
        pgp_getkey(PK_VERIFY, algo, NULL, NULL, &expires, key, NULL, NULL, NULL, NULL);
        if (expires == 0 || (expires_found <= expires)) {
          err = 0;
          /* replace any previously selected key */
          buf_clear(outtxt);
          buf_appends(outtxt, "Type Bits/KeyID Date User ID\n");
          buf_cat(outtxt, tmptxt);
          buf_nl(outtxt);
          pgp_armor(key, PGP_ARMOR_KEY);  /* armor in place */
          buf_cat(outtxt, key);
          buf_nl(outtxt);
          expires_found = expires;
        }
      }
    }
    pgpdb_close(keys);
  }
  buf_free(key);
  buf_free(userid);
  buf_free(tmptxt);
  return (err);
}
/* Redraw the visible menu window into the LCD frame buffer: one row per
 * menu entry, with a '>' marker in column 0 on the currently selected
 * entry and the entry name starting in column 2. */
void menu_refresh(void)
{
  menu_t *temp;
  uint8_t i;

  /* start from the first sibling of the current level (root is menu1) */
  if (currentPointer->parent)
    temp = (currentPointer->parent)->child;
  else
    temp = &menu1;

  /* skip entries scrolled off the top of the window */
  for (i = 0; i != menu_index - lcd_row_pos; i++) {
    temp = temp->next;
  }

  buf_clear();
  for (i = 0; i < LCD_ROWS; i++) {
    buf_locate(0, i);
    if (temp == currentPointer)
      buf_char(62);  /* 62 == '>' selection marker */
    else
      buf_char(' ');
    buf_locate(2, i);
    buf_str(temp->name);
    temp = temp->next;
    if (!temp)  /* fewer entries than rows: stop early */
      break;
  }
  // lcd_refresh();
}
/* Test peek_buf_has_control0_command(): it must report 1 only when the
 * buffer starts with a v0 control-protocol frame, and 0 otherwise. */
static void test_proto_control0(void *arg)
{
  (void)arg;
  buf_t *buf = buf_new();
  /* The only remaining function for the v0 control protocol is the
     function that detects whether the user has stumbled across an old
     controller that's using it. The format was:
        u16 length;
        u16 command;
        u8 body[length];
  */
  /* Empty buffer -- nothing to do. */
  tt_int_op(0, OP_EQ, peek_buf_has_control0_command(buf));
  /* 3 chars in buf -- can't tell */
  buf_add(buf, "AUT", 3);
  tt_int_op(0, OP_EQ, peek_buf_has_control0_command(buf));
  /* command in buf -- easy to tell */
  buf_add(buf, "HENTICATE  ", 10);
  tt_int_op(0, OP_EQ, peek_buf_has_control0_command(buf));

  /* Control0 command header in buf: make sure we detect it. */
  buf_clear(buf);
  /* length=0x0905 is implausible for text, command=0x0005: a v0 frame */
  buf_add(buf, "\x09\x05" "\x00\x05" "blah", 8);
  tt_int_op(1, OP_EQ, peek_buf_has_control0_command(buf));

 done:
  buf_free(buf);
}
/* Release a Buffer: drop its contents, detach it from its neighbours,
 * free its name, and free the structure itself. The pointer is invalid
 * after this call. */
void buf_free(struct Buffer *buf)
{
  buf_clear(buf);
  buf->next = buf->prev = NULL;
  free(buf->name);
  buf->name = NULL;  /* fix: was '\0' — a char constant used as a pointer */
  free(buf);
}
/* Reassemble a multipart Mixmaster v2 message from its pool packet files.
 * mid identifies the message; packet file 0 is the lock/control file,
 * file 1 carries the packet count, files 1..numpackets hold the pieces.
 * Returns 0 on success, -1 if any piece is missing or unreadable. */
int v2_merge(BUFFER *mid)
{
  char fname[PATHMAX], line[LINELEN];
  BUFFER *temp, *msg;
  FILE *l, *f;
  int i, numpackets;
  struct stat sb;
  long d;
  int n;
  int err = -1;

  temp = buf_new();
  msg = buf_new();
  /* take the per-message lock via packet file 0 */
  pool_packetfile(fname, mid, 0);
  l = fopen(fname, "a+");
  if (l != NULL)
    lock(l);
  /* read the header line of the first packet to learn numpackets.
   * NOTE(review): fscanf's return value is unchecked here and below — on a
   * truncated file numpackets/d are used uninitialized; confirm callers
   * guarantee well-formed packet files. */
  pool_packetfile(fname, mid, 1);
  f = fopen(fname, "rb");
  if (f == NULL)
    goto end;
  fscanf(f, "%32s %ld %d %d\n", line, &d, &i, &numpackets);
  fclose(f);
  /* do we have all packets? */
  for (i = 1; i <= numpackets; i++) {
    pool_packetfile(fname, mid, i);
    if (stat(fname, &sb) != 0)
      goto end;
  }
  errlog(LOG, "Reassembling multipart message.\n");
  for (i = 1; i <= numpackets; i++) {
    pool_packetfile(fname, mid, i);
    f = fopen(fname, "rb");
    if (f == NULL)
      goto end;
    fscanf(f, "%32s %ld %d %d\n", line, &d, &n, &n);  /* skip header line */
    buf_clear(temp);
    buf_read(temp, f);
    v2body_setlen(temp);
    /* first 4 bytes are the per-packet length field; append payload only */
    buf_append(msg, temp->data + 4, temp->length - 4);
    fclose(f);
    unlink(fname);  /* piece consumed */
  }
  err = v2body(msg);  /* hand the reassembled body to normal processing */
end:
  if (l != NULL)
    fclose(l);  /* releases the lock */
  pool_packetfile(fname, mid, 0);
  unlink(fname);
  buf_free(temp);
  buf_free(msg);
  return (err);
}
/* Allocate and return a fresh struct buf with capacity `cap`,
 * grown once up front and cleared, ready for use. */
struct buf *buf_create(size_t cap)
{
  struct buf *b = xmalloc(sizeof(*b));

  /* start from a pristine state so buf_grow allocates from scratch */
  b->buf = NULL;
  b->cap = 0;
  buf_grow(b, cap);
  buf_clear(b);
  b->cap = cap;  /* record the requested capacity after clearing */
  return b;
}
/* Handle one key event while the command line (status bar) is active.
 * editor->status holds the prompt character at index 0 followed by the
 * typed command; editor->status_cursor is the insertion point (>= 1 so
 * the prompt itself is never edited). mode->char_cb, if set, is invoked
 * with the current command text after every edit; mode->done_cb runs the
 * command on Enter. */
static void cmdline_mode_key_pressed(struct editor *editor, struct tb_event *ev)
{
  char ch;
  struct cmdline_mode *mode = (struct cmdline_mode*) editor->mode;
  switch (ev->key) {
  case TB_KEY_ESC:
  case TB_KEY_CTRL_C:
    /* abort: wipe the status line and leave command-line mode */
    buf_clear(editor->status);
    editor_pop_mode(editor);
    return;
  // FIXME(ibadawi): termbox doesn't support shift + arrow keys.
  // vim uses <S-Left>, <S-Right> for moving cursor to prev/next WORD.
  case TB_KEY_ARROW_LEFT:
    editor->status_cursor = max(editor->status_cursor - 1, 1);
    return;
  case TB_KEY_ARROW_RIGHT:
    editor->status_cursor = min(editor->status_cursor + 1, editor->status->len);
    return;
  case TB_KEY_CTRL_B:
  case TB_KEY_HOME:
    editor->status_cursor = 1;  /* just after the prompt character */
    return;
  case TB_KEY_CTRL_E:
  case TB_KEY_END:
    editor->status_cursor = editor->status->len;
    return;
  case TB_KEY_BACKSPACE2:
    buf_delete(editor->status, --editor->status_cursor, 1);
    if (editor->status->len == 0) {
      /* deleted the prompt itself: cancel command-line mode */
      editor_pop_mode(editor);
      return;
    } else if (mode->char_cb) {
      /* notify incremental callback (e.g. live search) of the new text */
      char *command = xstrdup(editor->status->buf + 1);
      mode->char_cb(editor, command);
      free(command);
    }
    return;
  case TB_KEY_ENTER: {
    char *command = xstrdup(editor->status->buf + 1);
    editor_pop_mode(editor);
    mode->done_cb(editor, command);
    free(command);
    return;
  }
  case TB_KEY_SPACE:
    ch = ' ';
    break;
  default:
    /* printable character: fall through to insertion below */
    ch = (char) ev->ch;
  }
  char s[2] = {ch, '\0'};
  buf_insert(editor->status, s, editor->status_cursor++);
  if (mode->char_cb) {
    char *command = xstrdup(editor->status->buf + 1);
    mode->char_cb(editor, command);
    free(command);
  }
}
/* Read a PGP keyring file into `keys`. Accepts either a raw (binary)
 * packet stream, which is moved wholesale, or ASCII-armored text, from
 * which every BEGIN/END PGP armor section is dearmored and concatenated.
 * Returns 0 for a binary ring, ARMORED if at least one armor block was
 * decoded, -1 on open failure or if nothing usable was found. */
static int pgp_readkeyring(BUFFER *keys, char *filename)
{
  FILE *keyfile;
  BUFFER *armored, *line, *tmp;
  int err = -1;

  if ((keyfile = mix_openfile(filename, "rb")) == NULL)
    return (err);
  armored = buf_new();
  buf_read(armored, keyfile);
  fclose(keyfile);
  if (pgp_ispacket(armored)) {
    /* binary keyring: hand the whole thing over */
    err = 0;
    buf_move(keys, armored);
  } else {
    line = buf_new();
    tmp = buf_new();
    /* greedy dearmor: scan for every armor block until EOF */
    while (1) {
      /* skip lines until a BEGIN PGP marker (or EOF -> done) */
      do
        if (buf_getline(armored, line) == -1) {
          goto end_greedy_dearmor;
        }
      while (!bufleft(line, begin_pgp)) ;
      buf_clear(tmp);
      buf_cat(tmp, line);
      buf_appends(tmp, "\n");
      /* collect lines through the matching END PGP marker */
      do {
        if (buf_getline(armored, line) == -1) {
          goto end_greedy_dearmor;  /* truncated armor: discard partial */
        }
        buf_cat(tmp, line);
        buf_appends(tmp, "\n");
      } while (!bufleft(line, end_pgp)) ;
      if (pgp_dearmor(tmp, tmp) == 0) {
        err = ARMORED;
        buf_cat(keys, tmp);
      }
    }
  end_greedy_dearmor:
    buf_free(line);
    buf_free(tmp);
  }
  buf_free(armored);
  return (err);
}
/** Perform unsupported SOCKS 5 commands: the parser must reply with
 * SOCKS5_COMMAND_NOT_SUPPORTED for BIND and UDP_ASSOCIATE requests. */
static void test_socks_5_unsupported_commands(void *ptr)
{
  SOCKS_TEST_INIT();

  /* SOCKS 5 Send unsupported BIND [02] command */
  ADD_DATA(buf, "\x05\x02\x00\x01");

  /* method-negotiation phase succeeds (returns 0, buffer consumed) */
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, 0);
  tt_int_op(0,OP_EQ, buf_datalen(buf));
  tt_int_op(5,OP_EQ, socks->socks_version);
  tt_int_op(2,OP_EQ, socks->replylen);
  tt_int_op(5,OP_EQ, socks->reply[0]);
  tt_int_op(0,OP_EQ, socks->reply[1]);
  ADD_DATA(buf, "\x05\x02\x00\x01\x02\x02\x02\x01\x01\x01");
  /* request phase rejects BIND with -1 and a NOT_SUPPORTED reply */
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, -1);
  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_COMMAND_NOT_SUPPORTED,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);
  buf_clear(buf);
  socks_request_clear(socks);

  /* SOCKS 5 Send unsupported UDP_ASSOCIATE [03] command */
  ADD_DATA(buf, "\x05\x02\x00\x01");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, 0);
  tt_int_op(5,OP_EQ, socks->socks_version);
  tt_int_op(2,OP_EQ, socks->replylen);
  tt_int_op(5,OP_EQ, socks->reply[0]);
  tt_int_op(0,OP_EQ, socks->reply[1]);
  ADD_DATA(buf, "\x05\x03\x00\x01\x02\x02\x02\x01\x01\x01");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, -1);
  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_COMMAND_NOT_SUPPORTED,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);

 done:
  ;
}
/* Refresh remailer statistics: read the stats-source list from STATSSRC
 * (if present) and, when non-empty, trigger a download from it. */
void mix_upd_stats(void)
{
  FILE *fp;
  BUFFER *sources;

  sources = buf_new();
  buf_clear(sources);
  fp = mix_openfile(STATSSRC, "r");
  if (fp != NULL) {
    buf_read(sources, fp);
    fclose(fp);
  }
  /* a missing or empty source file means there is nothing to fetch */
  if (sources->length > 0)
    download_stats(sources->data);
  buf_free(sources);
}
/**
 * @fn void initCircArray(CircArr_InitTypeDef* arr, uint32_t size)
 * @brief Initialize a circular array: allocate a zeroed backing buffer of
 *        `size` bytes and reset the read/write indices. Refuses to run on
 *        an array that is already initialized (use a resize function for
 *        that instead).
 * @param arr a pointer to a circular array object
 * @param size the size of the circular array to be initialized
 * @return
 */
void initCircArray(CircArr_InitTypeDef* arr, uint32_t size)
{
  /* check for an already initialized circarray; resizing is a separate op */
  if (arr->enabled){
    printf("error circ array already initialized\n");
    return;
  }
  arr->size = size;
  arr->n_r = 0;
  arr->n_w = 0;
  arr->buf = calloc(size, sizeof(uint8_t));
  if (arr->buf == NULL) {
    /* fix: allocation was previously unchecked (original TODO) */
    printf("error circ array allocation failed\n");
    arr->size = 0;
    return;
  }
  buf_clear(arr);
  /* NOTE(review): nothing else visibly sets the flag the guard above
   * tests; confirm buf_clear() does not already do this. */
  arr->enabled = true;
}
/* Read lines from `is` until `close_tag` (or EOF) and return the
 * accumulated text as a freshly allocated string. The line matching
 * close_tag is consumed but not included. Caller owns the result
 * (string_alloc). */
static char *
read_inline_file (struct in_src *is, const char *close_tag)
{
  char line[OPTION_LINE_SIZE];
  struct buffer buf = alloc_buf (10000);
  char *ret;
  while (in_src_get (is, line, sizeof (line)))
    {
      /* prefix match: the close tag may be followed by a newline */
      if (!strncmp (line, close_tag, strlen (close_tag)))
        break;
      buf_printf (&buf, "%s", line);
    }
  ret = string_alloc (buf_str (&buf));
  buf_clear (&buf);   /* zeroize first: inline files may hold key material */
  free_buf (&buf);
  CLEAR (line);       /* scrub the stack copy as well */
  return ret;
}
/* Test buf_get_line(): table-driven cases covering no-newline (0),
 * complete lines (1), and an over-long line that cannot fit tmp (-1). */
static void test_proto_line(void *arg)
{
  (void)arg;
  char tmp[60];
  buf_t *buf = buf_new();
#define S(str) str, sizeof(str)-1
  const struct {
    const char *input;      /* bytes fed into the buffer */
    size_t input_len;
    size_t line_len;        /* expected length of the extracted line */
    const char *output;     /* expected extracted line (NULL if none) */
    int returnval;          /* expected buf_get_line() result */
  } cases[] = {
    { S("Hello world"), 0, NULL, 0 },                 /* no newline yet */
    { S("Hello world\n"), 12, "Hello world\n", 1 },
    { S("Hello world\nMore"), 12, "Hello world\n", 1 },
    { S("\n oh hello world\nMore"), 1, "\n", 1 },     /* empty first line */
    { S("Hello worpd\n\nMore"), 12, "Hello worpd\n", 1 },
    { S("------------------------------------------------------------\n"),
      0, NULL, -1 },                                  /* line longer than tmp */
  };
  unsigned i;
  for (i = 0; i < ARRAY_LENGTH(cases); ++i) {
    buf_add(buf, cases[i].input, cases[i].input_len);
    memset(tmp, 0xfe, sizeof(tmp));  /* poison so stale bytes are caught */
    size_t sz = sizeof(tmp);
    int rv = buf_get_line(buf, tmp, &sz);
    tt_int_op(rv, OP_EQ, cases[i].returnval);
    if (rv == 1) {
      tt_int_op(sz, OP_LT, sizeof(tmp));
      tt_mem_op(cases[i].output, OP_EQ, tmp, sz+1);  /* +1 checks the NUL */
      tt_int_op(buf_datalen(buf), OP_EQ, cases[i].input_len - strlen(tmp));
      tt_int_op(sz, OP_EQ, cases[i].line_len);
    } else {
      /* on failure the buffer must be left untouched */
      tt_int_op(buf_datalen(buf), OP_EQ, cases[i].input_len);
      // tt_int_op(sz, OP_EQ, sizeof(tmp));
    }
    buf_clear(buf);
  }

 done:
  buf_free(buf);
}
/**
 * @fn bool buf_delete(CircArr_InitTypeDef* arr)
 * @brief Tear down a circular array: scrub and free its internal buffer,
 *        zero the indices and size, and mark the array disabled.
 * @param arr a pointer to a circular array object
 * @return true on success, false when arr is NULL (nothing to delete)
 */
bool buf_delete(CircArr_InitTypeDef* arr){
  /* refuse to operate on a missing struct rather than crash */
  if (!arr)
    return false;

  /* zero the stored bytes before releasing them */
  buf_clear(arr);
  free(arr->buf);
  arr->buf = NULL;

  /* reset bookkeeping so the struct can be re-initialized later */
  arr->size = 0;
  arr->n_r = 0;
  arr->n_w = 0;
  arr->enabled = false;
  return true;
}
/* Load a file into `buf` as a list of Line nodes, replacing any previous
 * contents. Uses `name` if given, otherwise buf->name (GNU ?: extension).
 * On success the current line is reset to the first line and true is
 * returned; on open failure a single empty line is installed and false
 * is returned. */
bool buf_read(struct Buffer *buf, const char *name)
{
  buf_clear(buf);
  FILE *fp = fopen(name ?: buf->name, "r");
  if (fp) {
    char in[BUFSIZ];
    while (fgets(in, BUFSIZ, fp) != NULL) {
      /* -1 drops the trailing newline.
       * NOTE(review): if the final line has no '\n' (or a line exceeds
       * BUFSIZ), this truncates a real character — confirm line_new's
       * length contract. */
      struct Line *l = line_new(in, strlen(in) - 1);
      buf_pushback(buf, l);
    }
    buf->line = buf->beg;
    fclose(fp);
    return true;
  } else {
    /* keep the buffer usable even when the file can't be opened */
    struct Line *l = line_new(NULL, 0);
    buf_pushback(buf, l);
    buf->line = l;
  }
  return false;
}
/* Test buffer checksumming: write a record (magic + two u32 + u64),
 * append its xsum, then read the stored xsum back and verify it matches
 * a recomputation over the same byte range. */
CTEST2(buf_test, checksum)
{
  buf_clear(data->b);
  uint32_t cs1, cs2, cs3;
  uint64_t v1 = 123456789101112;

  /* record layout: 8-byte tag, u32, u32, u64 = 24 bytes */
  buf_putnstr(data->b, "nessdata", 8);
  buf_putuint32(data->b, 0);
  buf_putuint32(data->b, 1);
  buf_putuint64(data->b, v1);
  /* checksum everything written so far and append it */
  buf_xsum(data->b->buf, data->b->NUL, &cs1);
  buf_putuint32(data->b, cs1);

  /* skip over the 24 record bytes, read back the stored checksum */
  buf_skip(data->b, 8 + 4 + 4 + 8);
  buf_getuint32(data->b, &cs2);
  /* recompute over the same range; must agree with what was stored */
  buf_xsum(data->b->buf, 8 + 4 + 4 + 8, &cs3);
  ASSERT_EQUAL(cs2, cs3);
}
/* Emit the buffered lines as one HTML block (<p>, <pre> or <ul> according
 * to buf_type) and reset the buffer. A buffer whose first line is empty is
 * considered empty and produces no output. List items are wrapped in
 * <li>...</li> individually. */
void buf_flush()
{
  int ln;

  if (strcmp(text[0], "") == 0)
    return;

  /* opening tag for the current block type */
  switch (buf_type) {
  case TYPE_TEXT: printf("<p>\n");   break;
  case TYPE_PRE:  printf("<pre>\n"); break;
  case TYPE_LI:   printf("<ul>\n");  break;
  }

  /* emit each stored line until the first empty slot (max 100) */
  for (ln = 0; ln < 100; ln++) {
    if (strcmp(text[ln], "") == 0)
      break;
    if (buf_type == TYPE_LI)
      printf("<li>");
    hm(text[ln]);
    o("\n");
    if (buf_type == TYPE_LI)
      printf("</li>");
  }

  /* matching closing tag */
  switch (buf_type) {
  case TYPE_TEXT: printf("</p>\n");   break;
  case TYPE_PRE:  printf("</pre>\n"); break;
  case TYPE_LI:   printf("</ul>\n");  break;
  }

  buf_clear();
}
/*
 * ffs_blkalloc allocates a disk block for ffs_pageout(), as a consequence
 * it does no buf_breads (that could lead to deadblock as the page may be already
 * marked busy as it is being paged out. Also important to note that we are not
 * growing the file in pageouts. So ip->i_size cannot increase by this call
 * due to the way UBC works.
 * This code is derived from ffs_balloc and many cases of that are dealt
 * in ffs_balloc are not applicable here
 * Do not call with B_CLRBUF flags as this should only be called only
 * from pageouts
 *
 * NOTE(review): the return type is implicit int (pre-ANSI style); on
 * success returns 0, otherwise an errno-style error from ffs_alloc /
 * buf_bwrite / ufs_getlbns.
 */
ffs_blkalloc(
	struct inode *ip,
	ufs_daddr_t lbn,
	int size,
	kauth_cred_t cred,
	int flags)
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize=0;
	struct mount *mp=vp->v_mount;
#if REV_ENDIAN_FS
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;

	if(size > fs->fs_bsize)
		panic("ffs_blkalloc: too large for allocation");

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 * (Pageouts never grow the file, so this is a panic here.)
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d", ip->i_size, lbn);
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			/* TBD: trivial case; the block is already allocated */
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 * Growing an existing fragment is not legal from a
			 * pageout, hence the panic.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize > osize) {
				panic("ffs_allocblk: trying to extend a fragment");
			}
			return(0);
		} else {
			/* no block yet: allocate a fragment or a full block */
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return(error);

	if(num == 0) {
		/* lbn >= NDADDR, so at least one indirect level must exist */
		panic("ffs_blkalloc: file with direct blocks only");
	}

	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;	/* records new blocks for rollback on failure */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		/* get a metadata buffer for the new indirect block and zero it
		 * (no read: the block is freshly allocated) */
		bp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[1].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(bp);
		/*
		 * Write synchronously conditional on mount flags.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(bp);
		} else if (error = buf_bwrite(bp)) {
			goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = (int)buf_meta_bread(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			buf_brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)buf_dataptr(bp);
#if REV_ENDIAN_FS
		/* on-disk data may be opposite-endian on REV_ENDIAN_FS mounts */
		if (rev_endian)
			nb = OSSwapInt32(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;	/* bp now holds the last indirect level */
		i += 1;
		if (nb != 0) {
			buf_brelse(bp);
			continue;
		}
		/* missing intermediate indirect block: allocate and zero it */
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(nbp);
		/*
		 * Write synchronously conditional on mount flags.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(nbp);
		} else if (error = buf_bwrite(nbp)) {
			buf_brelse(bp);
			goto fail;
		}
		/* link the new child into the parent indirect block */
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = OSSwapInt32(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		/* record the data block in the last-level indirect block */
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = OSSwapInt32(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
		return (0);
	}
	buf_brelse(bp);
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		devBlockSize = vfs_devblocksize(mp);
#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}
void tls_crypt_v2_write_client_key_file(const char *filename, const char *b64_metadata, const char *server_key_file, const char *server_key_inline) { struct gc_arena gc = gc_new(); struct key_ctx server_key = { 0 }; struct buffer client_key_pem = { 0 }; struct buffer dst = alloc_buf_gc(TLS_CRYPT_V2_CLIENT_KEY_LEN + TLS_CRYPT_V2_MAX_WKC_LEN, &gc); struct key2 client_key = { 2 }; if (!rand_bytes((void *)client_key.keys, sizeof(client_key.keys))) { msg(M_FATAL, "ERROR: could not generate random key"); goto cleanup; } ASSERT(buf_write(&dst, client_key.keys, sizeof(client_key.keys))); struct buffer metadata = alloc_buf_gc(TLS_CRYPT_V2_MAX_METADATA_LEN, &gc); if (b64_metadata) { if (TLS_CRYPT_V2_MAX_B64_METADATA_LEN < strlen(b64_metadata)) { msg(M_FATAL, "ERROR: metadata too long (%d bytes, max %u bytes)", (int)strlen(b64_metadata), TLS_CRYPT_V2_MAX_B64_METADATA_LEN); } ASSERT(buf_write(&metadata, &TLS_CRYPT_METADATA_TYPE_USER, 1)); int decoded_len = openvpn_base64_decode(b64_metadata, BPTR(&metadata), BCAP(&metadata)); if (decoded_len < 0) { msg(M_FATAL, "ERROR: failed to base64 decode provided metadata"); goto cleanup; } ASSERT(buf_inc_len(&metadata, decoded_len)); } else { int64_t timestamp = htonll((uint64_t)now); ASSERT(buf_write(&metadata, &TLS_CRYPT_METADATA_TYPE_TIMESTAMP, 1)); ASSERT(buf_write(&metadata, ×tamp, sizeof(timestamp))); } tls_crypt_v2_init_server_key(&server_key, true, server_key_file, server_key_inline); if (!tls_crypt_v2_wrap_client_key(&dst, &client_key, &metadata, &server_key, &gc)) { msg(M_FATAL, "ERROR: could not wrap generated client key"); goto cleanup; } /* PEM-encode Kc || WKc */ if (!crypto_pem_encode(tls_crypt_v2_cli_pem_name, &client_key_pem, &dst, &gc)) { msg(M_FATAL, "ERROR: could not PEM-encode client key"); goto cleanup; } if (!buffer_write_file(filename, &client_key_pem)) { msg(M_FATAL, "ERROR: could not write client key file"); goto cleanup; } /* Sanity check: load client key (as "client") */ struct key_ctx_bi test_client_key; 
struct buffer test_wrapped_client_key; msg(D_GENKEY, "Testing client-side key loading..."); tls_crypt_v2_init_client_key(&test_client_key, &test_wrapped_client_key, filename, NULL); free_key_ctx_bi(&test_client_key); /* Sanity check: unwrap and load client key (as "server") */ struct buffer test_metadata = alloc_buf_gc(TLS_CRYPT_V2_MAX_METADATA_LEN, &gc); struct key2 test_client_key2 = { 0 }; free_key_ctx(&server_key); tls_crypt_v2_init_server_key(&server_key, false, server_key_file, server_key_inline); msg(D_GENKEY, "Testing server-side key loading..."); ASSERT(tls_crypt_v2_unwrap_client_key(&test_client_key2, &test_metadata, test_wrapped_client_key, &server_key)); secure_memzero(&test_client_key2, sizeof(test_client_key2)); free_buf(&test_wrapped_client_key); cleanup: secure_memzero(&client_key, sizeof(client_key)); free_key_ctx(&server_key); buf_clear(&client_key_pem); buf_clear(&dst); gc_free(&gc); }
/* Unwrap a tls-crypt-v2 wrapped client key (WKc) using the server key:
 * verify the trailing length field, decrypt, authenticate the plaintext
 * against the leading tag (constant-time compare), then split the
 * plaintext into the client key2 and the metadata. Returns true on
 * success; on any failure the client_key is zeroized and false returned. */
static bool
tls_crypt_v2_unwrap_client_key(struct key2 *client_key, struct buffer *metadata,
                               struct buffer wrapped_client_key,
                               struct key_ctx *server_key)
{
    const char *error_prefix = __func__;
    bool ret = false;
    struct gc_arena gc = gc_new();
    /* The crypto API requires one extra cipher block of buffer head room
     * when decrypting, which nicely matches the tag size of WKc. So
     * TLS_CRYPT_V2_MAX_WKC_LEN is always large enough for the plaintext. */
    uint8_t plaintext_buf_data[TLS_CRYPT_V2_MAX_WKC_LEN] = { 0 };
    struct buffer plaintext = { 0 };

    dmsg(D_TLS_DEBUG_MED, "%s: unwrapping client key (len=%d): %s", __func__,
         BLEN(&wrapped_client_key),
         format_hex(BPTR(&wrapped_client_key), BLEN(&wrapped_client_key), 0, &gc));

    if (TLS_CRYPT_V2_MAX_WKC_LEN < BLEN(&wrapped_client_key))
    {
        CRYPT_ERROR("wrapped client key too big");
    }

    /* Decrypt client key and metadata */
    uint16_t net_len = 0;
    /* WKc layout: tag || ciphertext || u16 total length (network order) */
    const uint8_t *tag = BPTR(&wrapped_client_key);

    if (BLEN(&wrapped_client_key) < sizeof(net_len))
    {
        CRYPT_ERROR("failed to read length");
    }
    memcpy(&net_len, BEND(&wrapped_client_key) - sizeof(net_len), sizeof(net_len));
    /* the embedded length must cover the entire WKc, tag and all */
    if (ntohs(net_len) != BLEN(&wrapped_client_key))
    {
        dmsg(D_TLS_DEBUG_LOW, "%s: net_len=%u, BLEN=%i", __func__,
             ntohs(net_len), BLEN(&wrapped_client_key));
        CRYPT_ERROR("invalid length");
    }

    buf_inc_len(&wrapped_client_key, -(int)sizeof(net_len));  /* drop length */

    if (!buf_advance(&wrapped_client_key, TLS_CRYPT_TAG_SIZE))  /* skip tag */
    {
        CRYPT_ERROR("failed to read tag");
    }

    /* the tag doubles as the SIV for decryption */
    if (!cipher_ctx_reset(server_key->cipher, tag))
    {
        CRYPT_ERROR("failed to initialize IV");
    }
    buf_set_write(&plaintext, plaintext_buf_data, sizeof(plaintext_buf_data));
    int outlen = 0;
    if (!cipher_ctx_update(server_key->cipher, BPTR(&plaintext), &outlen,
                           BPTR(&wrapped_client_key), BLEN(&wrapped_client_key)))
    {
        CRYPT_ERROR("could not decrypt client key");
    }
    ASSERT(buf_inc_len(&plaintext, outlen));

    if (!cipher_ctx_final(server_key->cipher, BEND(&plaintext), &outlen))
    {
        CRYPT_ERROR("cipher final failed");
    }
    ASSERT(buf_inc_len(&plaintext, outlen));

    /* Check authentication: recompute HMAC over length + plaintext and
     * compare against the transmitted tag in constant time. */
    uint8_t tag_check[TLS_CRYPT_TAG_SIZE] = { 0 };
    hmac_ctx_reset(server_key->hmac);
    hmac_ctx_update(server_key->hmac, (void *)&net_len, sizeof(net_len));
    hmac_ctx_update(server_key->hmac, BPTR(&plaintext), BLEN(&plaintext));
    hmac_ctx_final(server_key->hmac, tag_check);

    if (memcmp_constant_time(tag, tag_check, sizeof(tag_check)))
    {
        dmsg(D_CRYPTO_DEBUG, "tag      : %s",
             format_hex(tag, sizeof(tag_check), 0, &gc));
        dmsg(D_CRYPTO_DEBUG, "tag_check: %s",
             format_hex(tag_check, sizeof(tag_check), 0, &gc));
        CRYPT_ERROR("client key authentication error");
    }

    /* plaintext layout: client key2 followed by metadata */
    if (buf_len(&plaintext) < sizeof(client_key->keys))
    {
        CRYPT_ERROR("failed to read client key");
    }
    memcpy(&client_key->keys, BPTR(&plaintext), sizeof(client_key->keys));
    ASSERT(buf_advance(&plaintext, sizeof(client_key->keys)));

    if (!buf_copy(metadata, &plaintext))
    {
        CRYPT_ERROR("metadata too large for supplied buffer");
    }

    ret = true;
error_exit:
    if (!ret)
    {
        /* never leave a partially-unwrapped key behind on failure */
        secure_memzero(client_key, sizeof(*client_key));
    }
    buf_clear(&plaintext);  /* scrub decrypted key material from the stack */
    gc_free(&gc);
    return ret;
}
/** Perform malformed SOCKS 5 commands: the parser must return -1 and set
 * the appropriate SOCKS5 error reply for disallowed IPs (SafeSocks),
 * RESOLVE_PTR on an FQDN, a hostile FQDN, and an unknown address type. */
static void test_socks_5_malformed_commands(void *ptr)
{
  SOCKS_TEST_INIT();

  /* XXX: Stringified address length > MAX_SOCKS_ADDR_LEN will never happen */

  /** SOCKS 5 Send CONNECT [01] to IP address 2.2.2.2:4369, with SafeSocks set
   *  (literal IP with SafeSocks=1 must be refused: SOCKS5_NOT_ALLOWED) */
  ADD_DATA(buf, "\x05\x01\x00");
  ADD_DATA(buf, "\x05\x01\x00\x01\x02\x02\x02\x02\x11\x11");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks, 1),
            OP_EQ, -1);

  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_NOT_ALLOWED,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);

  buf_clear(buf);
  socks_request_clear(socks);

  /* SOCKS 5 Send RESOLVE_PTR [F1] for FQDN torproject.org
   * (reverse lookup needs an IP, not a hostname) */
  ADD_DATA(buf, "\x05\x01\x00");
  ADD_DATA(buf, "\x05\xF1\x00\x03\x0Etorproject.org\x11\x11");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, -1);

  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);

  buf_clear(buf);
  socks_request_clear(socks);

  /* XXX: len + 1 > MAX_SOCKS_ADDR_LEN (FQDN request) will never happen */

  /* SOCKS 5 Send CONNECT [01] to FQDN """"".com
   * (hostname with forbidden characters -> general error) */
  ADD_DATA(buf, "\x05\x01\x00");
  ADD_DATA(buf, "\x05\x01\x00\x03\x09\"\"\"\"\".com\x11\x11");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, -1);

  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_GENERAL_ERROR,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);

  buf_clear(buf);
  socks_request_clear(socks);

  /* SOCKS 5 Send CONNECT [01] to address type 0x23 (unknown ATYP) */
  ADD_DATA(buf, "\x05\x01\x00");
  ADD_DATA(buf, "\x05\x01\x00\x23\x02\x02\x02\x02\x11\x11");
  tt_int_op(fetch_from_buf_socks(buf, socks, get_options()->TestSocks,
                                 get_options()->SafeSocks),OP_EQ, -1);

  tt_int_op(5,OP_EQ,socks->socks_version);
  tt_int_op(10,OP_EQ,socks->replylen);
  tt_int_op(5,OP_EQ,socks->reply[0]);
  tt_int_op(SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED,OP_EQ,socks->reply[1]);
  tt_int_op(1,OP_EQ,socks->reply[3]);

 done:
  ;
}
/* Locate the shared library implementing module `usename`.
 * The dotted module name is mapped to an underscored symbol name
 * ("a.b" -> "a_b") and a directory path ("a/b"), then each ':'-separated
 * directory in the MX path is probed for "<dir>/a/b/libmx_a_b.so".
 * Without an MX path, "libmx_a_b.so" is probed relative to the cwd.
 * Returns a newly allocated path on success (caller frees), NULL if not
 * found. */
char * mxdl_find(char *usename)
{
    char *result = NULL;
    char *libname = NULL;
    int len;
    char *tr_usename;
    char *tr_pathname;
    char *mxpath = mxdl_getMXPath();
    StringBuffer *d;

    /* translate the usename: '.' separators become '_' */
    len = strlen(usename);
    tr_usename = emalloc(len + 1);
    tr_usename[len] = '\0';
    while (--len >= 0) {
        tr_usename[len] = (usename[len] == '.') ? '_' : usename[len];
    }
    /* (removed a dead, empty `if (index(...) == NULL)` branch here) */

    /* translate the tr_usename to a path: '_' becomes '/' */
    len = strlen(tr_usename);
    tr_pathname = emalloc(len + 1);
    tr_pathname[len] = '\0';
    while (--len >= 0) {
        tr_pathname[len] = (tr_usename[len] == '_') ? '/' : tr_usename[len];
    }

    /* look in all path directories for the library */
    d = buf_createDefault();
    if (mxpath != NULL) {
        char *dupmxpath = estrdup(mxpath);
        /* NOTE(review): strtok is not reentrant; fine if this is
         * single-threaded, otherwise switch to strtok_r */
        char *dir = strtok(dupmxpath, ":");
        while (dir != NULL) {
            buf_clear(d);
            buf_puts(d, dir);
            if (dir[strlen(dir) - 1] != '/') {
                buf_putc(d, '/');
            }
            buf_puts(d, tr_pathname);
            buf_putc(d, '/');
            buf_puts(d, "libmx_");
            buf_puts(d, tr_usename);
            buf_puts(d, ".so");
            libname = buf_data(d);
            if (stat_file(libname) == 0) {
                result = buf_toString(d);
                buf_free(d);
                d = NULL;
                break;
            }
            dir = strtok(NULL, ":");
        }
        free(dupmxpath);
    } else {
        buf_printf(d, "libmx_%s.so", tr_usename);
        libname = buf_toString(d);
        buf_free(d);
        d = NULL;
        if (stat_file(libname) == 0) {
            result = libname;
        }
        /* NOTE(review): when stat_file() fails here, libname appears to
         * leak — confirm buf_toString() ownership and free it if so. */
    }
    if (d != NULL) {
        buf_free(d);
    }
    free(tr_usename);
    free(tr_pathname);
    return result;
}
/* Parse a share-tree dump from `file` line by line, rebuilding the dentry
 * tree via a cuckoo table keyed by node id while streaming an MD5 over
 * the input. On success returns the root dentry, stores the digest in
 * md5buf and the highest id seen in *maxid; returns NULL on any error. */
struct dt_dentry * dtread_readfile(FILE *file, unsigned int *maxid, char *md5buf)
{
  int c;
  char ch;
  struct buf_str *bs;
  struct cuckoo_ctx *cu;
  struct umd5_ctx md5;
  struct dtread_data *dr;
  struct dt_dentry *root = NULL;

  LOG_ASSERT(file != NULL, "Bad arguments\n");

  if ((bs = buf_alloc()) == NULL)
    return NULL;
  if ((cu = cuckoo_alloc(0)) == NULL)
    goto clear_bs;
  umd5_init(&md5);
  while ((c = fgetc(file)) != EOF) {
    if (c == '\n') {
      /* a complete line: hash it, parse it, reset the accumulator */
      if (buf_error(bs))
        goto clear_cu;
      umd5_update(&md5, buf_string(bs), buf_strlen(bs));
      umd5_update(&md5, "\n", 1);
      if (!dtread_readline(buf_string(bs), cu, maxid))
        goto clear_cu;
      buf_clear(bs);
    } else {
      ch = c;
      buf_appendn(bs, &ch, 1);
    }
  }

  /* a well-formed dump leaves exactly the root in the table */
  if (cuckoo_items(cu) != 1) {
    LOG_ERR("Some directories are left in cockoo tables\n");
    goto clear_cu;
  }
  if ((dr = cuckoo_lookup(cu, 1)) == NULL) {
    LOG_ERR("No root node in cuckoo table\n");
    goto clear_cu;
  }
  root = dr->de;
  cuckoo_delete(cu, 1);
  free(dr);
  umd5_finish(&md5);
  umd5_value(&md5, md5buf);
clear_cu:
  /* frees any remaining dtread_data entries (root == NULL on error path) */
  cuckoo_rfree(cu, dtread_data_free);
clear_bs:
  buf_free(bs);
  return root;
}
/* Read one node of the on-disk module index at `offset`. The offset's
 * high bits (INDEX_NODE_PREFIX/CHILDS/VALUES) say which optional parts
 * are present; the low bits (INDEX_NODE_MASK) are the file position.
 * Returns a freshly malloc'd node (caller frees), or NULL for offset 0. */
static struct index_node_f *index_read(FILE *in, uint32_t offset)
{
  struct index_node_f *node;
  char *prefix;
  int i, child_count = 0;

  /* a zero offset encodes "no node" */
  if ((offset & INDEX_NODE_MASK) == 0)
    return NULL;

  fseek(in, offset & INDEX_NODE_MASK, SEEK_SET);

  if (offset & INDEX_NODE_PREFIX) {
    struct buffer buf;
    buf_init(&buf);
    buf_freadchars(&buf, in);
    prefix = buf_steal(&buf);  /* takes ownership of the string */
  } else
    prefix = NOFAIL(strdup(""));

  if (offset & INDEX_NODE_CHILDS) {
    /* children are a dense array covering characters first..last */
    char first = read_char(in);
    char last = read_char(in);
    child_count = last - first + 1;

    node = NOFAIL(malloc(sizeof(struct index_node_f) +
                         sizeof(uint32_t) * child_count));

    node->first = first;
    node->last = last;

    for (i = 0; i < child_count; i++)
      node->children[i] = read_long(in);
  } else {
    node = NOFAIL(malloc(sizeof(struct index_node_f)));
    /* first > last marks "no children" */
    node->first = INDEX_CHILDMAX;
    node->last = 0;
  }

  node->values = NULL;
  if (offset & INDEX_NODE_VALUES) {
    int value_count;
    struct buffer buf;
    const char *value;
    unsigned int priority;

    value_count = read_long(in);

    buf_init(&buf);
    while (value_count--) {
      priority = read_long(in);
      buf_freadchars(&buf, in);
      value = buf_str(&buf);
      add_value(&node->values, value, buf.used, priority);
      buf_clear(&buf);  /* reuse the buffer for the next value */
    }
    buf_release(&buf);
  }

  node->prefix = prefix;
  node->file = in;
  return node;
}
/* Remove `size` bytes starting at arena-relative `offset` from a
 * FixedGapArena. The range may span several gap buffers: the tail of the
 * first touched buffer and the head of the last are trimmed, and any
 * fully covered buffers in between are dropped (except that the arena
 * always keeps at least one buffer — a requirement for `insert`).
 * Note: find_buf_GT rewrites `offset` to be relative to the found buffer. */
void fixed_gap_arena_remove(FixedGapArena *arena, memi offset, memi size)
{
    // TODO: Decide how much we want to protect the API.
    // This one is public, so maybe allow more protection?
    if (size == 0) {
        return;
    }

    hale_assert_input((offset + size) <= arena->size);
    hale_assert_input(offset <= arena->size);

    Buf *it0 = vector_begin(&arena->buffers);
    Buf *end = vector_end(&arena->buffers);
    /* locate the buffer containing `offset`; offset becomes buffer-local */
    it0 = find_buf_GT(arena, &offset, it0, end);
    hale_assert_requirement(it0 != end);

    memi length = buf_length(it0);
    if ((offset + size) < length)
    {
        /* entire range inside one buffer */
        buf_remove(it0, offset, size);
    }
    else
    {
        /* p2: bytes still to remove after trimming the first buffer */
        memi p2 = size;
        if (offset != 0) {
            p2 -= buf_remove(it0, offset, length - offset);
            ++it0;
        }

        if (p2)
        {
            Buf *itE = it0;
            length = buf_length(it0);
            if (p2 == length) {
                /* exactly one whole buffer remains to be removed */
                p2 = 0;
                ++itE;
            } else // if (p2)
            {
                /* walk over buffers wholly covered by the range */
                while (length < p2) {
                    p2 -= length;
                    ++itE;
                    if (itE == end) {
                        break;
                    }
                    length = buf_length(itE);
                }

                if (p2) {
                    /* trim the head of the last partially covered buffer */
                    buf_remove(itE, 0, p2);
                }
            }

            if (it0 != itE) {
                if (vector_count(&arena->buffers) == 1) {
                    // Do not remove the first buffer.
                    // Requirement for `insert`.
                    buf_clear(it0);
                } else {
                    vector_remove(&arena->buffers, it0, itE);
                }
            }
        }
    }

    arena->size -= size;
}
/*
 * Main editor event loop: read one character at a time from stdin and
 * dispatch it according to the current editor mode, until the context
 * switches to ED_QUITTING or stdin reaches EOF.
 *
 * Returns 0 on normal exit (quit or EOF), -1 when a hotkey command
 * callback reports failure.
 */
int ed_loop(Context_t *ctx)
{
	Cmd_t *cmd;
	int r;
	/* BUGFIX: must be int, not char. getchar() returns an int so that
	 * EOF (-1) stays out of band; storing it in a char made EOF
	 * undetectable (and the loop spun forever once stdin closed). */
	int c;

	while (1) {
		if (ctx->mode == ED_QUITTING)
			break;

		c = getchar();
		if (c == EOF)
			break;

		switch (ctx->mode) {
		case ED_HOTKEY_MODE:
			cmd = ed_get_cmd_by_hk(c);
			if (cmd) {
				r = cmd->cmd_cb(ctx, NULL);
				if (!r)
					break;
				lprintf(LL_CRITICAL,
					"Command \"%s\" was not executed successfully",
					cmd->cmd_id);
				return -1;
			}
			switch (c) {
			case 'i':
				ed_set_mode(ctx, ED_INSERT_MODE);
				break;
			}
			break;

		case ED_CMD_MODE:
			if (c == 27) {	/* ESC: abort command entry */
				ncs_rm_current_line(ctx->scr);
				ncs_set_cursor(ctx->scr, 0, 0);
				buf_clear(ctx->cmd_buffer);
				ed_set_mode(ctx, ED_HOTKEY_MODE);
				break;
			}
			if (c == '\r' || c == '\n') {	/* execute the command */
				ncs_rm_current_line(ctx->scr);
				buf_add_ch(ctx->cmd_buffer, '\0');
				ed_parse_cmd_buf(ctx);
				buf_clear(ctx->cmd_buffer);
				ed_set_mode(ctx, ED_HOTKEY_MODE);
				//ncs_set_cursor(ctx->scr, 0, 0);
				break;
			}
			/* Ordinary character: accumulate and echo. */
			buf_add_ch(ctx->cmd_buffer, c);
			ncs_addch(ctx->scr, c);
			break;

		case ED_INSERT_MODE:
			if (c == 27) {	/* ESC back to hotkey mode */
				ed_set_mode(ctx, ED_HOTKEY_MODE);
				break;
			}
			if (c & (0x1 << 7)) {	/* high bit set: not 7-bit ASCII */
				ed_info(ctx, "Non-ASCII characters not supported");
				break;
			}
			buf_add_ch(ctx->c_buffer, c);
			ncs_addch(ctx->scr, c);
			break;

		default:
			lprintf(LL_ERROR, "Mode not set!\n");
			break;
		}
	}
	return 0;
}
int chain_select(int hop[], char *chainstr, int maxrem, REMAILER *remailer,
		 int type, BUFFER *feedback)
{
  /* Parse a chain specification string into remailer indices.
   *
   * hop[]    - OUT: the chain as integers (0 means random, i.e. "*"),
   *            stored backwards: the final hop ends up in hop[0].
   *            The caller's array has length 20 (hard-coded below).
   * chainstr - IN: desired chain such as "*,*,*,*"; entries may be
   *            separated by ',', ';' or ':'. NULL/empty selects the
   *            compiled-in default CHAIN.
   * maxrem   - IN: number of entries in remailer[].
   * remailer - IN: list of remailer details (see mix2_rlist()).
   * type     - IN: 0 = mix, 1 = cypherpunk, 2 = newnym; a named entry
   *            must support this type to match.
   * feedback - OUT (optional): human-readable error text.
   *
   * Returns the number of hops stored, or -1 if an entry names an
   * unknown remailer. */
  int len = 0;
  int i, j, k;
  BUFFER *chain, *selected, *addr;
  chain = buf_new();
  selected = buf_new();
  addr = buf_new();
  if (chainstr == NULL || chainstr[0] == '\0')
    buf_sets(chain, CHAIN);
  else
    buf_sets(chain, chainstr);
  /* Walk the string right-to-left so the chain comes out backwards:
   * the final hop is in hop[0]. Each iteration handles the entry that
   * starts right after a separator (or at the start of the string). */
  for (i = chain->length; i >= 0; i--)
    if (i == 0 || chain->data[i - 1] == ',' || chain->data[i - 1] == ';' ||
	chain->data[i - 1] == ':') {
      /* BUGFIX: cast to unsigned char — passing a negative plain char
       * (byte >= 0x80 on signed-char platforms) to isspace() is
       * undefined behavior (CERT STR37-C). */
      for (j = i; isspace((unsigned char)chain->data[j]);)	/* ignore whitespace */
	j++;
      if (chain->data[j] == '\0')
	break;
      if (chain->data[j] == '*')
	k = 0;			/* 0 = pick a random remailer later */
#if 0
      else if (isdigit(chain->data[j]))
	k = atoi(chain->data + j);
#endif /* 0 */
      else {
	/* Named entry: try to reduce it to a bare address first. */
	buf_sets(selected, chain->data + j);
	rfc822_addr(selected, addr);
	buf_clear(selected);
	buf_getline(addr, selected);
	if (!selected->length)
	  buf_sets(selected, chain->data + j);
	/* Find a remailer of the requested type matching by short name,
	 * full address, or domain suffix ("@domain"). */
	for (k = 0; k < maxrem; k++)
	  if (((remailer[k].flags.mix && type == 0) ||
	       (remailer[k].flags.cpunk && type == 1) ||
	       (remailer[k].flags.newnym && type == 2)) &&
	      (streq(remailer[k].name, selected->data) ||
	       strieq(remailer[k].addr, selected->data) ||
	       (selected->data[0] == '@' &&
		strifind(remailer[k].addr, selected->data))))
	    break;
      }
      if (k < 0 || k >= maxrem) {
	if (feedback != NULL) {
	  buf_appendf(feedback, "No such remailer: %b", selected);
	  buf_nl(feedback);
	}
#if 0
	k = 0;
#else /* end of 0 */
	len = -1;
	goto end;
#endif /* else not 0 */
      }
      hop[len++] = k;
      if (len >= 20) {		/* array passed in has length 20 */
	if (feedback != NULL) {
	  buf_appends(feedback, "Chain too long.\n");
	}
	break;
      }
      /* Terminate the remaining (left) part at the separator so later
       * iterations see a shorter string. */
      if (i > 0)
	chain->data[i - 1] = '\0';
    }
end:
  buf_free(chain);
  buf_free(selected);
  buf_free(addr);
  return len;
}
/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * ip        - inode of the file being grown
 * lbn       - logical block number within the file
 * size      - byte count the caller intends to write into that block
 * cred      - credentials charged for the allocation (quota)
 * bpp       - OUT: the buffer for the block, unless B_NOBUFF was given
 * flags     - B_NOBUFF (no buffer wanted), B_CLRBUF (zero new block),
 *             B_SYNC (write metadata synchronously)
 * blk_alloc - OUT (optional): number of bytes actually allocated
 *
 * Returns 0 on success or an errno; on failure any indirect blocks
 * allocated part-way are freed again (see `fail:` below).
 */
ffs_balloc(
	register struct inode *ip,
	register ufs_daddr_t lbn,
	int size,
	kauth_cred_t cred,
	struct buf **bpp,
	int flags,
	int * blk_alloc)
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize=0;
	int alloc_buffer = 1;
	struct mount *mp=vp->v_mount;
#if REV_ENDIAN_FS
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_fs;
	if (flags & B_NOBUFF)
		alloc_buffer = 0;

	if (blk_alloc)
		*blk_alloc = 0;

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/* the filesize prior to this write can fit in direct
		 * blocks (ie. fragmentaion is possibly done)
		 * we are now extending the file write beyond
		 * the block which has end of file prior to this write
		 */
		osize = blksize(fs, ip, nb);
		/* osize gives disk allocated size in the last block. It is
		 * either in fragments or a file system block size */
		if (osize < fs->fs_bsize && osize > 0) {
			/* few fragments are already allocated, since the
			 * current write extends beyond this block:
			 * allocate the complete block as fragments are only
			 * in last block */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			/* adjust the inode size we just grew */
			/* it is in nb+1 as nb starts from 0 */
			ip->i_size = (nb + 1) * fs->fs_bsize;
			ubc_setsize(vp, (off_t)ip->i_size);
			ip->i_db[nb] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((flags & B_SYNC) || (!alloc_buffer)) {
				if (!alloc_buffer)
					buf_setflags(bp, B_NOCACHE);
				buf_bwrite(bp);
			} else
				buf_bdwrite(bp);
			/* note that bp is already released here */
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		/* Block exists and the file already extends past it:
		 * just read it in (if a buffer was requested). */
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			if (alloc_buffer) {
				error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, NOCRED, &bp);
				if (error) {
					buf_brelse(bp);
					return (error);
				}
				*bpp = bp;
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				/* Existing fragment is already big enough. */
				if (alloc_buffer) {
					error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), osize, NOCRED, &bp);
					if (error) {
						buf_brelse(bp);
						return (error);
					}
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					*bpp = bp;
					return (0);
				} else {
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					return (0);
				}
			} else {
				/* Grow the fragment (possibly moving it). */
				error = ffs_realloccg(ip, lbn,
					ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
					osize, nsize, cred, &bp);
				if (error)
					return (error);
				ip->i_db[lbn] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				/* adjust the inode size we just grew */
				ip->i_size = (lbn * fs->fs_bsize) + size;
				ubc_setsize(vp, (off_t)ip->i_size);
				if (!alloc_buffer) {
					buf_setflags(bp, B_NOCACHE);
					if (flags & B_SYNC)
						buf_bwrite(bp);
					else
						buf_bdwrite(bp);
				} else
					*bpp = bp;
				return (0);
			}
		} else {
			/* No block allocated yet at lbn: allocate a new one,
			 * a fragment if lbn is the (new) last block. */
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
				ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
				nsize, cred, &newb);
			if (error)
				return (error);
			if (alloc_buffer) {
				bp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), nsize, 0, 0, BLK_WRITE);
				buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, newb)));
				if (flags & B_CLRBUF)
					buf_clear(bp);
			}
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (blk_alloc) {
				*blk_alloc = nsize;
			}
			if (alloc_buffer)
				*bpp = bp;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return(error);
#if DIAGNOSTIC
	if (num < 1)
		panic ("ffs_balloc: ufs_bmaparray returned indirect block");
#endif
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;	/* records every block allocated below, for rollback */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[1].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(bp);
		/*
		 * Write synchronously conditional on mount flags.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(bp);
		} else if ((error = buf_bwrite(bp)) != 0) {
			goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = (int)buf_meta_bread(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			buf_brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)buf_dataptr(bp);
#if REV_ENDIAN_FS
		if (rev_endian)
			nb = OSSwapInt32(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;	/* bap/bp now point at the last indirect level */
		i += 1;
		if (nb != 0) {
			buf_brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(nbp);
		/*
		 * Write synchronously conditional on mount flags.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(nbp);
		} else if (error = buf_bwrite(nbp)) {
			buf_brelse(bp);
			goto fail;
		}
		/* Link the new indirect block into its parent. */
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = OSSwapInt32(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		/* Record the new data block in the last indirect block. */
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = OSSwapInt32(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if ((flags & B_SYNC)) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
		if(alloc_buffer ) {
			nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
			buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
			if (flags & B_CLRBUF)
				buf_clear(nbp);
		}
		if (blk_alloc) {
			*blk_alloc = fs->fs_bsize;
		}
		if(alloc_buffer)
			*bpp = nbp;
		return (0);
	}
	buf_brelse(bp);
	/* Data block already existed: hand back a buffer for it. */
	if (alloc_buffer) {
		if (flags & B_CLRBUF) {
			error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), (int)fs->fs_bsize, NOCRED, &nbp);
			if (error) {
				buf_brelse(nbp);
				goto fail;
			}
		} else {
			nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
			buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		}
		*bpp = nbp;
	}
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		devBlockSize = vfs_devblocksize(mp);
#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}