static int mempacket_test_read(BIO *bio, char *out, int outl) { MEMPACKET_TEST_CTX *ctx = BIO_get_data(bio); MEMPACKET *thispkt; unsigned char *rec; int rem; unsigned int seq, offset, len, epoch; BIO_clear_retry_flags(bio); thispkt = sk_MEMPACKET_value(ctx->pkts, 0); if (thispkt == NULL || thispkt->num != ctx->currpkt) { /* Probably run out of data */ BIO_set_retry_read(bio); return -1; } (void)sk_MEMPACKET_shift(ctx->pkts); ctx->currpkt++; if (outl > thispkt->len) outl = thispkt->len; if (thispkt->type != INJECT_PACKET_IGNORE_REC_SEQ && (ctx->injected || ctx->droprec >= 0)) { /* * Overwrite the record sequence number. We strictly number them in * the order received. Since we are actually a reliable transport * we know that there won't be any re-ordering. We overwrite to deal * with any packets that have been injected */ for (rem = thispkt->len, rec = thispkt->data; rem > 0; rem -= len) { if (rem < DTLS1_RT_HEADER_LENGTH) return -1; epoch = (rec[EPOCH_HI] << 8) | rec[EPOCH_LO]; if (epoch != ctx->epoch) { ctx->epoch = epoch; ctx->currrec = 0; } seq = ctx->currrec; offset = 0; do { rec[RECORD_SEQUENCE - offset] = seq & 0xFF; seq >>= 8; offset++; } while (seq > 0); len = ((rec[RECORD_LEN_HI] << 8) | rec[RECORD_LEN_LO]) + DTLS1_RT_HEADER_LENGTH; if (rem < (int)len) return -1; if (ctx->droprec == (int)ctx->currrec && ctx->dropepoch == epoch) { if (rem > (int)len) memmove(rec, rec + len, rem - len); outl -= len; ctx->droprec = -1; if (outl == 0) BIO_set_retry_read(bio); } else { rec += len; } ctx->currrec++; } } memcpy(out, thispkt->data, outl); mempacket_free(thispkt); return outl; }
static int bio_write(BIO *b, const char *buf, int len)
{
    git_stream *io = (git_stream *) BIO_get_data(b);
    return (int) git_stream_write(io, buf, len, 0);
}
static long md_ctrl(BIO *b, int cmd, long num, void *ptr) { EVP_MD_CTX *ctx, *dctx, **pctx; const EVP_MD **ppmd; EVP_MD *md; long ret = 1; BIO *dbio, *next; ctx = BIO_get_data(b); next = BIO_next(b); switch (cmd) { case BIO_CTRL_RESET: if (BIO_get_init(b)) ret = EVP_DigestInit_ex(ctx, ctx->digest, NULL); else ret = 0; if (ret > 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_C_GET_MD: if (BIO_get_init(b)) { ppmd = ptr; *ppmd = ctx->digest; } else ret = 0; break; case BIO_C_GET_MD_CTX: pctx = ptr; *pctx = ctx; BIO_set_init(b, 1); break; case BIO_C_SET_MD_CTX: if (BIO_get_init(b)) BIO_set_data(b, ptr); else ret = 0; break; case BIO_C_DO_STATE_MACHINE: BIO_clear_retry_flags(b); ret = BIO_ctrl(next, cmd, num, ptr); BIO_copy_next_retry(b); break; case BIO_C_SET_MD: md = ptr; ret = EVP_DigestInit_ex(ctx, md, NULL); if (ret > 0) BIO_set_init(b, 1); break; case BIO_CTRL_DUP: dbio = ptr; dctx = BIO_get_data(dbio); if (!EVP_MD_CTX_copy_ex(dctx, ctx)) return 0; BIO_set_init(b, 1); break; default: ret = BIO_ctrl(next, cmd, num, ptr); break; } return ret; }
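/*
 * Usage sketch (not part of the sources above): md_ctrl() is the control
 * handler behind OpenSSL's BIO_f_md() filter.  A digest BIO is typically
 * driven as below; digest_file_sha256() and the buffer size are illustrative
 * assumptions, only the BIO and EVP calls are real API.
 */
#include <openssl/bio.h>
#include <openssl/evp.h>

static int digest_file_sha256(const char *path, unsigned char *md_out)
{
    BIO *md_bio = BIO_new(BIO_f_md());
    BIO *in = BIO_new_file(path, "rb");
    char buf[4096];
    int mdlen = -1;

    if (md_bio != NULL && in != NULL) {
        /* BIO_set_md() goes through the BIO_C_SET_MD case of md_ctrl() */
        if (BIO_set_md(md_bio, EVP_sha256()) > 0) {
            BIO_push(md_bio, in);
            in = NULL;                 /* now owned by the chain */

            /* Reading through the filter updates the digest as a side effect */
            while (BIO_read(md_bio, buf, sizeof(buf)) > 0)
                ;

            /* On a digest BIO, BIO_gets() writes the digest and returns its length */
            mdlen = BIO_gets(md_bio, (char *)md_out, EVP_MAX_MD_SIZE);
        }
    }
    BIO_free_all(md_bio);
    BIO_free(in);
    return mdlen;
}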
static int ok_write(BIO *b, const char *in, int inl) { int ret = 0, n, i; BIO_OK_CTX *ctx; BIO *next; if (inl <= 0) return inl; ctx = BIO_get_data(b); next = BIO_next(b); ret = inl; if ((ctx == NULL) || (next == NULL) || (BIO_get_init(b) == 0)) return 0; if (ctx->sigio && !sig_out(b)) return 0; do { BIO_clear_retry_flags(b); n = ctx->buf_len - ctx->buf_off; while (ctx->blockout && n > 0) { i = BIO_write(next, &(ctx->buf[ctx->buf_off]), n); if (i <= 0) { BIO_copy_next_retry(b); if (!BIO_should_retry(b)) ctx->cont = 0; return i; } ctx->buf_off += i; n -= i; } /* at this point all pending data has been written */ ctx->blockout = 0; if (ctx->buf_len == ctx->buf_off) { ctx->buf_len = OK_BLOCK_BLOCK; ctx->buf_off = 0; } if ((in == NULL) || (inl <= 0)) return 0; n = (inl + ctx->buf_len > OK_BLOCK_SIZE + OK_BLOCK_BLOCK) ? (int)(OK_BLOCK_SIZE + OK_BLOCK_BLOCK - ctx->buf_len) : inl; memcpy(&ctx->buf[ctx->buf_len], in, n); ctx->buf_len += n; inl -= n; in += n; if (ctx->buf_len >= OK_BLOCK_SIZE + OK_BLOCK_BLOCK) { if (!block_out(b)) { BIO_clear_retry_flags(b); return 0; } } } while (inl > 0); BIO_clear_retry_flags(b); BIO_copy_next_retry(b); return ret; }
static long asn1_bio_ctrl(BIO *b, int cmd, long arg1, void *arg2) { BIO_ASN1_BUF_CTX *ctx; BIO_ASN1_EX_FUNCS *ex_func; long ret = 1; BIO *next; ctx = BIO_get_data(b); if (ctx == NULL) return 0; next = BIO_next(b); switch (cmd) { case BIO_C_SET_PREFIX: ex_func = arg2; ctx->prefix = ex_func->ex_func; ctx->prefix_free = ex_func->ex_free_func; break; case BIO_C_GET_PREFIX: ex_func = arg2; ex_func->ex_func = ctx->prefix; ex_func->ex_free_func = ctx->prefix_free; break; case BIO_C_SET_SUFFIX: ex_func = arg2; ctx->suffix = ex_func->ex_func; ctx->suffix_free = ex_func->ex_free_func; break; case BIO_C_GET_SUFFIX: ex_func = arg2; ex_func->ex_func = ctx->suffix; ex_func->ex_free_func = ctx->suffix_free; break; case BIO_C_SET_EX_ARG: ctx->ex_arg = arg2; break; case BIO_C_GET_EX_ARG: *(void **)arg2 = ctx->ex_arg; break; case BIO_CTRL_FLUSH: if (next == NULL) return 0; /* Call post function if possible */ if (ctx->state == ASN1_STATE_HEADER) { if (!asn1_bio_setup_ex(b, ctx, ctx->suffix, ASN1_STATE_POST_COPY, ASN1_STATE_DONE)) return 0; } if (ctx->state == ASN1_STATE_POST_COPY) { ret = asn1_bio_flush_ex(b, ctx, ctx->suffix_free, ASN1_STATE_DONE); if (ret <= 0) return ret; } if (ctx->state == ASN1_STATE_DONE) return BIO_ctrl(next, cmd, arg1, arg2); else { BIO_clear_retry_flags(b); return 0; } default: if (next == NULL) return 0; return BIO_ctrl(next, cmd, arg1, arg2); } return ret; }
static long bio_zlib_ctrl(BIO *b, int cmd, long num, void *ptr) { BIO_ZLIB_CTX *ctx; int ret, *ip; int ibs, obs; BIO *next = BIO_next(b); if (next == NULL) return 0; ctx = BIO_get_data(b); switch (cmd) { case BIO_CTRL_RESET: ctx->ocount = 0; ctx->odone = 0; ret = 1; break; case BIO_CTRL_FLUSH: ret = bio_zlib_flush(b); if (ret > 0) ret = BIO_flush(next); break; case BIO_C_SET_BUFF_SIZE: ibs = -1; obs = -1; if (ptr != NULL) { ip = ptr; if (*ip == 0) ibs = (int)num; else obs = (int)num; } else { ibs = (int)num; obs = ibs; } if (ibs != -1) { OPENSSL_free(ctx->ibuf); ctx->ibuf = NULL; ctx->ibufsize = ibs; } if (obs != -1) { OPENSSL_free(ctx->obuf); ctx->obuf = NULL; ctx->obufsize = obs; } ret = 1; break; case BIO_C_DO_STATE_MACHINE: BIO_clear_retry_flags(b); ret = BIO_ctrl(next, cmd, num, ptr); BIO_copy_next_retry(b); break; default: ret = BIO_ctrl(next, cmd, num, ptr); break; } return ret; }
static long ssl_ctrl(BIO *b, int cmd, long num, void *ptr)
{
    SSL **sslp, *ssl;
    BIO_SSL *bs, *dbs;
    BIO *dbio, *bio;
    long ret = 1;
    BIO *next;

    bs = BIO_get_data(b);
    next = BIO_next(b);
    ssl = bs->ssl;
    if ((ssl == NULL) && (cmd != BIO_C_SET_SSL))
        return 0;
    switch (cmd) {
    case BIO_CTRL_RESET:
        SSL_shutdown(ssl);

        if (ssl->handshake_func == ssl->method->ssl_connect)
            SSL_set_connect_state(ssl);
        else if (ssl->handshake_func == ssl->method->ssl_accept)
            SSL_set_accept_state(ssl);

        if (!SSL_clear(ssl)) {
            ret = 0;
            break;
        }

        if (next != NULL)
            ret = BIO_ctrl(next, cmd, num, ptr);
        else if (ssl->rbio != NULL)
            ret = BIO_ctrl(ssl->rbio, cmd, num, ptr);
        else
            ret = 1;
        break;
    case BIO_CTRL_INFO:
        ret = 0;
        break;
    case BIO_C_SSL_MODE:
        if (num)                /* client mode */
            SSL_set_connect_state(ssl);
        else
            SSL_set_accept_state(ssl);
        break;
    case BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT:
        ret = bs->renegotiate_timeout;
        if (num < 60)
            num = 5;
        bs->renegotiate_timeout = (unsigned long)num;
        bs->last_time = (unsigned long)time(NULL);
        break;
    case BIO_C_SET_SSL_RENEGOTIATE_BYTES:
        ret = bs->renegotiate_count;
        if ((long)num >= 512)
            bs->renegotiate_count = (unsigned long)num;
        break;
    case BIO_C_GET_SSL_NUM_RENEGOTIATES:
        ret = bs->num_renegotiates;
        break;
    case BIO_C_SET_SSL:
        if (ssl != NULL) {
            ssl_free(b);
            if (!ssl_new(b))
                return 0;
        }
        BIO_set_shutdown(b, num);
        ssl = (SSL *)ptr;
        bs->ssl = ssl;
        bio = SSL_get_rbio(ssl);
        if (bio != NULL) {
            if (next != NULL)
                BIO_push(bio, next);
            BIO_set_next(b, bio);
            BIO_up_ref(bio);
        }
        BIO_set_init(b, 1);
        break;
    case BIO_C_GET_SSL:
        if (ptr != NULL) {
            sslp = (SSL **)ptr;
            *sslp = ssl;
        } else
            ret = 0;
        break;
    case BIO_CTRL_GET_CLOSE:
        ret = BIO_get_shutdown(b);
        break;
    case BIO_CTRL_SET_CLOSE:
        BIO_set_shutdown(b, (int)num);
        break;
    case BIO_CTRL_WPENDING:
        ret = BIO_ctrl(ssl->wbio, cmd, num, ptr);
        break;
    case BIO_CTRL_PENDING:
        ret = SSL_pending(ssl);
        if (ret == 0)
            ret = BIO_pending(ssl->rbio);
        break;
    case BIO_CTRL_FLUSH:
        BIO_clear_retry_flags(b);
        ret = BIO_ctrl(ssl->wbio, cmd, num, ptr);
        BIO_copy_next_retry(b);
        break;
    case BIO_CTRL_PUSH:
        if ((next != NULL) && (next != ssl->rbio)) {
            /*
             * We are going to pass ownership of next to the SSL object...but
             * we don't own a reference to pass yet - so up ref
             */
            BIO_up_ref(next);
            SSL_set_bio(ssl, next, next);
        }
        break;
    case BIO_CTRL_POP:
        /* Only detach if we are the BIO explicitly being popped */
        if (b == ptr) {
            /* This will clear the reference we obtained during push */
            SSL_set_bio(ssl, NULL, NULL);
        }
        break;
    case BIO_C_DO_STATE_MACHINE:
        BIO_clear_retry_flags(b);
        BIO_set_retry_reason(b, 0);
        ret = (int)SSL_do_handshake(ssl);

        switch (SSL_get_error(ssl, (int)ret)) {
        case SSL_ERROR_WANT_READ:
            BIO_set_flags(b, BIO_FLAGS_READ | BIO_FLAGS_SHOULD_RETRY);
            break;
        case SSL_ERROR_WANT_WRITE:
            BIO_set_flags(b, BIO_FLAGS_WRITE | BIO_FLAGS_SHOULD_RETRY);
            break;
        case SSL_ERROR_WANT_CONNECT:
            BIO_set_flags(b, BIO_FLAGS_IO_SPECIAL | BIO_FLAGS_SHOULD_RETRY);
            BIO_set_retry_reason(b, BIO_get_retry_reason(next));
            break;
        case SSL_ERROR_WANT_X509_LOOKUP:
            BIO_set_retry_special(b);
            BIO_set_retry_reason(b, BIO_RR_SSL_X509_LOOKUP);
            break;
        default:
            break;
        }
        break;
    case BIO_CTRL_DUP:
        dbio = (BIO *)ptr;
        dbs = BIO_get_data(dbio);
        SSL_free(dbs->ssl);
        dbs->ssl = SSL_dup(ssl);
        dbs->num_renegotiates = bs->num_renegotiates;
        dbs->renegotiate_count = bs->renegotiate_count;
        dbs->byte_count = bs->byte_count;
        dbs->renegotiate_timeout = bs->renegotiate_timeout;
        dbs->last_time = bs->last_time;
        ret = (dbs->ssl != NULL);
        break;
    case BIO_C_GET_FD:
        ret = BIO_ctrl(ssl->rbio, cmd, num, ptr);
        break;
    case BIO_CTRL_SET_CALLBACK:
        ret = 0;                /* use callback ctrl */
        break;
    case BIO_CTRL_GET_CALLBACK:
        {
            void (**fptr) (const SSL *xssl, int type, int val);

            fptr = (void (**)(const SSL *xssl, int type, int val))ptr;
            *fptr = SSL_get_info_callback(ssl);
        }
        break;
    default:
        ret = BIO_ctrl(ssl->rbio, cmd, num, ptr);
        break;
    }
    return ret;
}
static long rdg_bio_ctrl(BIO* bio, int cmd, long arg1, void* arg2) { int status = 0; rdpRdg* rdg = (rdpRdg*) BIO_get_data(bio); rdpTls* tlsOut = rdg->tlsOut; rdpTls* tlsIn = rdg->tlsIn; if (cmd == BIO_CTRL_FLUSH) { (void)BIO_flush(tlsOut->bio); (void)BIO_flush(tlsIn->bio); status = 1; } else if (cmd == BIO_C_GET_EVENT) { if (arg2) { BIO_get_event(rdg->tlsOut->bio, arg2); status = 1; } } else if (cmd == BIO_C_SET_NONBLOCK) { status = 1; } else if (cmd == BIO_C_READ_BLOCKED) { BIO* bio = tlsOut->bio; status = BIO_read_blocked(bio); } else if (cmd == BIO_C_WRITE_BLOCKED) { BIO* bio = tlsIn->bio; status = BIO_write_blocked(bio); } else if (cmd == BIO_C_WAIT_READ) { int timeout = (int) arg1; BIO* bio = tlsOut->bio; if (BIO_read_blocked(bio)) return BIO_wait_read(bio, timeout); else if (BIO_write_blocked(bio)) return BIO_wait_write(bio, timeout); else status = 1; } else if (cmd == BIO_C_WAIT_WRITE) { int timeout = (int) arg1; BIO* bio = tlsIn->bio; if (BIO_write_blocked(bio)) status = BIO_wait_write(bio, timeout); else if (BIO_read_blocked(bio)) status = BIO_wait_read(bio, timeout); else status = 1; } return status; }
static long enc_ctrl(BIO *b, int cmd, long num, void *ptr) { BIO *dbio; BIO_ENC_CTX *ctx, *dctx; long ret = 1; int i; EVP_CIPHER_CTX **c_ctx; BIO *next; ctx = BIO_get_data(b); next = BIO_next(b); if (ctx == NULL) return 0; switch (cmd) { case BIO_CTRL_RESET: ctx->ok = 1; ctx->finished = 0; if (!EVP_CipherInit_ex(ctx->cipher, NULL, NULL, NULL, NULL, EVP_CIPHER_CTX_encrypting(ctx->cipher))) return 0; ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_EOF: /* More to read */ if (ctx->cont <= 0) ret = 1; else ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_WPENDING: ret = ctx->buf_len - ctx->buf_off; if (ret <= 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_PENDING: /* More to read in buffer */ ret = ctx->buf_len - ctx->buf_off; if (ret <= 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_FLUSH: /* do a final write */ again: while (ctx->buf_len != ctx->buf_off) { i = enc_write(b, NULL, 0); if (i < 0) return i; } if (!ctx->finished) { ctx->finished = 1; ctx->buf_off = 0; ret = EVP_CipherFinal_ex(ctx->cipher, (unsigned char *)ctx->buf, &(ctx->buf_len)); ctx->ok = (int)ret; if (ret <= 0) break; /* push out the bytes */ goto again; } /* Finally flush the underlying BIO */ ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_C_GET_CIPHER_STATUS: ret = (long)ctx->ok; break; case BIO_C_DO_STATE_MACHINE: BIO_clear_retry_flags(b); ret = BIO_ctrl(next, cmd, num, ptr); BIO_copy_next_retry(b); break; case BIO_C_GET_CIPHER_CTX: c_ctx = (EVP_CIPHER_CTX **)ptr; *c_ctx = ctx->cipher; BIO_set_init(b, 1); break; case BIO_CTRL_DUP: dbio = (BIO *)ptr; dctx = BIO_get_data(dbio); dctx->cipher = EVP_CIPHER_CTX_new(); if (dctx->cipher == NULL) return 0; ret = EVP_CIPHER_CTX_copy(dctx->cipher, ctx->cipher); if (ret) BIO_set_init(dbio, 1); break; default: ret = BIO_ctrl(next, cmd, num, ptr); break; } return (ret); }
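/*
 * Usage sketch (not part of the sources above): enc_ctrl(), together with
 * enc_read()/enc_write(), implements OpenSSL's BIO_f_cipher() filter.  A
 * write-side encryption chain might be set up as below; encrypt_to_file(),
 * the AES-256-CBC choice and the key/iv handling are illustrative
 * assumptions.
 */
#include <openssl/bio.h>
#include <openssl/evp.h>

static int encrypt_to_file(const char *path, const unsigned char *key,
                           const unsigned char *iv,
                           const unsigned char *plain, int plainlen)
{
    BIO *enc = BIO_new(BIO_f_cipher());
    BIO *out = BIO_new_file(path, "wb");
    int ok = 0;

    if (enc != NULL && out != NULL) {
        /* Final argument 1 selects encryption; this initialises the
         * EVP_CIPHER_CTX that the enc_* callbacks operate on */
        BIO_set_cipher(enc, EVP_aes_256_cbc(), key, iv, 1);
        BIO_push(enc, out);
        out = NULL;                      /* owned by the chain now */

        if (BIO_write(enc, plain, plainlen) == plainlen
                && BIO_flush(enc) > 0    /* BIO_CTRL_FLUSH writes the final block */
                && BIO_get_cipher_status(enc) > 0)
            ok = 1;
    }
    BIO_free_all(enc);
    BIO_free(out);
    return ok;
}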
static int ssl_write(BIO *b, const char *buf, size_t size, size_t *written) { int ret, r = 0; int retry_reason = 0; SSL *ssl; BIO_SSL *bs; if (buf == NULL) return 0; bs = BIO_get_data(b); ssl = bs->ssl; BIO_clear_retry_flags(b); ret = ssl_write_internal(ssl, buf, size, written); switch (SSL_get_error(ssl, ret)) { case SSL_ERROR_NONE: if (bs->renegotiate_count > 0) { bs->byte_count += *written; if (bs->byte_count > bs->renegotiate_count) { bs->byte_count = 0; bs->num_renegotiates++; SSL_renegotiate(ssl); r = 1; } } if ((bs->renegotiate_timeout > 0) && (!r)) { unsigned long tm; tm = (unsigned long)time(NULL); if (tm > bs->last_time + bs->renegotiate_timeout) { bs->last_time = tm; bs->num_renegotiates++; SSL_renegotiate(ssl); } } break; case SSL_ERROR_WANT_WRITE: BIO_set_retry_write(b); break; case SSL_ERROR_WANT_READ: BIO_set_retry_read(b); break; case SSL_ERROR_WANT_X509_LOOKUP: BIO_set_retry_special(b); retry_reason = BIO_RR_SSL_X509_LOOKUP; break; case SSL_ERROR_WANT_CONNECT: BIO_set_retry_special(b); retry_reason = BIO_RR_CONNECT; case SSL_ERROR_SYSCALL: case SSL_ERROR_SSL: default: break; } BIO_set_retry_reason(b, retry_reason); return ret; }
static int enc_read(BIO *b, char *out, int outl) { int ret = 0, i, blocksize; BIO_ENC_CTX *ctx; BIO *next; if (out == NULL) return (0); ctx = BIO_get_data(b); next = BIO_next(b); if ((ctx == NULL) || (next == NULL)) return 0; /* First check if there are bytes decoded/encoded */ if (ctx->buf_len > 0) { i = ctx->buf_len - ctx->buf_off; if (i > outl) i = outl; memcpy(out, &(ctx->buf[ctx->buf_off]), i); ret = i; out += i; outl -= i; ctx->buf_off += i; if (ctx->buf_len == ctx->buf_off) { ctx->buf_len = 0; ctx->buf_off = 0; } } blocksize = EVP_CIPHER_CTX_block_size(ctx->cipher); if (blocksize == 1) blocksize = 0; /* * At this point, we have room of outl bytes and an empty buffer, so we * should read in some more. */ while (outl > 0) { if (ctx->cont <= 0) break; if (ctx->read_start == ctx->read_end) { /* time to read more data */ ctx->read_end = ctx->read_start = &(ctx->buf[BUF_OFFSET]); ctx->read_end += BIO_read(next, ctx->read_start, ENC_BLOCK_SIZE); } i = ctx->read_end - ctx->read_start; if (i <= 0) { /* Should be continue next time we are called? */ if (!BIO_should_retry(next)) { ctx->cont = i; i = EVP_CipherFinal_ex(ctx->cipher, ctx->buf, &(ctx->buf_len)); ctx->ok = i; ctx->buf_off = 0; } else { ret = (ret == 0) ? i : ret; break; } } else { if (outl > ENC_MIN_CHUNK) { /* * Depending on flags block cipher decrypt can write * one extra block and then back off, i.e. output buffer * has to accommodate extra block... */ int j = outl - blocksize, buf_len; if (!EVP_CipherUpdate(ctx->cipher, (unsigned char *)out, &buf_len, ctx->read_start, i > j ? j : i)) { BIO_clear_retry_flags(b); return 0; } ret += buf_len; out += buf_len; outl -= buf_len; if ((i -= j) <= 0) { ctx->read_start = ctx->read_end; continue; } ctx->read_start += j; } if (i > ENC_MIN_CHUNK) i = ENC_MIN_CHUNK; if (!EVP_CipherUpdate(ctx->cipher, ctx->buf, &ctx->buf_len, ctx->read_start, i)) { BIO_clear_retry_flags(b); ctx->ok = 0; return 0; } ctx->read_start += i; ctx->cont = 1; /* * Note: it is possible for EVP_CipherUpdate to decrypt zero * bytes because this is or looks like the final block: if this * happens we should retry and either read more data or decrypt * the final block */ if (ctx->buf_len == 0) continue; } if (ctx->buf_len <= outl) i = ctx->buf_len; else i = outl; if (i <= 0) break; memcpy(out, ctx->buf, i); ret += i; ctx->buf_off = i; outl -= i; out += i; } BIO_clear_retry_flags(b); BIO_copy_next_retry(b); return ((ret == 0) ? ctx->cont : ret); }
static int dill_tls_cbio_read(BIO *bio, char *buf, int len)
{
    struct dill_tls_sock *self = (struct dill_tls_sock*)BIO_get_data(bio);
    int rc = dill_brecv(self->u, buf, len, self->deadline);
    if(dill_slow(rc < 0)) return -1;
    return len;
}
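/*
 * Wiring sketch (assumption, not taken from libdill): a callback like
 * dill_tls_cbio_read() is normally installed on a custom BIO_METHOD, and the
 * per-connection state that BIO_get_data() returns is attached with
 * BIO_set_data().  make_cbio() and the method name string are illustrative;
 * a matching write callback would be registered with BIO_meth_set_write()
 * in the same way.
 */
#include <openssl/bio.h>

static BIO *make_cbio(struct dill_tls_sock *self)
{
    /* One-time method setup; a real implementation would guard this
     * against concurrent initialisation. */
    static BIO_METHOD *meth;
    BIO *bio;

    if (meth == NULL) {
        meth = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "dill_tls_cbio");
        if (meth == NULL)
            return NULL;
        BIO_meth_set_read(meth, dill_tls_cbio_read);
    }
    bio = BIO_new(meth);
    if (bio == NULL)
        return NULL;
    BIO_set_data(bio, self);   /* what BIO_get_data() hands back above */
    BIO_set_init(bio, 1);
    return bio;
}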
int mempacket_test_inject(BIO *bio, const char *in, int inl, int pktnum, int type) { MEMPACKET_TEST_CTX *ctx = BIO_get_data(bio); MEMPACKET *thispkt = NULL, *looppkt, *nextpkt, *allpkts[3]; int i, duprec = ctx->duprec > 0; const unsigned char *inu = (const unsigned char *)in; size_t len = ((inu[RECORD_LEN_HI] << 8) | inu[RECORD_LEN_LO]) + DTLS1_RT_HEADER_LENGTH; if (ctx == NULL) return -1; if ((size_t)inl < len) return -1; if ((size_t)inl == len) duprec = 0; /* We don't support arbitrary injection when duplicating records */ if (duprec && pktnum != -1) return -1; /* We only allow injection before we've started writing any data */ if (pktnum >= 0) { if (ctx->noinject) return -1; ctx->injected = 1; } else { ctx->noinject = 1; } for (i = 0; i < (duprec ? 3 : 1); i++) { if (!TEST_ptr(allpkts[i] = OPENSSL_malloc(sizeof(*thispkt)))) goto err; thispkt = allpkts[i]; if (!TEST_ptr(thispkt->data = OPENSSL_malloc(inl))) goto err; /* * If we are duplicating the packet, we duplicate it three times. The * first two times we drop the first record if there are more than one. * In this way we know that libssl will not be able to make progress * until it receives the last packet, and hence will be forced to * buffer these records. */ if (duprec && i != 2) { memcpy(thispkt->data, in + len, inl - len); thispkt->len = inl - len; } else { memcpy(thispkt->data, in, inl); thispkt->len = inl; } thispkt->num = (pktnum >= 0) ? (unsigned int)pktnum : ctx->lastpkt + i; thispkt->type = type; } for(i = 0; (looppkt = sk_MEMPACKET_value(ctx->pkts, i)) != NULL; i++) { /* Check if we found the right place to insert this packet */ if (looppkt->num > thispkt->num) { if (sk_MEMPACKET_insert(ctx->pkts, thispkt, i) == 0) goto err; /* If we're doing up front injection then we're done */ if (pktnum >= 0) return inl; /* * We need to do some accounting on lastpkt. We increment it first, * but it might now equal the value of injected packets, so we need * to skip over those */ ctx->lastpkt++; do { i++; nextpkt = sk_MEMPACKET_value(ctx->pkts, i); if (nextpkt != NULL && nextpkt->num == ctx->lastpkt) ctx->lastpkt++; else return inl; } while(1); } else if (looppkt->num == thispkt->num) { if (!ctx->noinject) { /* We injected two packets with the same packet number! */ goto err; } ctx->lastpkt++; thispkt->num++; } } /* * We didn't find any packets with a packet number equal to or greater than * this one, so we just add it onto the end */ for (i = 0; i < (duprec ? 3 : 1); i++) { thispkt = allpkts[i]; if (!sk_MEMPACKET_push(ctx->pkts, thispkt)) goto err; if (pktnum < 0) ctx->lastpkt++; } return inl; err: for (i = 0; i < (ctx->duprec > 0 ? 3 : 1); i++) mempacket_free(allpkts[i]); return -1; }
static long transport_bio_simple_ctrl(BIO* bio, int cmd, long arg1, void* arg2) { int status = -1; WINPR_BIO_SIMPLE_SOCKET* ptr = (WINPR_BIO_SIMPLE_SOCKET*) BIO_get_data(bio); if (cmd == BIO_C_SET_SOCKET) { transport_bio_simple_uninit(bio); transport_bio_simple_init(bio, (SOCKET) arg2, (int) arg1); return 1; } else if (cmd == BIO_C_GET_SOCKET) { if (!BIO_get_init(bio) || !arg2) return 0; *((ULONG_PTR*) arg2) = (ULONG_PTR) ptr->socket; return 1; } else if (cmd == BIO_C_GET_EVENT) { if (!BIO_get_init(bio) || !arg2) return 0; *((ULONG_PTR*) arg2) = (ULONG_PTR) ptr->hEvent; return 1; } else if (cmd == BIO_C_SET_NONBLOCK) { #ifndef _WIN32 int flags; flags = fcntl((int) ptr->socket, F_GETFL); if (flags == -1) return 0; if (arg1) fcntl((int) ptr->socket, F_SETFL, flags | O_NONBLOCK); else fcntl((int) ptr->socket, F_SETFL, flags & ~(O_NONBLOCK)); #else /* the internal socket is always non-blocking */ #endif return 1; } else if (cmd == BIO_C_WAIT_READ) { int timeout = (int) arg1; int sockfd = (int) ptr->socket; #ifdef HAVE_POLL_H struct pollfd pollset; pollset.fd = sockfd; pollset.events = POLLIN; pollset.revents = 0; do { status = poll(&pollset, 1, timeout); } while ((status < 0) && (errno == EINTR)); #else fd_set rset; struct timeval tv; FD_ZERO(&rset); FD_SET(sockfd, &rset); if (timeout) { tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; } do { status = select(sockfd + 1, &rset, NULL, NULL, timeout ? &tv : NULL); } while ((status < 0) && (errno == EINTR)); #endif } else if (cmd == BIO_C_WAIT_WRITE) { int timeout = (int) arg1; int sockfd = (int) ptr->socket; #ifdef HAVE_POLL_H struct pollfd pollset; pollset.fd = sockfd; pollset.events = POLLOUT; pollset.revents = 0; do { status = poll(&pollset, 1, timeout); } while ((status < 0) && (errno == EINTR)); #else fd_set rset; struct timeval tv; FD_ZERO(&rset); FD_SET(sockfd, &rset); if (timeout) { tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; } do { status = select(sockfd + 1, NULL, &rset, NULL, timeout ? &tv : NULL); } while ((status < 0) && (errno == EINTR)); #endif } switch (cmd) { case BIO_C_SET_FD: if (arg2) { transport_bio_simple_uninit(bio); transport_bio_simple_init(bio, (SOCKET) *((int*) arg2), (int) arg1); status = 1; } break; case BIO_C_GET_FD: if (BIO_get_init(bio)) { if (arg2) *((int*) arg2) = (int) ptr->socket; status = (int) ptr->socket; } break; case BIO_CTRL_GET_CLOSE: status = BIO_get_shutdown(bio); break; case BIO_CTRL_SET_CLOSE: BIO_set_shutdown(bio, (int) arg1); status = 1; break; case BIO_CTRL_DUP: status = 1; break; case BIO_CTRL_FLUSH: status = 1; break; default: status = 0; break; } return status; }
static int ssl_read(BIO *b, char *buf, size_t size, size_t *readbytes) { int ret = 1; BIO_SSL *sb; SSL *ssl; int retry_reason = 0; int r = 0; if (buf == NULL) return 0; sb = BIO_get_data(b); ssl = sb->ssl; BIO_clear_retry_flags(b); ret = ssl_read_internal(ssl, buf, size, readbytes); switch (SSL_get_error(ssl, ret)) { case SSL_ERROR_NONE: if (sb->renegotiate_count > 0) { sb->byte_count += *readbytes; if (sb->byte_count > sb->renegotiate_count) { sb->byte_count = 0; sb->num_renegotiates++; SSL_renegotiate(ssl); r = 1; } } if ((sb->renegotiate_timeout > 0) && (!r)) { unsigned long tm; tm = (unsigned long)time(NULL); if (tm > sb->last_time + sb->renegotiate_timeout) { sb->last_time = tm; sb->num_renegotiates++; SSL_renegotiate(ssl); } } break; case SSL_ERROR_WANT_READ: BIO_set_retry_read(b); break; case SSL_ERROR_WANT_WRITE: BIO_set_retry_write(b); break; case SSL_ERROR_WANT_X509_LOOKUP: BIO_set_retry_special(b); retry_reason = BIO_RR_SSL_X509_LOOKUP; break; case SSL_ERROR_WANT_ACCEPT: BIO_set_retry_special(b); retry_reason = BIO_RR_ACCEPT; break; case SSL_ERROR_WANT_CONNECT: BIO_set_retry_special(b); retry_reason = BIO_RR_CONNECT; break; case SSL_ERROR_SYSCALL: case SSL_ERROR_SSL: case SSL_ERROR_ZERO_RETURN: default: break; } BIO_set_retry_reason(b, retry_reason); return ret; }
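/*
 * Usage sketch (not part of the sources above): ssl_read(), ssl_write() and
 * ssl_ctrl() implement OpenSSL's BIO_f_ssl() filter, which is also what
 * BIO_new_ssl_connect() builds on.  tls_connect_demo() and the
 * "example.com:443" target are illustrative placeholders.
 */
#include <openssl/bio.h>
#include <openssl/ssl.h>

static int tls_connect_demo(SSL_CTX *ctx)
{
    BIO *sbio = BIO_new_ssl_connect(ctx);   /* connect BIO + SSL filter BIO */
    int ok = 0;

    if (sbio != NULL
            && BIO_set_conn_hostname(sbio, "example.com:443") > 0
            /* BIO_do_handshake() drives the BIO_C_DO_STATE_MACHINE case of
             * ssl_ctrl(): it connects and completes the TLS handshake */
            && BIO_do_handshake(sbio) > 0)
        ok = 1;

    BIO_free_all(sbio);
    return ok;
}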
static int transport_bio_buffered_write(BIO* bio, const char* buf, int num) { int i, ret; int status; int nchunks; int committedBytes; DataChunk chunks[2]; WINPR_BIO_BUFFERED_SOCKET* ptr = (WINPR_BIO_BUFFERED_SOCKET*) BIO_get_data(bio); BIO* next_bio = NULL; ret = num; ptr->writeBlocked = FALSE; BIO_clear_flags(bio, BIO_FLAGS_WRITE); /* we directly append extra bytes in the xmit buffer, this could be prevented * but for now it makes the code more simple. */ if (buf && num && !ringbuffer_write(&ptr->xmitBuffer, (const BYTE*) buf, num)) { WLog_ERR(TAG, "an error occurred when writing (num: %d)", num); return -1; } committedBytes = 0; nchunks = ringbuffer_peek(&ptr->xmitBuffer, chunks, ringbuffer_used(&ptr->xmitBuffer)); next_bio = BIO_next(bio); for (i = 0; i < nchunks; i++) { while (chunks[i].size) { status = BIO_write(next_bio, chunks[i].data, chunks[i].size); if (status <= 0) { if (!BIO_should_retry(next_bio)) { BIO_clear_flags(bio, BIO_FLAGS_SHOULD_RETRY); ret = -1; /* fatal error */ goto out; } if (BIO_should_write(next_bio)) { BIO_set_flags(bio, BIO_FLAGS_WRITE); ptr->writeBlocked = TRUE; goto out; /* EWOULDBLOCK */ } } committedBytes += status; chunks[i].size -= status; chunks[i].data += status; } } out: ringbuffer_commit_read_bytes(&ptr->xmitBuffer, committedBytes); return ret; }
int mempacket_test_inject(BIO *bio, const char *in, int inl, int pktnum, int type) { MEMPACKET_TEST_CTX *ctx = BIO_get_data(bio); MEMPACKET *thispkt, *looppkt, *nextpkt; int i; if (ctx == NULL) return -1; /* We only allow injection before we've started writing any data */ if (pktnum >= 0) { if (ctx->noinject) return -1; } else { ctx->noinject = 1; } if (!TEST_ptr(thispkt = OPENSSL_malloc(sizeof(*thispkt)))) return -1; if (!TEST_ptr(thispkt->data = OPENSSL_malloc(inl))) { mempacket_free(thispkt); return -1; } memcpy(thispkt->data, in, inl); thispkt->len = inl; thispkt->num = (pktnum >= 0) ? (unsigned int)pktnum : ctx->lastpkt; thispkt->type = type; for(i = 0; (looppkt = sk_MEMPACKET_value(ctx->pkts, i)) != NULL; i++) { /* Check if we found the right place to insert this packet */ if (looppkt->num > thispkt->num) { if (sk_MEMPACKET_insert(ctx->pkts, thispkt, i) == 0) { mempacket_free(thispkt); return -1; } /* If we're doing up front injection then we're done */ if (pktnum >= 0) return inl; /* * We need to do some accounting on lastpkt. We increment it first, * but it might now equal the value of injected packets, so we need * to skip over those */ ctx->lastpkt++; do { i++; nextpkt = sk_MEMPACKET_value(ctx->pkts, i); if (nextpkt != NULL && nextpkt->num == ctx->lastpkt) ctx->lastpkt++; else return inl; } while(1); } else if (looppkt->num == thispkt->num) { if (!ctx->noinject) { /* We injected two packets with the same packet number! */ return -1; } ctx->lastpkt++; thispkt->num++; } } /* * We didn't find any packets with a packet number equal to or greater than * this one, so we just add it onto the end */ if (!sk_MEMPACKET_push(ctx->pkts, thispkt)) { mempacket_free(thispkt); return -1; } if (pktnum < 0) ctx->lastpkt++; return inl; }
static int asn1_bio_write(BIO *b, const char *in, int inl) { BIO_ASN1_BUF_CTX *ctx; int wrmax, wrlen, ret; unsigned char *p; BIO *next; ctx = BIO_get_data(b); next = BIO_next(b); if (in == NULL || inl < 0 || ctx == NULL || next == NULL) return 0; wrlen = 0; ret = -1; for (;;) { switch (ctx->state) { /* Setup prefix data, call it */ case ASN1_STATE_START: if (!asn1_bio_setup_ex(b, ctx, ctx->prefix, ASN1_STATE_PRE_COPY, ASN1_STATE_HEADER)) return 0; break; /* Copy any pre data first */ case ASN1_STATE_PRE_COPY: ret = asn1_bio_flush_ex(b, ctx, ctx->prefix_free, ASN1_STATE_HEADER); if (ret <= 0) goto done; break; case ASN1_STATE_HEADER: ctx->buflen = ASN1_object_size(0, inl, ctx->asn1_tag) - inl; if (!ossl_assert(ctx->buflen <= ctx->bufsize)) return 0; p = ctx->buf; ASN1_put_object(&p, 0, inl, ctx->asn1_tag, ctx->asn1_class); ctx->copylen = inl; ctx->state = ASN1_STATE_HEADER_COPY; break; case ASN1_STATE_HEADER_COPY: ret = BIO_write(next, ctx->buf + ctx->bufpos, ctx->buflen); if (ret <= 0) goto done; ctx->buflen -= ret; if (ctx->buflen) ctx->bufpos += ret; else { ctx->bufpos = 0; ctx->state = ASN1_STATE_DATA_COPY; } break; case ASN1_STATE_DATA_COPY: if (inl > ctx->copylen) wrmax = ctx->copylen; else wrmax = inl; ret = BIO_write(next, in, wrmax); if (ret <= 0) goto done; wrlen += ret; ctx->copylen -= ret; in += ret; inl -= ret; if (ctx->copylen == 0) ctx->state = ASN1_STATE_HEADER; if (inl == 0) goto done; break; case ASN1_STATE_POST_COPY: case ASN1_STATE_DONE: BIO_clear_retry_flags(b); return 0; } } done: BIO_clear_retry_flags(b); BIO_copy_next_retry(b); return (wrlen > 0) ? wrlen : ret; }
static int b64_read(BIO *b, char *out, int outl)
{
    int ret = 0, i, ii, j, k, x, n, num, ret_code = 0;
    BIO_B64_CTX *ctx;
    unsigned char *p, *q;
    BIO *next;

    if (out == NULL)
        return 0;
    ctx = (BIO_B64_CTX *)BIO_get_data(b);
    next = BIO_next(b);
    if ((ctx == NULL) || (next == NULL))
        return 0;

    BIO_clear_retry_flags(b);

    if (ctx->encode != B64_DECODE) {
        ctx->encode = B64_DECODE;
        ctx->buf_len = 0;
        ctx->buf_off = 0;
        ctx->tmp_len = 0;
        EVP_DecodeInit(ctx->base64);
    }

    /* First check if there are bytes decoded/encoded */
    if (ctx->buf_len > 0) {
        OPENSSL_assert(ctx->buf_len >= ctx->buf_off);
        i = ctx->buf_len - ctx->buf_off;
        if (i > outl)
            i = outl;
        OPENSSL_assert(ctx->buf_off + i < (int)sizeof(ctx->buf));
        memcpy(out, &(ctx->buf[ctx->buf_off]), i);
        ret = i;
        out += i;
        outl -= i;
        ctx->buf_off += i;
        if (ctx->buf_len == ctx->buf_off) {
            ctx->buf_len = 0;
            ctx->buf_off = 0;
        }
    }

    /*
     * At this point, we have room of outl bytes and an empty buffer, so we
     * should read in some more.
     */

    ret_code = 0;
    while (outl > 0) {
        if (ctx->cont <= 0)
            break;

        i = BIO_read(next, &(ctx->tmp[ctx->tmp_len]),
                     B64_BLOCK_SIZE - ctx->tmp_len);

        if (i <= 0) {
            ret_code = i;

            /* Should we continue next time we are called? */
            if (!BIO_should_retry(next)) {
                ctx->cont = i;
                /* If buffer empty break */
                if (ctx->tmp_len == 0)
                    break;
                /* Fall through and process what we have */
                else
                    i = 0;
            }
            /* else we retry and add more data to buffer */
            else
                break;
        }
        i += ctx->tmp_len;
        ctx->tmp_len = i;

        /*
         * We need to scan, a line at a time until we have a valid line if we
         * are starting.
         */
        if (ctx->start && (BIO_get_flags(b) & BIO_FLAGS_BASE64_NO_NL)) {
            /* ctx->start=1; */
            ctx->tmp_len = 0;
        } else if (ctx->start) {
            q = p = (unsigned char *)ctx->tmp;
            num = 0;
            for (j = 0; j < i; j++) {
                if (*(q++) != '\n')
                    continue;

                /*
                 * due to a previous very long line, we need to keep on
                 * scanning for a '\n' before we even start looking for
                 * base64 encoded stuff.
                 */
                if (ctx->tmp_nl) {
                    p = q;
                    ctx->tmp_nl = 0;
                    continue;
                }

                k = EVP_DecodeUpdate(ctx->base64,
                                     (unsigned char *)ctx->buf,
                                     &num, p, q - p);
                if ((k <= 0) && (num == 0) && (ctx->start))
                    EVP_DecodeInit(ctx->base64);
                else {
                    if (p != (unsigned char *)&(ctx->tmp[0])) {
                        i -= (p - (unsigned char *)&(ctx->tmp[0]));
                        for (x = 0; x < i; x++)
                            ctx->tmp[x] = p[x];
                    }
                    EVP_DecodeInit(ctx->base64);
                    ctx->start = 0;
                    break;
                }
                p = q;
            }

            /* we fell off the end without starting */
            if ((j == i) && (num == 0)) {
                /*
                 * Is this is one long chunk?, if so, keep on reading until a
                 * new line.
                 */
                if (p == (unsigned char *)&(ctx->tmp[0])) {
                    /* Check buffer full */
                    if (i == B64_BLOCK_SIZE) {
                        ctx->tmp_nl = 1;
                        ctx->tmp_len = 0;
                    }
                } else if (p != q) { /* finished on a '\n' */
                    n = q - p;
                    for (ii = 0; ii < n; ii++)
                        ctx->tmp[ii] = p[ii];
                    ctx->tmp_len = n;
                }
                /* else finished on a '\n' */
                continue;
            } else {
                ctx->tmp_len = 0;
            }
        } else if ((i < B64_BLOCK_SIZE) && (ctx->cont > 0)) {
            /*
             * If buffer isn't full and we can retry then restart to read in
             * more data.
             */
            continue;
        }

        if (BIO_get_flags(b) & BIO_FLAGS_BASE64_NO_NL) {
            int z, jj;

            jj = i & ~3;        /* process per 4 */
            z = EVP_DecodeBlock((unsigned char *)ctx->buf,
                                (unsigned char *)ctx->tmp, jj);
            if (jj > 2) {
                if (ctx->tmp[jj - 1] == '=') {
                    z--;
                    if (ctx->tmp[jj - 2] == '=')
                        z--;
                }
            }
            /*
             * z is now number of output bytes and jj is the number consumed
             */
            if (jj != i) {
                memmove(ctx->tmp, &ctx->tmp[jj], i - jj);
                ctx->tmp_len = i - jj;
            }
            ctx->buf_len = 0;
            if (z > 0) {
                ctx->buf_len = z;
            }
            i = z;
        } else {
            i = EVP_DecodeUpdate(ctx->base64,
                                 (unsigned char *)ctx->buf, &ctx->buf_len,
                                 (unsigned char *)ctx->tmp, i);
            ctx->tmp_len = 0;
        }
        /*
         * If eof or an error was signalled, then the condition
         * 'ctx->cont <= 0' will prevent b64_read() from reading
         * more data on subsequent calls. This assignment was
         * deleted accidentally in commit 5562cfaca4f3.
         */
        ctx->cont = i;

        ctx->buf_off = 0;
        if (i < 0) {
            ret_code = 0;
            ctx->buf_len = 0;
            break;
        }

        if (ctx->buf_len <= outl)
            i = ctx->buf_len;
        else
            i = outl;

        memcpy(out, ctx->buf, i);
        ret += i;
        ctx->buf_off = i;
        if (ctx->buf_off == ctx->buf_len) {
            ctx->buf_len = 0;
            ctx->buf_off = 0;
        }
        outl -= i;
        out += i;
    }
    /* BIO_clear_retry_flags(b); */
    BIO_copy_next_retry(b);
    return ((ret == 0) ? ret_code : ret);
}
static int bio_zlib_write(BIO *b, const char *in, int inl) { BIO_ZLIB_CTX *ctx; int ret; z_stream *zout; BIO *next = BIO_next(b); if (!in || !inl) return 0; ctx = BIO_get_data(b); if (ctx->odone) return 0; zout = &ctx->zout; BIO_clear_retry_flags(b); if (!ctx->obuf) { ctx->obuf = OPENSSL_malloc(ctx->obufsize); /* Need error here */ if (ctx->obuf == NULL) { COMPerr(COMP_F_BIO_ZLIB_WRITE, ERR_R_MALLOC_FAILURE); return 0; } ctx->optr = ctx->obuf; ctx->ocount = 0; deflateInit(zout, ctx->comp_level); zout->next_out = ctx->obuf; zout->avail_out = ctx->obufsize; } /* Obtain input data directly from supplied buffer */ zout->next_in = (void *)in; zout->avail_in = inl; for (;;) { /* If data in output buffer write it first */ while (ctx->ocount) { ret = BIO_write(next, ctx->optr, ctx->ocount); if (ret <= 0) { /* Total data written */ int tot = inl - zout->avail_in; BIO_copy_next_retry(b); if (ret < 0) return (tot > 0) ? tot : ret; return tot; } ctx->optr += ret; ctx->ocount -= ret; } /* Have we consumed all supplied data? */ if (!zout->avail_in) return inl; /* Compress some more */ /* Reset buffer */ ctx->optr = ctx->obuf; zout->next_out = ctx->obuf; zout->avail_out = ctx->obufsize; /* Compress some more */ ret = deflate(zout, 0); if (ret != Z_OK) { COMPerr(COMP_F_BIO_ZLIB_WRITE, COMP_R_ZLIB_DEFLATE_ERROR); ERR_add_error_data(2, "zlib error:", zError(ret)); return 0; } ctx->ocount = ctx->obufsize - zout->avail_out; } }
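/*
 * Usage sketch (not part of the sources above): bio_zlib_write() and
 * bio_zlib_ctrl() back OpenSSL's BIO_f_zlib() filter, which is only present
 * when OpenSSL is built with zlib support.  deflate_to_file() is an
 * illustrative name.
 */
#include <openssl/bio.h>
#include <openssl/comp.h>

static int deflate_to_file(const char *path, const unsigned char *data, int len)
{
    BIO *zbio = BIO_new(BIO_f_zlib());
    BIO *out = BIO_new_file(path, "wb");
    int ok = 0;

    if (zbio != NULL && out != NULL) {
        BIO_push(zbio, out);
        out = NULL;                  /* owned by the chain now */
        /* BIO_flush() ends up in bio_zlib_ctrl(): it finishes the deflate
         * stream and then flushes the underlying BIO */
        if (BIO_write(zbio, data, len) == len && BIO_flush(zbio) > 0)
            ok = 1;
    }
    BIO_free_all(zbio);
    BIO_free(out);
    return ok;
}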
static int b64_write(BIO *b, const char *in, int inl) { int ret = 0; int n; int i; BIO_B64_CTX *ctx; BIO *next; ctx = (BIO_B64_CTX *)BIO_get_data(b); next = BIO_next(b); if ((ctx == NULL) || (next == NULL)) return 0; BIO_clear_retry_flags(b); if (ctx->encode != B64_ENCODE) { ctx->encode = B64_ENCODE; ctx->buf_len = 0; ctx->buf_off = 0; ctx->tmp_len = 0; EVP_EncodeInit(ctx->base64); } OPENSSL_assert(ctx->buf_off < (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); n = ctx->buf_len - ctx->buf_off; while (n > 0) { i = BIO_write(next, &(ctx->buf[ctx->buf_off]), n); if (i <= 0) { BIO_copy_next_retry(b); return i; } OPENSSL_assert(i <= n); ctx->buf_off += i; OPENSSL_assert(ctx->buf_off <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); n -= i; } /* at this point all pending data has been written */ ctx->buf_off = 0; ctx->buf_len = 0; if ((in == NULL) || (inl <= 0)) return 0; while (inl > 0) { n = (inl > B64_BLOCK_SIZE) ? B64_BLOCK_SIZE : inl; if (BIO_get_flags(b) & BIO_FLAGS_BASE64_NO_NL) { if (ctx->tmp_len > 0) { OPENSSL_assert(ctx->tmp_len <= 3); n = 3 - ctx->tmp_len; /* * There's a theoretical possibility for this */ if (n > inl) n = inl; memcpy(&(ctx->tmp[ctx->tmp_len]), in, n); ctx->tmp_len += n; ret += n; if (ctx->tmp_len < 3) break; ctx->buf_len = EVP_EncodeBlock((unsigned char *)ctx->buf, (unsigned char *)ctx->tmp, ctx->tmp_len); OPENSSL_assert(ctx->buf_len <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); /* * Since we're now done using the temporary buffer, the * length should be 0'd */ ctx->tmp_len = 0; } else { if (n < 3) { memcpy(ctx->tmp, in, n); ctx->tmp_len = n; ret += n; break; } n -= n % 3; ctx->buf_len = EVP_EncodeBlock((unsigned char *)ctx->buf, (const unsigned char *)in, n); OPENSSL_assert(ctx->buf_len <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); ret += n; } } else { if (!EVP_EncodeUpdate(ctx->base64, (unsigned char *)ctx->buf, &ctx->buf_len, (unsigned char *)in, n)) return ((ret == 0) ? -1 : ret); OPENSSL_assert(ctx->buf_len <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); ret += n; } inl -= n; in += n; ctx->buf_off = 0; n = ctx->buf_len; while (n > 0) { i = BIO_write(next, &(ctx->buf[ctx->buf_off]), n); if (i <= 0) { BIO_copy_next_retry(b); return ((ret == 0) ? i : ret); } OPENSSL_assert(i <= n); n -= i; ctx->buf_off += i; OPENSSL_assert(ctx->buf_off <= (int)sizeof(ctx->buf)); OPENSSL_assert(ctx->buf_len >= ctx->buf_off); } ctx->buf_len = 0; ctx->buf_off = 0; } return ret; }
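/*
 * Usage sketch (not part of the sources above): b64_write(), b64_read() and
 * b64_ctrl() implement OpenSSL's BIO_f_base64() filter.  Encoding a buffer to
 * stdout without embedded newlines might look like the following;
 * b64_encode_to_stdout() is an illustrative name.
 */
#include <stdio.h>
#include <openssl/bio.h>
#include <openssl/evp.h>

static int b64_encode_to_stdout(const unsigned char *data, int len)
{
    BIO *b64 = BIO_new(BIO_f_base64());
    BIO *out = BIO_new_fp(stdout, BIO_NOCLOSE);
    int ok = 0;

    if (b64 != NULL && out != NULL) {
        /* Selects the BIO_FLAGS_BASE64_NO_NL branches in b64_write()/b64_read() */
        BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
        BIO_push(b64, out);
        out = NULL;                  /* owned by the chain now */
        /* BIO_flush() makes b64_ctrl() emit any buffered partial group */
        if (BIO_write(b64, data, len) == len && BIO_flush(b64) > 0)
            ok = 1;
    }
    BIO_free_all(b64);
    BIO_free(out);
    return ok;
}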
static long rdg_bio_ctrl(BIO* bio, int cmd, long arg1, void* arg2) { int status = -1; rdpRdg* rdg = (rdpRdg*) BIO_get_data(bio); rdpTls* tlsOut = rdg->tlsOut; rdpTls* tlsIn = rdg->tlsIn; if (cmd == BIO_CTRL_FLUSH) { (void)BIO_flush(tlsOut->bio); (void)BIO_flush(tlsIn->bio); status = 1; } else if (cmd == BIO_C_SET_NONBLOCK) { status = 1; } else if (cmd == BIO_C_READ_BLOCKED) { BIO* bio = tlsOut->bio; status = BIO_read_blocked(bio); } else if (cmd == BIO_C_WRITE_BLOCKED) { BIO* bio = tlsIn->bio; status = BIO_write_blocked(bio); } else if (cmd == BIO_C_WAIT_READ) { int timeout = (int) arg1; BIO* bio = tlsOut->bio; if (BIO_read_blocked(bio)) return BIO_wait_read(bio, timeout); else if (BIO_write_blocked(bio)) return BIO_wait_write(bio, timeout); else status = 1; } else if (cmd == BIO_C_WAIT_WRITE) { int timeout = (int) arg1; BIO* bio = tlsIn->bio; if (BIO_write_blocked(bio)) status = BIO_wait_write(bio, timeout); else if (BIO_read_blocked(bio)) status = BIO_wait_read(bio, timeout); else status = 1; } else if (cmd == BIO_C_GET_EVENT || cmd == BIO_C_GET_FD) { /* * A note about BIO_C_GET_FD: * Even if two FDs are part of RDG, only one FD can be returned here. * * In FreeRDP, BIO FDs are only used for polling, so it is safe to use the outgoing FD only * * See issue #3602 */ status = BIO_ctrl(tlsOut->bio, cmd, arg1, arg2); } return status; }
static long b64_ctrl(BIO *b, int cmd, long num, void *ptr) { BIO_B64_CTX *ctx; long ret = 1; int i; BIO *next; ctx = (BIO_B64_CTX *)BIO_get_data(b); next = BIO_next(b); if ((ctx == NULL) || (next == NULL)) return 0; switch (cmd) { case BIO_CTRL_RESET: ctx->cont = 1; ctx->start = 1; ctx->encode = B64_NONE; ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_EOF: /* More to read */ if (ctx->cont <= 0) ret = 1; else ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_WPENDING: /* More to write in buffer */ OPENSSL_assert(ctx->buf_len >= ctx->buf_off); ret = ctx->buf_len - ctx->buf_off; if ((ret == 0) && (ctx->encode != B64_NONE) && (EVP_ENCODE_CTX_num(ctx->base64) != 0)) ret = 1; else if (ret <= 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_PENDING: /* More to read in buffer */ OPENSSL_assert(ctx->buf_len >= ctx->buf_off); ret = ctx->buf_len - ctx->buf_off; if (ret <= 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_FLUSH: /* do a final write */ again: while (ctx->buf_len != ctx->buf_off) { i = b64_write(b, NULL, 0); if (i < 0) return i; } if (BIO_get_flags(b) & BIO_FLAGS_BASE64_NO_NL) { if (ctx->tmp_len != 0) { ctx->buf_len = EVP_EncodeBlock((unsigned char *)ctx->buf, (unsigned char *)ctx->tmp, ctx->tmp_len); ctx->buf_off = 0; ctx->tmp_len = 0; goto again; } } else if (ctx->encode != B64_NONE && EVP_ENCODE_CTX_num(ctx->base64) != 0) { ctx->buf_off = 0; EVP_EncodeFinal(ctx->base64, (unsigned char *)ctx->buf, &(ctx->buf_len)); /* push out the bytes */ goto again; } /* Finally flush the underlying BIO */ ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_C_DO_STATE_MACHINE: BIO_clear_retry_flags(b); ret = BIO_ctrl(next, cmd, num, ptr); BIO_copy_next_retry(b); break; case BIO_CTRL_DUP: break; case BIO_CTRL_INFO: case BIO_CTRL_GET: case BIO_CTRL_SET: default: ret = BIO_ctrl(next, cmd, num, ptr); break; } return ret; }
static int ok_read(BIO *b, char *out, int outl) { int ret = 0, i, n; BIO_OK_CTX *ctx; BIO *next; if (out == NULL) return 0; ctx = BIO_get_data(b); next = BIO_next(b); if ((ctx == NULL) || (next == NULL) || (BIO_get_init(b) == 0)) return 0; while (outl > 0) { /* copy clean bytes to output buffer */ if (ctx->blockout) { i = ctx->buf_len - ctx->buf_off; if (i > outl) i = outl; memcpy(out, &(ctx->buf[ctx->buf_off]), i); ret += i; out += i; outl -= i; ctx->buf_off += i; /* all clean bytes are out */ if (ctx->buf_len == ctx->buf_off) { ctx->buf_off = 0; /* * copy start of the next block into proper place */ if (ctx->buf_len_save - ctx->buf_off_save > 0) { ctx->buf_len = ctx->buf_len_save - ctx->buf_off_save; memmove(ctx->buf, &(ctx->buf[ctx->buf_off_save]), ctx->buf_len); } else { ctx->buf_len = 0; } ctx->blockout = 0; } } /* output buffer full -- cancel */ if (outl == 0) break; /* no clean bytes in buffer -- fill it */ n = IOBS - ctx->buf_len; i = BIO_read(next, &(ctx->buf[ctx->buf_len]), n); if (i <= 0) break; /* nothing new */ ctx->buf_len += i; /* no signature yet -- check if we got one */ if (ctx->sigio == 1) { if (!sig_in(b)) { BIO_clear_retry_flags(b); return 0; } } /* signature ok -- check if we got block */ if (ctx->sigio == 0) { if (!block_in(b)) { BIO_clear_retry_flags(b); return 0; } } /* invalid block -- cancel */ if (ctx->cont <= 0) break; } BIO_clear_retry_flags(b); BIO_copy_next_retry(b); return ret; }
static int bio_read(BIO *b, char *buf, int len)
{
    git_stream *io = (git_stream *) BIO_get_data(b);
    return (int) git_stream_read(io, buf, len);
}
static long ok_ctrl(BIO *b, int cmd, long num, void *ptr) { BIO_OK_CTX *ctx; EVP_MD *md; const EVP_MD **ppmd; long ret = 1; int i; BIO *next; ctx = BIO_get_data(b); next = BIO_next(b); switch (cmd) { case BIO_CTRL_RESET: ctx->buf_len = 0; ctx->buf_off = 0; ctx->buf_len_save = 0; ctx->buf_off_save = 0; ctx->cont = 1; ctx->finished = 0; ctx->blockout = 0; ctx->sigio = 1; ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_EOF: /* More to read */ if (ctx->cont <= 0) ret = 1; else ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_PENDING: /* More to read in buffer */ case BIO_CTRL_WPENDING: /* More to read in buffer */ ret = ctx->blockout ? ctx->buf_len - ctx->buf_off : 0; if (ret <= 0) ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_CTRL_FLUSH: /* do a final write */ if (ctx->blockout == 0) if (!block_out(b)) return 0; while (ctx->blockout) { i = ok_write(b, NULL, 0); if (i < 0) { ret = i; break; } } ctx->finished = 1; ctx->buf_off = ctx->buf_len = 0; ctx->cont = (int)ret; /* Finally flush the underlying BIO */ ret = BIO_ctrl(next, cmd, num, ptr); break; case BIO_C_DO_STATE_MACHINE: BIO_clear_retry_flags(b); ret = BIO_ctrl(next, cmd, num, ptr); BIO_copy_next_retry(b); break; case BIO_CTRL_INFO: ret = (long)ctx->cont; break; case BIO_C_SET_MD: md = ptr; if (!EVP_DigestInit_ex(ctx->md, md, NULL)) return 0; BIO_set_init(b, 1); break; case BIO_C_GET_MD: if (BIO_get_init(b)) { ppmd = ptr; *ppmd = EVP_MD_CTX_md(ctx->md); } else ret = 0; break; default: ret = BIO_ctrl(next, cmd, num, ptr); break; } return ret; }
static int enc_read(BIO *b, char *out, int outl) { int ret = 0, i; BIO_ENC_CTX *ctx; BIO *next; if (out == NULL) return (0); ctx = BIO_get_data(b); next = BIO_next(b); if ((ctx == NULL) || (next == NULL)) return 0; /* First check if there are bytes decoded/encoded */ if (ctx->buf_len > 0) { i = ctx->buf_len - ctx->buf_off; if (i > outl) i = outl; memcpy(out, &(ctx->buf[ctx->buf_off]), i); ret = i; out += i; outl -= i; ctx->buf_off += i; if (ctx->buf_len == ctx->buf_off) { ctx->buf_len = 0; ctx->buf_off = 0; } } /* * At this point, we have room of outl bytes and an empty buffer, so we * should read in some more. */ while (outl > 0) { if (ctx->cont <= 0) break; /* * read in at IV offset, read the EVP_Cipher documentation about why */ i = BIO_read(next, &(ctx->buf[BUF_OFFSET]), ENC_BLOCK_SIZE); if (i <= 0) { /* Should be continue next time we are called? */ if (!BIO_should_retry(next)) { ctx->cont = i; i = EVP_CipherFinal_ex(ctx->cipher, (unsigned char *)ctx->buf, &(ctx->buf_len)); ctx->ok = i; ctx->buf_off = 0; } else { ret = (ret == 0) ? i : ret; break; } } else { if (!EVP_CipherUpdate(ctx->cipher, (unsigned char *)ctx->buf, &ctx->buf_len, (unsigned char *)&(ctx->buf[BUF_OFFSET]), i)) { BIO_clear_retry_flags(b); ctx->ok = 0; return 0; } ctx->cont = 1; /* * Note: it is possible for EVP_CipherUpdate to decrypt zero * bytes because this is or looks like the final block: if this * happens we should retry and either read more data or decrypt * the final block */ if (ctx->buf_len == 0) continue; } if (ctx->buf_len <= outl) i = ctx->buf_len; else i = outl; if (i <= 0) break; memcpy(out, ctx->buf, i); ret += i; ctx->buf_off = i; outl -= i; out += i; } BIO_clear_retry_flags(b); BIO_copy_next_retry(b); return ((ret == 0) ? ctx->cont : ret); }