/* Per-connection worker thread.  Parses the client's request and
 * serves it from the cache on a hit; on a miss, forwards the request
 * to the origin server and relays the response back to the client.
 * Runs detached, so no join is required; the connection descriptor is
 * smuggled in through the (void *) thread argument. */
void *thread(void *vargp)
{
	struct client_request *req;
	struct cache_cell *hit;
	int connfd = (int)(long)vargp;   /* unpack fd from pointer arg */

	Pthread_detach(pthread_self());

	req = allocate_request();
	parse_request_h(req, connfd);

	/* Serve from cache when possible, otherwise go to the server. */
	hit = search_cell(req->request_line);
	if (hit == NULL)
		miss_handler(connfd, req);
	else
		hit_handler(connfd, hit, req);

	return NULL;
}
/* Handle the response to a TURN Allocate request.
 *
 * err     transaction-level error (non-zero aborts straight to 'out')
 * scode   STUN response status code (0 = success response)
 * reason  server-supplied reason phrase, passed through to the handler
 * msg     the full STUN message, queried for attributes below
 * arg     the owning struct turnc (TURN client)
 *
 * On success (scode 0) the XOR-MAPPED-ADDRESS and XOR-RELAYED-ADDRESS
 * attributes are required; LIFETIME is optional.  On 300, 401 and 438
 * the request is re-issued and this handler returns early WITHOUT
 * notifying the application handler — turnc->th fires only when we
 * fall through to 'out'.  turnc_request_loops() presumably guards
 * against endless retry cycles on repeated status codes — confirm in
 * its definition. */
static void allocate_resp_handler(int err, uint16_t scode,
				  const char *reason,
				  const struct stun_msg *msg, void *arg)
{
	/* map/rel must start NULL: both are read unconditionally at 'out'. */
	struct stun_attr *map = NULL, *rel = NULL, *ltm, *alt;
	struct turnc *turnc = arg;

	if (err || turnc_request_loops(&turnc->ls, scode))
		goto out;

	switch (scode) {

	case 0:
		/* Success: both reflexive and relayed addresses required. */
		map = stun_msg_attr(msg, STUN_ATTR_XOR_MAPPED_ADDR);
		rel = stun_msg_attr(msg, STUN_ATTR_XOR_RELAY_ADDR);
		ltm = stun_msg_attr(msg, STUN_ATTR_LIFETIME);

		if (!rel || !map) {
			DEBUG_WARNING("xor_mapped/relay addr attr missing\n");
			err = EPROTO;
			break;
		}

		/* Server may grant a different lifetime than requested. */
		if (ltm)
			turnc->lifetime = ltm->v.lifetime;

		turnc->allocated = true;
		refresh_timer(turnc);
		break;

	case 300:
		/* Alternate-server redirect; only followed for UDP —
		   connection-oriented transports just report the error. */
		if (turnc->proto == IPPROTO_TCP ||
		    turnc->proto == STUN_TRANSP_DTLS)
			break;

		alt = stun_msg_attr(msg, STUN_ATTR_ALT_SERVER);
		if (!alt)
			break;

		/* Remember the previous server, retry against the new one. */
		turnc->psrv = turnc->srv;
		turnc->srv = alt->v.alt_server;

		err = allocate_request(turnc);
		if (err)
			break;

		return;   /* retry in flight — do not notify handler yet */

	case 401:   /* Unauthorized */
	case 438:   /* Stale Nonce  */
		/* Re-derive credentials from the challenge, then retry. */
		err = turnc_keygen(turnc, msg);
		if (err)
			break;

		err = allocate_request(turnc);
		if (err)
			break;

		return;   /* retry in flight — do not notify handler yet */

	default:
		break;
	}

 out:
	/* Notify the application; address pointers are NULL unless the
	   success path above populated map/rel. */
	turnc->th(err, scode, reason,
		  rel ? &rel->v.xor_relay_addr : NULL,
		  map ? &map->v.xor_mapped_addr : NULL,
		  msg, turnc->arg);
}
static bool request_handler(struct restund_msgctx *ctx, int proto, void *sock, const struct sa *src, const struct sa *dst, const struct stun_msg *msg) { const uint16_t met = stun_msg_method(msg); struct allocation *al; int err = 0; switch (met) { case STUN_METHOD_ALLOCATE: case STUN_METHOD_REFRESH: case STUN_METHOD_CREATEPERM: case STUN_METHOD_CHANBIND: break; default: return false; } if (ctx->ua.typec > 0) { err = stun_ereply(proto, sock, src, 0, msg, 420, "Unknown Attribute", ctx->key, ctx->keylen, ctx->fp, 2, STUN_ATTR_UNKNOWN_ATTR, &ctx->ua, STUN_ATTR_SOFTWARE, restund_software); goto out; } al = allocation_find(proto, src, dst); if (!al && met != STUN_METHOD_ALLOCATE) { restund_debug("turn: allocation does not exist\n"); err = stun_ereply(proto, sock, src, 0, msg, 437, "Allocation Mismatch", ctx->key, ctx->keylen, ctx->fp, 1, STUN_ATTR_SOFTWARE, restund_software); goto out; } if (al && al->username && ctx->key) { struct stun_attr *usr = stun_msg_attr(msg, STUN_ATTR_USERNAME); if (!usr || strcmp(usr->v.username, al->username)) { restund_debug("turn: wrong credetials\n"); err = stun_ereply(proto, sock, src, 0, msg, 441, "Wrong Credentials", ctx->key, ctx->keylen, ctx->fp, 1, STUN_ATTR_SOFTWARE,restund_software); goto out; } } switch (met) { case STUN_METHOD_ALLOCATE: allocate_request(&turnd, al, ctx, proto, sock, src, dst, msg); break; case STUN_METHOD_REFRESH: refresh_request(&turnd, al, ctx, proto, sock, src, msg); break; case STUN_METHOD_CREATEPERM: createperm_request(al, ctx, proto, sock, src, msg); break; case STUN_METHOD_CHANBIND: chanbind_request(al, ctx, proto, sock, src, msg); break; } out: if (err) { restund_warning("turn reply error: %m\n", err); } return true; }
/**
 * Allocate a TURN Client
 *
 * @param turncp   Pointer to allocated TURN Client
 * @param conf     Optional STUN Configuration
 * @param proto    Transport Protocol
 * @param sock     Transport socket
 * @param layer    Transport layer
 * @param srv      TURN Server IP-address
 * @param username Authentication username
 * @param password Authentication password
 * @param lifetime Allocate lifetime in [seconds]
 * @param th       TURN handler
 * @param arg      Handler argument
 *
 * @return 0 if success, otherwise errorcode
 */
int turnc_alloc(struct turnc **turncp, const struct stun_conf *conf,
		int proto, void *sock, int layer, const struct sa *srv,
		const char *username, const char *password,
		uint32_t lifetime, turnc_h *th, void *arg)
{
	struct turnc *tc;
	int err;

	if (!turncp || !sock || !srv || !username || !password || !th)
		return EINVAL;

	tc = mem_zalloc(sizeof(*tc), destructor);
	if (!tc)
		return ENOMEM;

	/* Acquire sub-resources in order; short-circuit on first failure
	   and let the destructor release whatever was set up. */
	if ((err = stun_alloc(&tc->stun, conf, NULL, NULL)) ||
	    (err = str_dup(&tc->username, username)) ||
	    (err = str_dup(&tc->password, password)) ||
	    (err = turnc_perm_hash_alloc(&tc->perms, PERM_HASH_SIZE)) ||
	    (err = turnc_chan_hash_alloc(&tc->chans, CHAN_HASH_SIZE)))
		goto out;

	tmr_init(&tc->tmr);

	tc->proto    = proto;
	tc->sock     = mem_ref(sock);
	tc->psrv     = *srv;   /* previous server starts equal to current */
	tc->srv      = *srv;
	tc->lifetime = lifetime;
	tc->th       = th;
	tc->arg      = arg;

	/* Only UDP needs a transport helper hooked in. */
	err = 0;
	if (proto == IPPROTO_UDP)
		err = udp_register_helper(&tc->uh, sock, layer,
					  udp_send_handler,
					  udp_recv_handler, tc);
	if (err)
		goto out;

	/* Kick off the initial Allocate transaction. */
	err = allocate_request(tc);

 out:
	if (err)
		mem_deref(tc);
	else
		*turncp = tc;

	return err;
}
/* Control-port entry point of the disk outlet.  Decodes the request
 * packet in 'data', issues the corresponding asynchronous disk
 * operation, and returns an (immediate) status reply term built on
 * 'hp'.  Completion is reported later via the *_complete_cb callbacks
 * carrying the caller-supplied async_tag.
 *
 * Fixes vs. previous revision:
 *  - copy-pasted comments in the BARRIER/FLUSH cases named disk_write
 *    instead of disk_barrier/disk_flush
 *  - signed/unsigned comparison in the final assert and in the WRITE
 *    payload-size check made explicit with casts */
static term_t ol_disk_control(outlet_t *ol,
		uint32_t op, uint8_t *data, int dlen, term_t reply_to, heap_t *hp)
{
	char rbuf[256];
	char *reply = rbuf;

	switch (op)
	{
	case DISK_REQ_READ:
	{
		// async_tag[2] sector_start[8] num_sectors[4]
		if (dlen != 2 +8 +4)
			goto error;
		uint16_t async_tag = GET_UINT_16(data);
		uint64_t sector_start = GET_UINT_64(data +2);
		uint32_t num_sectors = GET_UINT_32(data +2 +8);

		if (ol->free_reqs == 0)
			goto error;

		// Fill in the request first; it is only taken off the free
		// list after the call succeeds, as disk_read may fail
		disk_req_t *req = ol->free_reqs;
		req->async_tag = async_tag;
		req->num_sectors = num_sectors;
		req->reply_to = reply_to;

		if (disk_read(sector_start, num_sectors, read_complete_cb, req) < 0)
			goto error;

		// Callback is imminent - allocate
		allocate_request(req);

		*reply++ = DISK_REP_OK;
		break;
	}
	case DISK_REQ_WRITE:
	{
		// async_tag[2] sector_start[8] num_sectors[4] data[n]
		if (dlen <= 2 +8 +4)
			goto error;
		uint16_t async_tag = GET_UINT_16(data);
		uint64_t sector_start = GET_UINT_64(data +2);
		uint32_t num_sectors = GET_UINT_32(data +2 +8);
		dlen -= (2 +8 +4);
		data += (2 +8 +4);

		// dlen >= 1 here, so the conversion to uint32_t is safe.
		// NOTE(review): num_sectors * SECTOR_SIZE may wrap for huge
		// num_sectors; the equality check then rejects the request
		// unless dlen happens to match the wrapped value — confirm
		// upstream bounds on num_sectors.
		if ((uint32_t)dlen != num_sectors *SECTOR_SIZE)
			goto error;

		if (ol->free_reqs == 0)
			goto error;

		// Fill in the request first; it is only taken off the free
		// list after the call succeeds, as disk_write may fail
		disk_req_t *req = ol->free_reqs;
		req->async_tag = async_tag;
		req->num_sectors = num_sectors;
		req->reply_to = reply_to;

		if (disk_write(sector_start, data, dlen, simple_complete_cb, req) < 0)
			goto error;

		// Callback is imminent - allocate
		allocate_request(req);

		*reply++ = DISK_REP_OK;
		break;
	}
	case DISK_REQ_BARRIER:
	{
		// async_tag[2]
		if (dlen != 2)
			goto error;
		uint16_t async_tag = GET_UINT_16(data);

		if (ol->free_reqs == 0)
			goto error;

		// Fill in the request first; it is only taken off the free
		// list after the call succeeds, as disk_barrier may fail
		disk_req_t *req = ol->free_reqs;
		req->async_tag = async_tag;
		req->reply_to = reply_to;

		if (disk_barrier(simple_complete_cb, req) < 0)
			goto error;

		// Callback is imminent - allocate
		allocate_request(req);

		*reply++ = DISK_REP_OK;
		break;
	}
	case DISK_REQ_FLUSH:
	{
		// async_tag[2]
		if (dlen != 2)
			goto error;
		uint16_t async_tag = GET_UINT_16(data);

		if (ol->free_reqs == 0)
			goto error;

		// Fill in the request first; it is only taken off the free
		// list after the call succeeds, as disk_flush may fail
		disk_req_t *req = ol->free_reqs;
		req->async_tag = async_tag;
		req->reply_to = reply_to;

		if (disk_flush(simple_complete_cb, req) < 0)
			goto error;

		// Callback is imminent - allocate
		allocate_request(req);

		*reply++ = DISK_REP_OK;
		break;
	}
	case DISK_REQ_TRIM:
		REPLY_DISK_ERROR("enotsup");
		break;

	default:
error:
		REPLY_DISK_ERROR("einval");
	}

	int rlen = reply -rbuf;
	// rlen is at least 1 (status byte) and bounded by the reply buffer
	assert(rlen >= 1 && (size_t)rlen <= sizeof(rbuf));
	term_t result = heap_str_N(hp, rbuf, rlen);
	if (result == noval)
		return A_NO_MEMORY;
	return result;
}