/*
 *	Find a previous EAP-Request sent by us, which matches
 *	the current EAP-Response.
 *
 *	Then, release the handler from the list, and return it to
 *	the caller.
 *
 *	Also since we fill the eap_ds with the present EAP-Response we
 *	got to free the prev_eapds & move the eap_ds to prev_eapds.
 *
 *	Returns the matching EAP_HANDLER (unspliced from both the
 *	rbtree and the linked list, ownership transferred to the
 *	caller), or NULL if no matching session exists.
 */
static EAP_HANDLER *eaplist_find(rlm_eap_t *inst, REQUEST *request)
{
	int		i;
	VALUE_PAIR	*state;
	rbnode_t	*node;
	EAP_HANDLER	*handler, myHandler;

	/*
	 *	We key the sessions off of the 'state' attribute, so it
	 *	must exist, and be exactly the length we generated.
	 */
	state = pairfind(request->packet->vps, PW_STATE, 0, TAG_ANY);
	if (!state || (state->length != EAP_STATE_LEN)) {
		return NULL;
	}

	/*
	 *	Sessions are looked up by (source IP, State) pair.
	 */
	myHandler.src_ipaddr = request->packet->src_ipaddr;
	memcpy(myHandler.state, state->vp_strvalue, sizeof(myHandler.state));

	/*
	 *	Playing with a data structure shared among threads
	 *	means that we need a lock, to avoid conflict.
	 */
	pthread_mutex_lock(&(inst->session_mutex));

	/*
	 *	Check the first few handlers in the list, and delete
	 *	them if they're too old.  We don't need to check them
	 *	all, as incoming requests will quickly cause older
	 *	handlers to be deleted.
	 */
	for (i = 0; i < 2; i++) {
		handler = inst->session_head;
		if (handler &&
		    ((request->timestamp - handler->timestamp) > inst->timer_limit)) {
			node = rbtree_find(inst->session_tree, handler);
			rad_assert(node != NULL);
			rbtree_delete(inst->session_tree, node);

			/*
			 *	handler == inst->session_head, so popping
			 *	it only needs to fix up the head pointer.
			 */
			inst->session_head = handler->next;
			if (handler->next) {
				handler->next->prev = NULL;
			} else {
				/*
				 *	BUG FIX: the list is now empty, so
				 *	the tail must be cleared too.  The
				 *	previous code left session_tail
				 *	pointing at the handler we are about
				 *	to free (a dangling pointer).
				 */
				inst->session_head = NULL;
				inst->session_tail = NULL;
			}
			eap_handler_free(handler);
		}
	}

	handler = NULL;
	node = rbtree_find(inst->session_tree, &myHandler);
	if (node) {
		handler = rbtree_node2data(inst->session_tree, node);

		/*
		 *	Delete old handler from the tree.
		 */
		rbtree_delete(inst->session_tree, node);

		/*
		 *	And unsplice it from the linked list.
		 */
		if (handler->prev) {
			handler->prev->next = handler->next;
		} else {
			inst->session_head = handler->next;
		}
		if (handler->next) {
			handler->next->prev = handler->prev;
		} else {
			inst->session_tail = handler->prev;
		}
		handler->prev = handler->next = NULL;
	}

	pthread_mutex_unlock(&(inst->session_mutex));

	/*
	 *	Not found.
	 */
	if (!node) {
		RDEBUG2("Request not found in the list");
		return NULL;
	}

	/*
	 *	Found, but state verification failed.
	 */
	if (!handler) {
		radlog(L_ERR, "rlm_eap2: State verification failed.");
		return NULL;
	}

	RDEBUG2("Request found, released from the list");
	return handler;
}
/*
 *	Delete a particular request.
 *
 *	Unlinks the request from its per-ID doubly-linked list, updates
 *	the SNMP reply counters (when built with WITH_SNMP), removes it
 *	from the global request tree -- and, if a proxied reply is still
 *	outstanding, from the proxy tree -- then frees the request.
 *
 *	NOTE(review): assumes request->reply is non-NULL when SNMP stats
 *	are enabled -- confirm callers guarantee this.
 */
void rl_delete(REQUEST *request)
{
	int id;
	REQNODE *prev, *next;

	/* The container holds this request's node in the per-ID list. */
	prev = ((REQNODE *) request->container)->prev;
	next = ((REQNODE *) request->container)->next;

	id = request->packet->id;

	/*
	 *	Update the last request we touched.
	 *
	 *	This is so the periodic "walk & clean list"
	 *	function, below, doesn't walk over all requests
	 *	all of the time.  Rather, it tries to amortize
	 *	the cost...
	 */
	if (last_request == request) {
		last_request = rl_next(last_request);
	}

	/*
	 *	Unsplice the container node from the doubly-linked
	 *	list for this packet ID, fixing up head/tail pointers
	 *	when the node was first/last.
	 */
	if (prev == NULL) {
		request_list[id].first_request = next;
	} else {
		prev->next = next;
	}

	if (next == NULL) {
		request_list[id].last_request = prev;
	} else {
		next->prev = prev;
	}

	free(request->container);

#ifdef WITH_SNMP
	/*
	 *	Update the SNMP statistics.
	 *
	 *	Note that we do NOT do this in rad_respond(),
	 *	as that function is called from child threads.
	 *	Instead, we update the stats when a request is
	 *	deleted, because only the main server thread calls
	 *	this function...
	 */
	if (mainconfig.do_snmp) {
		switch (request->reply->code) {
		case PW_AUTHENTICATION_ACK:
			rad_snmp.auth.total_responses++;
			rad_snmp.auth.total_access_accepts++;
			break;

		case PW_AUTHENTICATION_REJECT:
			rad_snmp.auth.total_responses++;
			rad_snmp.auth.total_access_rejects++;
			break;

		case PW_ACCESS_CHALLENGE:
			rad_snmp.auth.total_responses++;
			rad_snmp.auth.total_access_challenges++;
			break;

		case PW_ACCOUNTING_RESPONSE:
			rad_snmp.acct.total_responses++;
			break;

		default:
			break;
		}
	}
#endif

	/*
	 *	Delete the request from the tree.
	 */
	{
		rbnode_t *node;

		node = rbtree_find(request_tree, request);
		rad_assert(node != NULL);
		rbtree_delete(request_tree, node);

		/*
		 *	If there's a proxied packet, and we're still
		 *	waiting for a reply, then delete the packet
		 *	from the list of outstanding proxied requests.
		 */
		if (request->proxy &&
		    (request->proxy_outstanding > 0)) {
			pthread_mutex_lock(&proxy_mutex);
			node = rbtree_find(proxy_tree, request);
			rl_delete_proxy(request, node);
			pthread_mutex_unlock(&proxy_mutex);
		}
	}

	request_free(&request);

	request_list[id].request_count--;
}
int main() { int i; rbtree t = NULL; t = rbtree_create(); for(i=0; i<20; i++) { int x = rand() % 10000; int y = rand() % 10000; #ifdef TRACE print_tree(t); printf("Inserting %d -> %d\n\n", x, y); #endif rbtree_insert(t, (void*)x, (void*)y, compare_int); assert(rbtree_lookup(t, (void*)x, compare_int) == (void*)y); } print_tree(t); puts(""); // TODO: memory leak! free(t); t = rbtree_create(); for(i=0; i<20; i++) { int x = i; int y = i; #ifdef TRACE print_tree(t); printf("Inserting %d -> %d\n\n", x, y); #endif rbtree_insert(t, (void*)x, (void*)y, compare_int); assert(rbtree_lookup(t, (void*)x, compare_int) == (void*)y); } print_tree(t); puts(""); // TODO: memory leak! free(t); t = rbtree_create(); for(i=0; i<20; i++) { int x = 19 - i; int y = 19 - i; #ifdef TRACE print_tree(t); printf("Inserting %d -> %d\n\n", x, y); #endif rbtree_insert(t, (void*)x, (void*)y, compare_int); assert(rbtree_lookup(t, (void*)x, compare_int) == (void*)y); } print_tree(t); puts(""); #if 0 for(i=0; i<60000; i++) { int x = rand() % 10000; #ifdef TRACE print_tree(t); printf("Deleting key %d\n\n", x); #endif rbtree_delete(t, (void*)x, compare_int); } #endif return 0; }
static void unmap_path(struct watch *watch) { rbtree_delete(&tree_path_wd, watch); }