/*
 * Dequeue the oldest pending cachemiss request from this IO
 * thread's async queue. Returns NULL when no request is queued.
 * The queue is protected by iot->async_lock; the pending counters
 * are updated under the same lock.
 */
static tux_req_t * get_cachemiss (iothread_t *iot)
{
	tux_req_t *req = NULL;

	spin_lock(&iot->async_lock);
	if (!list_empty(&iot->async_queue)) {
		struct list_head *entry = iot->async_queue.next;

		req = list_entry(entry, tux_req_t, work);
		Dprintk("get_cachemiss(%p): got req %p.\n", iot, req);
		list_del(entry);
		DEBUG_DEL_LIST(entry);
		iot->nr_async_pending--;
		DEC_STAT(nr_cachemiss_pending);
		/* a queued request must belong to this IO thread: */
		if (req->ti->iot != iot)
			TUX_BUG();
	}
	spin_unlock(&iot->async_lock);

	return req;
}
/*
 * Arm the output-space wakeup for a request blocked on socket IO.
 * Return 1 if the output space condition went away before the
 * handler was added (in which case the wait bit and keepalive
 * timer are rolled back), 0 otherwise.
 */
int add_output_space_event (tux_req_t *req, struct socket *sock)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	/*
	 * blocked due to socket IO?
	 */
	spin_lock_irq(&req->ti->work_lock);
	add_keepalive_timer(req);
	if (test_and_set_bit(0, &req->wait_output_space))
		TUX_BUG();
	INC_STAT(nr_output_space_pending);

	/*
	 * Re-check under the lock: if the socket already has enough
	 * write space, undo the registration and report success.
	 */
	if ((sk->sk_state == TCP_ESTABLISHED) && enough_wspace(sk)) {
		if (test_and_clear_bit(0, &req->wait_output_space)) {
			DEC_STAT(nr_output_space_pending);
			del_keepalive_timer(req);
			ret = 1;
		}
	}
	spin_unlock_irq(&req->ti->work_lock);

	return ret;
}