/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1; the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. this is because requests are recycled and their state is reset,
 * so if a request is completed and then reused, the poor guy waiting on it may never see
 * the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
    struct ccci_request *req = NULL;

retry:
    req = ccci_req_dequeue(&req_pool);
    if(req) {
        req->dir = dir; /* record the transfer direction; dir is otherwise unused here */
        if(size>0) {
            req->skb = ccci_alloc_skb(size, blk1);
            req->policy = RECYCLE;
            if(req->skb)
                CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
        } else {
            req->skb = NULL;
            req->policy = NOOP;
        }
        req->blocking = blk2;
    } else {
        if(blk1) {
            /* bail out if a signal interrupts the sleep; otherwise this
             * retry loop would spin without ever sleeping again */
            if(wait_event_interruptible(req_pool.req_wq, (req_pool.count>0)) == -ERESTARTSYS)
                return NULL;
            goto retry;
        }
        CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
    }
    if(unlikely(req && size>0 && !req->skb)) { /* req may be NULL on the non-blocking failure path */
        CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
        req->policy = NOOP;
        ccci_free_req(req);
        req = NULL;
    }
    return req;
}
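
A typical caller, for context. This is only a sketch: ccci_port_send_request, CCCI_MONITOR_CH and the OUT direction value are assumed names for illustration and do not appear in the listing above.

/* hypothetical caller: allocate a blocking TX request, fill its skb,
 * and hand it to the lower layer. blk1=1 sleeps until a request (and
 * its skb) is available; blk2=1 lets the send path sleep later on the
 * queue threshold. */
static int ccci_send_example(void *data, int len)
{
	struct ccci_request *req = ccci_alloc_req(OUT, len, 1, 1);

	if (!req) /* still possible, e.g. the skb allocation failed */
		return -ENOMEM;
	memcpy(skb_put(req->skb, len), data, len);
	return ccci_port_send_request(req, CCCI_MONITOR_CH); /* assumed API */
}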
Example #2
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1; the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. this is because requests are recycled and their state is reset,
 * so if a request is completed and then reused, the poor guy waiting on it may never see
 * the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	unsigned long flags;

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for(i=0; i<BM_POOL_SIZE; i++) {
		if(req_pool[i].state == IDLE) {
			// important check when a request is passed across layers: make sure this request is no longer in any list
			if(req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d, from %ps\n", i, __builtin_return_address(0));
				list_del(&req_pool[i].entry);
			}
		}
	}
	if(req) {
		req->dir = dir;
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if(req) {
		if(size>0) {
			req->skb = ccci_alloc_skb(size, blk1);
			req->policy = RECYCLE;
			if(req->skb)
				CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
			req->policy = NOOP;
		}
		req->blocking = blk2;
	} else {
		if(blk1) {
			/* bail out if a signal interrupts the sleep; otherwise this
			 * retry loop would spin without ever sleeping again */
			if(wait_event_interruptible(req_pool_wq, (req_pool_cnt>0)) == -ERESTARTSYS)
				return NULL;
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "fail to allock req for %ps, no retry\n", __builtin_return_address(0));
	}
	if(unlikely(req && size>0 && !req->skb)) { /* req may be NULL on the non-blocking failure path */
		CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
		req->policy = NOOP;
		ccci_free_req(req);
		req = NULL;
	}
	return req;
}
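
The LIST_POISON1/LIST_POISON2 test above relies on documented <linux/list.h> behavior: list_del() poisons the removed entry's pointers, so a poisoned entry is guaranteed to be off every list. A minimal standalone sketch of that invariant (demo names only, not part of the listings):

#include <linux/list.h>

struct poison_demo {
	struct list_head entry;
};

/* true only after list_del() has removed the node from whatever list
 * held it: list_del() sets entry.next = LIST_POISON1 and
 * entry.prev = LIST_POISON2 */
static bool demo_is_detached(struct poison_demo *d)
{
	return d->entry.next == LIST_POISON1 &&
	       d->entry.prev == LIST_POISON2;
}

Example #3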
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned
 * the 1st is decided by @blk1; the 2nd and 3rd are decided by @blk2, waiting on @wq.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. this is because requests are recycled and their state is reset,
 * so if a request is completed and then reused, the poor guy waiting on it may never see
 * the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	struct sk_buff *skb = NULL;
	unsigned long flags;

#ifdef CCCI_STATISTIC
	core_statistic_data.alloc_count++;
#endif

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for(i=0; i<BM_POOL_SIZE; i++) {
		if(req_pool[i].state == IDLE) {
			// important check when a request is passed across layers: make sure this request is no longer in any list
			if(req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d\n", i);
			}
		}
	}
	if(req) {
		req->dir = dir;
#ifdef CCCI_STATISTIC
		req->time_step = 0;
		req->time_stamp = ktime_get_real();
		memset(req->time_trace, 0, sizeof(req->time_trace));
#endif
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if(req) {
		if(size>0) {
			skb = ccci_alloc_skb(size);
			req->skb = skb;
			if(!skb) // should not happen
				CCCI_ERR_MSG(-1, BM, "NULL skb for req %p size %d\n", req, size);
			else /* don't pass a NULL skb to skb_size() */
				CCCI_DBG_MSG(-1, BM, "req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
		}
		req->blocking = blk2;
	} else {
#ifdef CCCI_STATISTIC
		core_statistic_data.alloc_empty_count++;
#endif
		if(blk1) {
			/* bail out if a signal interrupts the sleep; otherwise this
			 * retry loop would spin without ever sleeping again */
			if(wait_event_interruptible(req_pool_wq, (req_pool_cnt>0)) == -ERESTARTSYS)
				return NULL;
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "%ps alloc req fail, no retry\n", __builtin_return_address(0));
	}
	
	return req;
}
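
None of the listings show the recycle path, but the wait_event_interruptible(req_pool_wq, req_pool_cnt > 0) calls above only work if the free side wakes that queue. A hedged sketch of what ccci_free_req plausibly does, using the pool globals from the second and third listings; the actual body is not shown in this source:

/* sketch of the implied recycle path: release the skb according to
 * the request's policy, mark the request IDLE under the pool lock,
 * and wake any allocator sleeping in ccci_alloc_req() */
static void ccci_free_req_sketch(struct ccci_request *req)
{
	unsigned long flags;

	if (req->policy == RECYCLE && req->skb)
		dev_kfree_skb_any(req->skb); /* or return it to an skb pool */
	req->skb = NULL;

	spin_lock_irqsave(&req_pool_lock, flags);
	req->state = IDLE;
	req_pool_cnt++;
	spin_unlock_irqrestore(&req_pool_lock, flags);

	wake_up_all(&req_pool_wq); /* pairs with wait_event_interruptible() in ccci_alloc_req() */
}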