Example #1
void
tws_circular_aenq_insert(struct tws_softc *sc, struct tws_circular_q *cq,
    struct tws_event_packet *aen)
{

    struct tws_event_packet *q = (struct tws_event_packet *)cq->q;
    volatile u_int16_t head, tail;
    u_int8_t retr;
    KKASSERT(lockstatus(&sc->gen_lock, curthread) != 0);

    head = cq->head;
    tail = cq->tail;
    retr = q[tail].retrieved;

    memcpy(&q[tail], aen, sizeof(struct tws_event_packet));
    tail = (tail+1) % cq->depth;

    if ( head == tail ) { /* q is full */
        if ( retr != TWS_AEN_RETRIEVED )
            cq->overflow = 1;
        cq->head = (head+1) % cq->depth;
    }
    cq->tail = tail;

}
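The insert above treats head == tail (after advancing tail) as "full" and makes room by advancing head, dropping the oldest unretrieved event and flagging cq->overflow; head == tail otherwise means "empty". A minimal retrieval counterpart might look like the sketch below. It is hypothetical (the driver's real retrieval path is not shown here) and assumes the same tws_circular_q layout plus the caller holding sc->gen_lock, mirroring the KKASSERT above.

static struct tws_event_packet *
tws_circular_aenq_retrieve_sketch(struct tws_circular_q *cq)
{
    struct tws_event_packet *q = (struct tws_event_packet *)cq->q;
    struct tws_event_packet *aen;

    if (cq->head == cq->tail)           /* empty */
        return (NULL);
    aen = &q[cq->head];
    /* mark retrieved so a wrapping insert does not flag overflow */
    aen->retrieved = TWS_AEN_RETRIEVED;
    cq->head = (cq->head + 1) % cq->depth;
    return (aen);
}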
Example #2
int
tws_bus_scan(struct tws_softc *sc)
{
    struct cam_path *path;
    union ccb       *ccb;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    KASSERT(sc->sim, ("sim not allocated"));
    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);

    ccb = sc->scan_ccb;

    if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim),
                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        /* lockmgr(&sc->sim_lock, LK_RELEASE); */
        return(EIO);
    }
    xpt_setup_ccb(&ccb->ccb_h, path, 5);
    ccb->ccb_h.func_code = XPT_SCAN_BUS;
    ccb->ccb_h.cbfcnp = tws_bus_scan_cb;
    ccb->crcn.flags = CAM_FLAG_NONE;
    xpt_action(ccb);

    return(0);
}
Example #3
static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

	KKASSERT(lockstatus(&park->park_mtx, curthread) == LK_EXCLUSIVE);
	park->park_refcount++;
}
Example #4
int
msdosfs_islocked(void *v)
{
	struct vop_islocked_args *ap = v;

	return (lockstatus(&VTODE(ap->a_vp)->de_lock));
}
Example #5
void
mfi_syspd_enable(struct mfi_system_pd *sc)
{

	KKASSERT(lockstatus(&sc->pd_controller->mfi_io_lock, curthread) != 0);
	sc->pd_flags &= ~MFI_DISK_FLAGS_DISABLED;
}
Example #6
/*
 * Check for a locked inode.
 */
int
ufs_islocked(void *v)
{
	struct vop_islocked_args *ap = v;

	return (lockstatus(&VTOI(ap->a_vp)->i_lock));
}
Example #7
/*
 * bfq_cancel_all(): .cancel_all callback of the bfq policy. Cancel
 * all bios queued in each bfq_thread_io structure in the
 * wf2q tree.
 *
 * lock:
 *	BFQ_LOCK: protect from wf2q_insert operation in bfq_queue() and
 *	bfq_dequeue(); wf2q_get_next operation in bfq_dequeue()
 *	THREAD_IO_LOCK: protect from queue iteration in bfq_dequeue() and
 *	queue insertion in bfq_queue()
 *
 * refcount:
 *	unref thread_io structures; they are referenced in queue(),
 *	when a bio is queued. The refcount may decrease to zero.
 *
 */
static void
bfq_cancel_all(struct dsched_disk_ctx *diskctx)
{
	struct bio *bio;
	struct bfq_thread_io *bfq_tdio;
	struct bfq_disk_ctx *bfq_diskctx = (struct bfq_disk_ctx *)diskctx;

	BFQ_LOCK(bfq_diskctx);

	while ((bfq_tdio = wf2q_get_next_thread_io(&bfq_diskctx->bfq_wf2q))) {
		DSCHED_THREAD_IO_LOCK(&bfq_tdio->head);
		KKASSERT(lockstatus(&bfq_tdio->head.lock, curthread) == LK_EXCLUSIVE);

		while ((bio = TAILQ_FIRST(&bfq_tdio->head.queue))) {
			bfq_tdio->head.qlength--;
			TAILQ_REMOVE(&bfq_tdio->head.queue, bio, link);
			dsched_cancel_bio(bio);
			dsched_thread_io_unref(&bfq_tdio->head);
		}

		KKASSERT(bfq_tdio->head.qlength == 0);
		DSCHED_THREAD_IO_UNLOCK(&bfq_tdio->head);
	}

	BFQ_UNLOCK(bfq_diskctx);
}
Example #8
/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	struct puffs_req *creq = park->park_creq;
	int refcnt;

	KKASSERT(lockstatus(&park->park_mtx, curthread) == LK_EXCLUSIVE);
	refcnt = park->park_refcount -= howmany;
	lockmgr(&park->park_mtx, LK_RELEASE);

	KKASSERT(refcnt >= 0);

	if (refcnt == 0) {
		if (preq)
			kfree(preq, M_PUFFS);
#if 1
		if (creq)
			kfree(creq, M_PUFFS);
#endif
		objcache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
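Together with puffs_msgpark_reference() from Example #3, this implies a simple protocol: park_mtx is held exclusively while references are taken, and puffs_msgpark_release1() both drops references and releases the mutex, freeing the park when the count reaches zero. A hedged usage sketch (the function name and the work in the middle are hypothetical):

static void
puffs_msgpark_use_sketch(struct puffs_msgpark *park)
{
	lockmgr(&park->park_mtx, LK_EXCLUSIVE);
	puffs_msgpark_reference(park);
	/* ... park->park_preq cannot be freed while we hold a reference ... */
	puffs_msgpark_release1(park, 1);	/* drops our ref and park_mtx */
}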
Example #9
/*
 * Wait for command completion. Assumes mutex is held.
 * Returns an SMB_* error code.
 */
static int
ichsmb_wait(sc_p sc)
{
	const device_t dev = sc->dev;
	int error, smb_error;

	KASSERT(sc->ich_cmd != -1,
	    ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd));
	KKASSERT(lockstatus(&sc->mutex, curthread) != 0);
	error = lksleep(sc, &sc->mutex, 0, "ichsmb", hz / 4);
	DBG("msleep -> %d\n", error);
	switch (error) {
	case 0:
		smb_error = sc->smb_error;
		break;
	case EWOULDBLOCK:
		device_printf(dev, "device timeout, status=0x%02x\n",
		    bus_read_1(sc->io_res, ICH_HST_STA));
		sc->ich_cmd = -1;
		smb_error = SMB_ETIMEOUT;
		break;
	default:
		smb_error = SMB_EABORT;
		break;
	}
	return (smb_error);
}
Example #10
/*
 * Check for a locked inode.
 */
int
exfs_islocked(void *v)
{
	struct vop_islocked_args *ap = v;

	printf("zawel v exfs_islocked\n");
	return (lockstatus(&VTOI(ap->a_vp)->i_lock));
}
Example #11
int
fusefs_islocked(void *v)
{
	struct vop_islocked_args *ap = v;

	DPRINTF("fusefs_islocked\n");
	return (lockstatus(&VTOI(ap->a_vp)->ufs_ino.i_lock));
}
Example #12
void
sim_lock_assert_unowned(sim_lock *lock)
{
	if (lock) {
		if (lock != &sim_mplock)
			KKASSERT(lockstatus(lock, curthread) == 0);
	}
}
Example #13
/*
 * lock can be NULL if sim was &dead_sim
 */
void
sim_lock_assert_owned(sim_lock *lock)
{
	if (lock) {
		if (lock == &sim_mplock)
			ASSERT_MP_LOCK_HELD();
		else
			KKASSERT(lockstatus(lock, curthread) != 0);
	}
}
Example #14
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}
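A hedged usage sketch of vn_islocked_unlock() (the surrounding function is hypothetical): save the returned status, perform work that must not hold the vnode lock, and restore the exclusive lock only if this thread had owned it.

static void
vn_drop_and_restore_sketch(struct vnode *vp)
{
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* unlocks only our exclusive lock */
	/* ... blocking work that must not hold vp->v_lock ... */
	if (vpls == LK_EXCLUSIVE)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}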
Example #15
/*
 * Run the chain and if the bottom-most object is a vnode-type lock the
 * underlying vnode.  A locked vnode or NULL is returned.
 */
struct vnode *
vnode_pager_lock(vm_object_t object)
{
	struct vnode *vp = NULL;
	vm_object_t lobject;
	vm_object_t tobject;
	int error;

	if (object == NULL)
		return(NULL);

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	lobject = object;

	while (lobject->type != OBJT_VNODE) {
		if (lobject->flags & OBJ_DEAD)
			break;
		tobject = lobject->backing_object;
		if (tobject == NULL)
			break;
		vm_object_hold_shared(tobject);
		if (tobject == lobject->backing_object) {
			if (lobject != object) {
				vm_object_lock_swap();
				vm_object_drop(lobject);
			}
			lobject = tobject;
		} else {
			vm_object_drop(tobject);
		}
	}
	while (lobject->type == OBJT_VNODE &&
	       (lobject->flags & OBJ_DEAD) == 0) {
		/*
		 * Extract the vp
		 */
		vp = lobject->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			if (lobject->handle == vp)
				break;
			vput(vp);
		} else {
			kprintf("vnode_pager_lock: vp %p error %d "
				"lockstatus %d, retrying\n",
				vp, error,
				lockstatus(&vp->v_lock, curthread));
			tsleep(object->handle, 0, "vnpgrl", hz);
		}
		vp = NULL;
	}
	if (lobject != object)
		vm_object_drop(lobject);
	return (vp);
}
Example #16
int
cam_sim_cond_lock(sim_lock *lock)
{
	if (lock == &sim_mplock) {
		get_mplock();
		return(1);
	} else if (lockstatus(lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(lock, LK_EXCLUSIVE);
		return(1);
	}
	return(0);
}
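The return value records whether the lock was actually taken, so a matching conditional unlock can undo exactly what cam_sim_cond_lock() did. A sketch of such a counterpart (assumed here, not quoted from the tree):

void
cam_sim_cond_unlock_sketch(sim_lock *lock, int doun)
{
	if (doun == 0)
		return;		/* we did not acquire it; leave it alone */
	if (lock == &sim_mplock)
		rel_mplock();
	else
		lockmgr(lock, LK_RELEASE);
}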
Example #17
void
drm_event_wakeup(struct drm_pending_event *e)
{
	struct drm_file *file_priv;
	struct drm_device *dev;

	file_priv = e->file_priv;
	dev = file_priv->dev;
	KKASSERT(lockstatus(&dev->event_lock, curthread) != 0);

	wakeup(&file_priv->event_space);
}
Example #18
int
mfi_syspd_disable(struct mfi_system_pd *sc)
{

	KKASSERT(lockstatus(&sc->pd_controller->mfi_io_lock, curthread) != 0);
	if (sc->pd_flags & MFI_DISK_FLAGS_OPEN) {
		if (sc->pd_controller->mfi_delete_busy_volumes)
			return (0);
		device_printf(sc->pd_dev, "Unable to delete busy syspd device\n");
		return (EBUSY);
	}
	sc->pd_flags |= MFI_DISK_FLAGS_DISABLED;
	return (0);
}
Example #19
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    KKASSERT(lockstatus(&sc->gen_lock, curthread) != 0);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {

        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            KASSERT(sc->tws_state == TWS_INIT , ("invalid state transition"));
            sc->tws_state = TWS_ONLINE;
            break;

        case TWS_RESET_START:
            /* multiple reset ? */
            KASSERT(sc->tws_state != TWS_RESET, ("invalid state transition"));

            /* we can transition to reset state from any state */
            sc->tws_prev_state = sc->tws_state;
            sc->tws_state = TWS_RESET;
            break;

        case TWS_RESET_COMPLETE:
            KASSERT(sc->tws_state == TWS_RESET, ("invalid state transition"));
            sc->tws_state = sc->tws_prev_state;
            break;

        case TWS_SCAN_FAILURE:
            KASSERT(sc->tws_state == TWS_ONLINE , ("invalid state transition"));
            sc->tws_state = TWS_OFFLINE;
            break;

        case TWS_UNINIT_START:
            KASSERT(sc->tws_state == TWS_ONLINE || sc->tws_state == TWS_OFFLINE,
                           ("invalid state transition"));
            sc->tws_state = TWS_UNINIT;
            break;
    }

}
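The asserts above encode a small state machine: TWS_INIT -> TWS_ONLINE in normal bring-up, with TWS_RESET reachable from any state and restoring the previous state on completion. A hedged caller-side sketch; the lockmgr calls reflect the gen_lock assertion, and the sequencing is illustrative only:

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_INIT_START);      /* -> TWS_INIT */
    /* ... controller initialization ... */
    tws_send_event(sc, TWS_INIT_COMPLETE);   /* TWS_INIT -> TWS_ONLINE */
    lockmgr(&sc->gen_lock, LK_RELEASE);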
Example #20
static int
devfs_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if it is locked already; if not, we acquire the devfs lock.
	 */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev().
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}
Example #21
int
sim_lock_sleep(void *ident, int flags, const char *wmesg, int timo,
	       sim_lock *lock)
{
	int retval;

	if (lock != &sim_mplock) {
		/* lock should be held already */
		KKASSERT(lockstatus(lock, curthread) != 0);
		tsleep_interlock(ident, flags);
		lockmgr(lock, LK_RELEASE);
		retval = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	} else {
		retval = tsleep(ident, flags, wmesg, timo);
	}

	if (lock != &sim_mplock) {
		lockmgr(lock, LK_EXCLUSIVE);
	}

	return (retval);
}
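sim_lock_sleep() closes the classic lost-wakeup race: tsleep_interlock() registers the thread on the ident before the lock is dropped, so a wakeup() issued between lockmgr(..., LK_RELEASE) and the tsleep() is not lost. A hedged caller sketch follows.

	/*
	 * "softc", SOFTC_DONE and softc->lock are hypothetical.  Assumes
	 * softc->lock is held, matching the KKASSERT in sim_lock_sleep().
	 */
	while ((softc->flags & SOFTC_DONE) == 0)
		sim_lock_sleep(&softc->flags, 0, "simwt", hz, softc->lock);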
Example #22
/*
 * bfq_dequeue(): dispatch bios to the disk driver.
 *
 * This function will push as many bios as the number of free slots
 * in the tag queue.
 *
 * In the progress of dispatching, the following events may happen:
 *  - Current thread is timeout: Expire the current thread for
 *    BFQ_REASON_TIMEOUT, and select a new thread to serve in the
 *    wf2q tree.
 *
 *  - Current thread runs out of its budget: Expire the current thread
 *    for BFQ_REASON_OUT_OF_BUDGET, and select a new thread to serve
 *
 *  - Current thread has no further bios in its queue: if the AS feature
 *    is turned on, the bfq scheduler sets an alarm and starts to suspend.
 *    The bfq_timeout() or bfq_queue() calls may resume the scheduler.
 *
 * Implementation note: The bios selected to be dispatched will first
 * be stored in the bio_to_dispatch array. After this function releases
 * all the locks it holds, it will call dsched_strategy_request_polling()
 * for each bio stored.
 *
 * With the help of bfq_disk_ctx->pending_dequeue,
 * there will be only one bfq_dequeue pending on the BFQ_LOCK.
 *
 * lock:
 *	BFQ_LOCK: protects against wf2q_augtree operations in bfq_queue()
 *	THREAD_IO_LOCK: locks the active_tdio. Protects against queue
 *	insertions in bfq_queue(); protects active_tdio->budget
 *
 * refcount:
 *  If the scheduler decides to suspend, the refcount of active_tdio
 *  increases by 1. The matching decrease is in bfq_queue() and
 *  bfq_timeout().
 * blocking:
 *  May block on the disk driver lock, depending on the driver.
 *
 * Calling path:
 * The callers could be:
 *	bfq_queue(), bfq_timeout() and the registered polling function.
 *
 *	caller --> helper_msg_dequeue --lwkt_msg--> helper_thread-> me
 *
 */
void
bfq_dequeue(struct dsched_disk_ctx *diskctx)
{
	int free_slots,
	    bio_index = 0, i,
	    remaining_budget = 0; /* remaining budget of current active process */

	struct bio *bio, *bio_to_dispatch[33];
	struct bfq_thread_io *active_tdio = NULL;
	struct bfq_disk_ctx *bfq_diskctx = (struct bfq_disk_ctx *)diskctx;

	BFQ_LOCK(bfq_diskctx);
	atomic_cmpset_int(&bfq_diskctx->pending_dequeue, 1, 0);

	/*
	 * The whole scheduler is waiting for further bios
	 * from the process currently being served.
	 */
	if (bfq_diskctx->bfq_blockon != NULL)
		goto rtn;

	remaining_budget = bfq_diskctx->bfq_remaining_budget;
	active_tdio = bfq_diskctx->bfq_active_tdio;
	dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: dequeue: Im in. active_tdio = %p\n", active_tdio);

	free_slots = diskctx->max_tag_queue_depth - diskctx->current_tag_queue_depth;
	KKASSERT(free_slots >= 0 && free_slots <= 32);

	if (active_tdio)
		DSCHED_THREAD_IO_LOCK(&active_tdio->head);

	while (free_slots) {
		/* Here active_tdio must be locked ! */
		if (active_tdio) {
			/*
			 * the bio_done function has marked the current
			 * tdio timeout
			 */
			if (active_tdio->maybe_timeout) {
				dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: %p time out in dequeue()\n", active_tdio);
				wf2q_update_vd(active_tdio, active_tdio->budget - remaining_budget);
				bfq_expire(bfq_diskctx, active_tdio, BFQ_REASON_TIMEOUT);

				/*
				 * There are still bios not dispatched;
				 * reinsert the tdio into the aug-tree.
				 */
				if (active_tdio->head.qlength > 0) {
					wf2q_insert_thread_io(&bfq_diskctx->bfq_wf2q, active_tdio);
					KKASSERT(bfq_diskctx->bfq_wf2q.wf2q_tdio_count);
				}

				active_tdio->maybe_timeout = 0;
				DSCHED_THREAD_IO_UNLOCK(&active_tdio->head);
				active_tdio = NULL;
				continue;
			}

			/* select next bio to dispatch */
			/* TODO: a wiser selection */
			KKASSERT(lockstatus(&active_tdio->head.lock, curthread) == LK_EXCLUSIVE);
			bio = TAILQ_FIRST(&active_tdio->head.queue);
			dsched_debug(BFQ_DEBUG_NORMAL, "bfq: the first bio in queue of active_tdio %p is %p\n", active_tdio, bio);

			dsched_debug(BFQ_DEBUG_VERBOSE, "bfq: active_tdio %p exists, remaining budget = %d, tdio budget = %d\n, qlength = %d, first bio = %p, first bio cmd = %d, first bio size = %d\n", active_tdio, remaining_budget, active_tdio->budget, active_tdio->head.qlength, bio, bio?bio->bio_buf->b_cmd:-1, bio?bio->bio_buf->b_bcount:-1);

			/*
			 * The bio is not read or write, just
			 * push it down.
			 */
			if (bio && (bio->bio_buf->b_cmd != BUF_CMD_READ) &&
			    (bio->bio_buf->b_cmd != BUF_CMD_WRITE)) {
				dsched_debug(BFQ_DEBUG_NORMAL, "bfq: remove bio %p from the queue of %p\n", bio, active_tdio);
				KKASSERT(lockstatus(&active_tdio->head.lock, curthread) == LK_EXCLUSIVE);
				TAILQ_REMOVE(&active_tdio->head.queue, bio, link);
				active_tdio->head.qlength--;
				free_slots--;

#if 0
				dsched_strategy_request_polling(diskctx->dp, bio, diskctx);
#endif
				bio_to_dispatch[bio_index++] = bio;
				KKASSERT(bio_index <= bfq_diskctx->head.max_tag_queue_depth);
				continue;
			}
			/*
			 * Out of budget, but not because the size of the bio
			 * is larger than the complete budget.
			 * If the size of the bio is larger than the complete
			 * budget, then use a complete budget to cover it.
			 */
			if (bio && (remaining_budget < BIO_SIZE(bio)) &&
			    (remaining_budget != active_tdio->budget)) {
				/* charge budget used */
				wf2q_update_vd(active_tdio, active_tdio->budget - remaining_budget);
				bfq_expire(bfq_diskctx, active_tdio, BFQ_REASON_OUT_OF_BUDGET);
				wf2q_insert_thread_io(&bfq_diskctx->bfq_wf2q, active_tdio);
				dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: thread %p ran out of budget\n", active_tdio);
				DSCHED_THREAD_IO_UNLOCK(&active_tdio->head);
				active_tdio = NULL;
			} else { /* if (bio && remaining_budget < BIO_SIZE(bio) && remaining_budget != active_tdio->budget) */

				/*
				 * Either we have enough budget, or we have a
				 * complete budget and the size of the bio is
				 * larger than that.
				 */
				if (bio) {
					/* dispatch */
					remaining_budget -= BIO_SIZE(bio);
					/*
					 * The size of the first bio is larger
					 * than the whole budget, we should
					 * charge the extra part
					 */
					if (remaining_budget < 0)
						wf2q_update_vd(active_tdio, -remaining_budget);
					/* compensate */
					wf2q_update_vd(active_tdio, -remaining_budget);
					/*
					 * remaining_budget may be < 0, but to
					 * prevent subtracting a negative number
					 * from the current tdio's budget,
					 * remaining_budget has to be >= 0.
					 */
					remaining_budget = MAX(0, remaining_budget);
					dsched_debug(BFQ_DEBUG_NORMAL, "bfq: remove bio %p from the queue of %p\n", bio, active_tdio);
					KKASSERT(lockstatus(&active_tdio->head.lock, curthread) == LK_EXCLUSIVE);
					TAILQ_REMOVE(&active_tdio->head.queue, bio, link);
					free_slots--;
					active_tdio->head.qlength--;
					active_tdio->bio_dispatched++;
					wf2q_inc_tot_service(&bfq_diskctx->bfq_wf2q, BIO_SIZE(bio));
					dsched_debug(BFQ_DEBUG_VERBOSE,
					    "BFQ: %p's bio dispatched, size=%d, remaining_budget = %d\n",
					    active_tdio, BIO_SIZE(bio), remaining_budget);
#if 0
					dsched_strategy_request_polling(diskctx->dp, bio, diskctx);
#endif
					bio_to_dispatch[bio_index++] = bio;
					KKASSERT(bio_index <= bfq_diskctx->head.max_tag_queue_depth);

				} else { /* if (bio) */

					KKASSERT(active_tdio);
					/*
					 * If the AS feature is switched off,
					 * expire the tdio as well
					 */
					if ((remaining_budget <= 0) ||
					    !(bfq_diskctx->bfq_flag & BFQ_FLAG_AS) ||
					    !active_tdio->tdio_as_switch) {
						active_tdio->budget -= remaining_budget;
						wf2q_update_vd(active_tdio, active_tdio->budget);
						bfq_expire(bfq_diskctx, active_tdio, BFQ_REASON_OUT_OF_BUDGET);
						DSCHED_THREAD_IO_UNLOCK(&active_tdio->head);
						active_tdio = NULL;
					} else {

						/* no further bio, wait for a while */
						bfq_diskctx->bfq_blockon = active_tdio;
						/*
						 * Increase ref count to ensure that
						 * tdio will not be destroyed during waiting.
						 */
						dsched_thread_io_ref(&active_tdio->head);
						/*
						 * If the tdio is seeky but not thinking for
						 * too long, we wait for it a little less.
						 */
						if (active_tdio->seek_samples >= BFQ_VALID_MIN_SAMPLES && BFQ_TDIO_SEEKY(active_tdio))
							callout_reset(&bfq_diskctx->bfq_callout, BFQ_T_WAIT_MIN, (void (*) (void *))helper_msg_as_timeout, bfq_diskctx);
						else
							callout_reset(&bfq_diskctx->bfq_callout, BFQ_T_WAIT, (void (*) (void *))helper_msg_as_timeout, bfq_diskctx);

						/* save the start time of blocking */
						getmicrotime(&active_tdio->as_start_time);

						dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: blocked on %p, remaining_budget = %d\n", active_tdio, remaining_budget);
						DSCHED_THREAD_IO_UNLOCK(&active_tdio->head);
						goto save_and_rtn;
					}
				}
			}
		} else { /* if (active_tdio) */
			/* there is no active tdio */

			/* no pending bios at all */
			active_tdio = wf2q_get_next_thread_io(&bfq_diskctx->bfq_wf2q);

			if (!active_tdio) {
				KKASSERT(bfq_diskctx->bfq_wf2q.wf2q_tdio_count == 0);
				dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: no more eligible tdio!\n");
				goto save_and_rtn;
			}

			/*
			 * A new tdio is picked;
			 * initialize its service-related statistics.
			 */
			DSCHED_THREAD_IO_LOCK(&active_tdio->head);
			active_tdio->service_received = 0;

			/*
			 * Reset the maybe_timeout flag, which
			 * may be set by a biodone after the service is done.
			 */
			getmicrotime(&active_tdio->service_start_time);
			active_tdio->maybe_timeout = 0;

			remaining_budget = active_tdio->budget;
			dsched_debug(BFQ_DEBUG_VERBOSE, "bfq: active_tdio %p selected, remaining budget = %d, tdio budget = %d\n, qlength = %d\n", active_tdio, remaining_budget, active_tdio->budget, active_tdio->head.qlength);
		}

	}/* while (free_slots) */

	/* reach here only when free_slots == 0 */
	if (active_tdio) /* && lockcount(&active_tdio->head.lock) > 0) */
		DSCHED_THREAD_IO_UNLOCK(&active_tdio->head);

save_and_rtn:
	/* save the remaining budget */
	bfq_diskctx->bfq_remaining_budget = remaining_budget;
	bfq_diskctx->bfq_active_tdio = active_tdio;
rtn:
	BFQ_UNLOCK(bfq_diskctx);
	/* dispatch the planned bios */
	for (i = 0; i < bio_index; i++)
		dsched_strategy_request_polling(diskctx->dp, bio_to_dispatch[i], diskctx);

}
Example #23
/*
 * bfq_queue(): .queue callback of the bfq policy.
 *
 * A thread calls this function to hand in its I/O requests (bio).
 * Their bios are stored in the per-thread queue, in the tdio structure.
 * Currently, the sync/async bios are queued together, which may cause
 * some performance issues.
 *
 * Besides queueing bios, this function also calculates the average
 * thinking time and average seek distance of a thread, using the
 * information in bio structure.
 *
 * If the calling thread is being waited on by the bfq scheduler due to
 * the AS feature, this function will cancel the callout alarm
 * and resume the scheduler to continue serving this thread.
 *
 * lock:
 *   THREAD_IO_LOCK: protect from queue iteration in bfq_dequeue()
 *   BFQ_LOCK: protect from other insertions/deletions in wf2q_augtree
 *   in bfq_queue() or bfq_dequeue().
 *
 * refcount:
 *   If the calling thread is being waited on by the scheduler, the refcount
 *   of the related tdio will decrease by 1 after this function. The
 *   matching increase is in bfq_dequeue(), before resetting the
 *   callout alarm.
 *
 * Return value:
 *  EINVAL: if bio->bio_buf->b_cmd == BUF_CMD_FLUSH
 *  0: bio is queued successfully.
 */
static int
bfq_queue(struct dsched_disk_ctx *diskctx, struct dsched_thread_io *tdio,
		struct  bio *bio)
{
	struct bfq_disk_ctx *bfq_diskctx = (struct bfq_disk_ctx *)diskctx;
	struct bfq_thread_io *bfq_tdio = (struct bfq_thread_io *)tdio;
	int original_qlength;

	/* we do not handle flush requests; push them down to dsched */
	if (__predict_false(bio->bio_buf->b_cmd == BUF_CMD_FLUSH))
		return (EINVAL);

	DSCHED_THREAD_IO_LOCK(tdio);
	KKASSERT(tdio->debug_priv == 0xF00FF00F);
	dsched_debug(BFQ_DEBUG_NORMAL, "bfq: tdio %p pushes bio %p\n", bfq_tdio, bio);

	dsched_set_bio_priv(bio, tdio);
	dsched_thread_io_ref(tdio);

	if ((bio->bio_buf->b_cmd == BUF_CMD_READ) ||
	    (bio->bio_buf->b_cmd == BUF_CMD_WRITE)) {
		bfq_update_tdio_seek_avg(bfq_tdio, bio);
	}

	bfq_update_tdio_ttime_avg(bfq_tdio);

	/* update last_bio_pushed_time */
	getmicrotime(&bfq_tdio->last_bio_pushed_time);

	if ((bfq_tdio->seek_samples > BFQ_VALID_MIN_SAMPLES) &&
	    BFQ_TDIO_SEEKY(bfq_tdio))
		dsched_debug(BFQ_DEBUG_NORMAL, "BFQ: tdio %p is seeky\n", bfq_tdio);

	/*
	 * If a tdio takes too long to think, we disable its AS feature.
	 */
	if ((bfq_tdio->ttime_samples > BFQ_VALID_MIN_SAMPLES) &&
	    (bfq_tdio->ttime_avg > BFQ_T_WAIT * (1000 / hz) * 1000) &&
	    (bfq_tdio->service_received > bfq_tdio->budget / 8)) {
		dsched_debug(BFQ_DEBUG_NORMAL, "BFQ: tdio %p takes too long time to think\n", bfq_tdio);
		bfq_tdio->tdio_as_switch = 0;
	} else {
		bfq_tdio->tdio_as_switch = 1;
	}

	/* insert the bio into the tdio's own queue */
	KKASSERT(lockstatus(&tdio->lock, curthread) == LK_EXCLUSIVE);
	TAILQ_INSERT_TAIL(&tdio->queue, bio, link);
#if 0
	tdio->qlength++;
#endif
	original_qlength = atomic_fetchadd_int(&tdio->qlength, 1);
	DSCHED_THREAD_IO_UNLOCK(tdio);
	/*
	 * A new thread:
	 * In the dequeue function, we remove the thread
	 * from the aug-tree if it has no further bios.
	 * Therefore "new" means a really new thread (a
	 * newly created thread or a thread that pushed no more
	 * bios when the scheduler was waiting for it) or
	 * one that was removed from the aug-tree earlier.
	 */
	if (original_qlength == 0) {
		/*
		 * a really new thread
		 */
		BFQ_LOCK(bfq_diskctx);
		if (bfq_tdio != bfq_diskctx->bfq_active_tdio) {
			/* insert the tdio into the wf2q queue */
			wf2q_insert_thread_io(&bfq_diskctx->bfq_wf2q, bfq_tdio);
		} else {
			/*
			 * the thread being waited on by the scheduler
			 */
			if (bfq_diskctx->bfq_blockon == bfq_tdio) {
				/*
				 * XXX: possible race condition here:
				 * if the callout function is triggered when
				 * the following code is executed, then after
				 * releasing the TDIO lock, the callout function
				 * will set the thread inactive and it will never
				 * be inserted into the aug-tree (so its bio pushed
				 * this time will not be dispatched) until it pushes
				 * further bios
				 */
				bfq_diskctx->bfq_as_hit++;
				bfq_update_as_avg_wait(bfq_diskctx, bfq_tdio, BFQ_AS_STAT_ALL);

				if (callout_pending(&bfq_diskctx->bfq_callout))
					callout_stop(&bfq_diskctx->bfq_callout);
				bfq_diskctx->bfq_blockon = NULL;

				/* ref'ed in dequeue(), before resetting callout */
				dsched_thread_io_unref(&bfq_tdio->head);

				dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: %p pushes a new bio when AS\n", bfq_tdio);
			}
		}

		BFQ_UNLOCK(bfq_diskctx);
	}

	helper_msg_dequeue(bfq_diskctx);

	return 0;
}
Example #24
/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
Example #25
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_CMP_ERR;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_SCSI_IO_REQ);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        /* tws_freeze_simq(sc); */
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags = TWS_DIR_IN;
        else
            req->flags = TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_SCSI_IO_REQ;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
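    /*
     * Worked example of the nibble packing above (illustrative values
     * only, not from the source): target_lun = 0x2A, request_id = 0x123
     *   low nibble:  (0x2A & 0x0F) << 12 = 0xA000
     *                lun_l4__req_id      = 0xA000 | 0x123 = 0xA123
     *   high nibble: (0x2A & 0xF0) << 8  = 0x2000
     *                lun_h4__sgl_entries = 0x2000 (sgl count OR'd in later)
     */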

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
         /* Virtual data addresses.  Need to convert them... */
         if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
             if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                 tws_release_request(req);
                 ccb_h->status = CAM_REQ_TOO_BIG;
                 xpt_done(ccb);
                 return(0);
             }

             req->length = csio->dxfer_len;
             if (req->length) {
                 req->data = csio->data_ptr;
                 /* there is 1 sgl entry */
                 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
             }
         } else {
             TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
             tws_release_request(req);
             ccb_h->status = CAM_REQ_CMP_ERR;
             xpt_done(ccb);
             return(0);
         }
    } else {
         /* Data addresses are physical. */
         TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
         tws_release_request(req);
         ccb_h->status = CAM_REQ_CMP_ERR;
         ccb_h->status |= CAM_RELEASE_SIMQ;
         ccb_h->status &= ~CAM_SIM_QUEUED;
         xpt_done(ccb);
         return(0);
    }
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    callout_reset(ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000,
		  tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}
Example #26
/***********************************************************************
 * Handle a request for action from CAM
 */
static void
amr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct amr_softc	*sc = cam_sim_softc(sim);

	switch(ccb->ccb_h.func_code) {

	/*
	 * Perform SCSI I/O to a physical device.
	 */
	case XPT_SCSI_IO:
	{
		struct ccb_hdr		*ccbh = &ccb->ccb_h;
		struct ccb_scsiio	*csio = &ccb->csio;

		/* Validate the CCB */
		ccbh->status = CAM_REQ_INPROG;

		/* check the CDB length */
		if (csio->cdb_len > AMR_MAX_EXTCDB_LEN)
			ccbh->status = CAM_REQ_INVALID;

		if ((csio->cdb_len > AMR_MAX_CDB_LEN) &&
		    (sc->support_ext_cdb == 0))
			ccbh->status = CAM_REQ_INVALID;

		/* check that the CDB pointer is not to a physical address */
		if ((ccbh->flags & CAM_CDB_POINTER) &&
		    (ccbh->flags & CAM_CDB_PHYS))
			ccbh->status = CAM_REQ_INVALID;
		/*
		 * if there is data transfer, it must be to/from a virtual
		 * address
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if (ccbh->flags & CAM_DATA_PHYS)
				/* we can't map it */
				ccbh->status = CAM_REQ_INVALID;
			if (ccbh->flags & CAM_SCATTER_VALID)
				/* we want to do the s/g setup */
				ccbh->status = CAM_REQ_INVALID;
		}

		/*
		 * If the command is to a LUN other than 0, fail it.
		 * This is probably incorrect, but during testing the
		 * firmware did not seem to respect the LUN field, and thus
		 * devices appear echoed.
		 */
		if (csio->ccb_h.target_lun != 0)
			ccbh->status = CAM_DEV_NOT_THERE;

		/* if we're happy with the request, queue it for attention */
		if (ccbh->status == CAM_REQ_INPROG) {

			/* save the channel number in the ccb */
			csio->ccb_h.sim_priv.entries[0].field= cam_sim_bus(sim);

			amr_enqueue_ccb(sc, ccb);
			amr_startio(sc);
			return;
		}
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		break;
	}

	/*
	 * Return path stats.  Some of these should probably be amended.
	 */
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq	  *cpi = & ccb->cpi;

		debug(3, "XPT_PATH_INQ");
		cpi->version_num = 1;		   /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET|PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = AMR_MAX_TARGETS;
		cpi->max_lun = 0 /* AMR_MAX_LUNS*/;
		cpi->initiator_id = 7;		  /* XXX variable? */
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 132 * 1024;  /* XXX */
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;

		break;
	}

	case XPT_RESET_BUS:
	{
		struct ccb_pathinq	*cpi = & ccb->cpi;

		debug(1, "XPT_RESET_BUS");
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case XPT_RESET_DEV:
	{
		debug(1, "XPT_RESET_DEV");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts = &(ccb->cts);

		debug(3, "XPT_GET_TRAN_SETTINGS");

		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
		}

		spi->flags = CTS_SPI_FLAGS_DISC_ENB;
		spi->bus_width = MSG_EXT_WDTR_BUS_32_BIT;
		spi->sync_period = 6;   /* 40MHz how wide is this bus? */
		spi->sync_offset = 31;  /* How to extract this from board? */

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			| CTS_SPI_VALID_SYNC_OFFSET
			| CTS_SPI_VALID_BUS_WIDTH
			| CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	case XPT_SET_TRAN_SETTINGS:
		debug(3, "XPT_SET_TRAN_SETTINGS");
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;


	/*
	 * Reject anything else as unsupported.
	 */
	default:
		/* we can't do this */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
	xpt_done(ccb);
}