Example #1
/*
 * iscsi_door_upcall
 *
 * Attempt an upcall through the iscsi_door.  Returns B_FALSE if the
 * door is not available or the call fails, B_TRUE otherwise.
 */
static
boolean_t
iscsi_door_upcall(door_arg_t *arg)
{
	int	error;

	/*
	 * This semaphore limits the number of simultaneous calls
	 * to the door.
	 */
	sema_p(&iscsi_door_sema);
	/*
	 * Take the reader/writer lock protecting iscsi_door_handle
	 * as a reader.
	 */
	rw_enter(&iscsi_door_lock, RW_READER);

	if (iscsi_door_handle == NULL) {
		/* There's no door handle. */
		rw_exit(&iscsi_door_lock);
		sema_v(&iscsi_door_sema);
		return (B_FALSE);
	}
	error = door_ki_upcall(iscsi_door_handle, arg);

	rw_exit(&iscsi_door_lock);
	sema_v(&iscsi_door_sema);

	if (error != 0) {
		return (B_FALSE);
	} else {
		return (B_TRUE);
	}
}
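For context, iscsi_door_sema acts as a counting gate that bounds the number of concurrent door upcalls. A minimal sketch of how such a gate could be set up with the standard illumos DDI primitives; the count of 16 and the helper name are illustrative assumptions, not taken from the driver:

static ksema_t		iscsi_door_sema;
static krwlock_t	iscsi_door_lock;

static void
iscsi_door_gate_init(void)	/* hypothetical init helper */
{
	/* Allow up to 16 simultaneous upcalls (assumed limit). */
	sema_init(&iscsi_door_sema, 16, NULL, SEMA_DRIVER, NULL);
	rw_init(&iscsi_door_lock, NULL, RW_DRIVER, NULL);
}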
Example #2
/*
 * Ensure that a specified block is up-to-date on disk.
 */
void
blkflush(dev_t dev, daddr_t blkno)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *sbp = NULL;
	uint_t index;
	kmutex_t *hmp;

	index = bio_bhash(dev, blkno);
	hp    = &hbuf[index];
	dp    = (struct buf *)hp;
	hmp   = &hp->b_lock;

	/*
	 * Identify the buffer in the cache belonging to
	 * this device and blkno (if any).
	 */
	mutex_enter(hmp);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		sbp = bp;
		break;
	}
	mutex_exit(hmp);
	if (sbp == NULL)
		return;
	/*
	 * Now lock the buffer we identified and make sure it still
	 * belongs to the device and is still delayed-write (B_DELWRI).
	 */
	sema_p(&sbp->b_sem);
	if (sbp->b_blkno == blkno && sbp->b_edev == dev &&
	    (sbp->b_flags & (B_DELWRI|B_STALE)) == B_DELWRI) {
		mutex_enter(hmp);
		hp->b_length--;
		notavail(sbp);
		mutex_exit(hmp);
		/*
		 * XXX - There is nothing to guarantee a synchronous
		 * write here if the B_ASYNC flag is set.  This needs
		 * some investigation.
		 */
		if (sbp->b_vp == NULL) {		/* !ufs */
			BWRITE(sbp);	/* synchronous write */
		} else {				/* ufs */
			UFS_BWRITE(VTOI(sbp->b_vp)->i_ufsvfs, sbp);
		}
	} else {
		sema_v(&sbp->b_sem);
	}
}
Example #3
/*
 * Wait for the I/O on buffer cb to complete and return its error
 * status.  In the panic case we cannot block, so busy-wait instead.
 */
int
trans_not_wait(struct buf *cb)
{
	/*
	 * In case of panic, busy wait for completion
	 */
	if (panicstr) {
		trans_wait_panic(cb);
	} else {
		sema_p(&cb->b_io);
	}

	return (geterror(cb));
}
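The sema_p() above pairs with the buffer I/O completion path, which posts the b_io semaphore once the transfer finishes. A condensed sketch of that completion side, simplified from what biodone() does (not the full routine):

/* Simplified completion side: posting b_io releases trans_not_wait(). */
static void
biodone_sketch(struct buf *bp)	/* hypothetical condensation of biodone() */
{
	bp->b_flags |= B_DONE;
	sema_v(&bp->b_io);	/* wakes the sema_p(&cb->b_io) above */
}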
Example #4
void start_host (Host * host, Host * join)
{
    char port[16], key[160];
    char *arg[9];

    memset (port, 0, sizeof (port));
    memset (key, 0, sizeof (key));

    snprintf (port, sizeof (port), "%d", host->port);
    snprintf (key, sizeof (key), "%s", host->key.keystr);
//  arg[0] = "ssh";
//  arg[1] = host->name;
    arg[0] = "./bighost";
    if (join != NULL)
	{
	    arg[1] = "-j";
	    arg[2] = join->hnp;
	    arg[3] = port;
	    arg[4] = key;
	    arg[5] = NULL;
	}
    else
	{
	    arg[1] = port;
	    arg[2] = key;
	    arg[3] = NULL;
	}

    host->pid = fork ();

    if (host->pid == 0)
	{
	    execvp ("./bighost", arg);
	    perror ("./bighost");
	    exit (1);
	}

    /* Wait for the new host to come up; this sema_p variant is a
       project-local API that takes a timeout argument. */
    sema_p (sem, 0.0);

}
Example #5
int _thread_zlog_do(void *param)
{
    while (1) {
        sema_p(gs_zlog.sema);
        mutex_lock(gs_zlog.mutex);
        if (gs_zlog.is_to_exit == TRUE && gs_zlog.head == NULL) {
            mutex_unlock(gs_zlog.mutex);
            break;
        }
        if (gs_zlog.head == NULL) {
            mutex_unlock(gs_zlog.mutex);
            continue;
        }

        zlog_info_t *zlog_info = gs_zlog.head;
        gs_zlog.head = gs_zlog.head->next;
        mutex_unlock(gs_zlog.mutex);

        out_print(zlog_info->content);
        free(zlog_info->content);
        free(zlog_info);    /* free the node itself, not just its content */
    }
    return 0;
}
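The consumer above pairs with a producer that links a new entry onto the list and posts the semaphore. A minimal sketch of that enqueue side, assuming the same gs_zlog fields plus a tail pointer that the excerpt does not show (the function name is hypothetical):

static void zlog_enqueue(zlog_info_t *info)
{
    info->next = NULL;
    mutex_lock(gs_zlog.mutex);
    if (gs_zlog.head == NULL)
        gs_zlog.head = info;
    else
        gs_zlog.tail->next = info;  /* gs_zlog.tail is an assumed field */
    gs_zlog.tail = info;
    mutex_unlock(gs_zlog.mutex);
    sema_v(gs_zlog.sema);           /* wake _thread_zlog_do() */
}

Example #6

/*
 * kadmin - kernel work for the uadmin(2) administrative system call:
 * shutdown, reboot, remount, dump, freeze and related actions.
 */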
int
kadmin(int cmd, int fcn, void *mdep, cred_t *credp)
{
	int error = 0;
	char *buf;
	size_t buflen = 0;
	boolean_t invoke_cb = B_FALSE;

	/*
	 * We might be called directly by the kernel's fault-handling code, so
	 * we can't assert that the caller is in the global zone.
	 */

	/*
	 * Make sure that cmd is one of the valid <sys/uadmin.h> command codes
	 * and that we have appropriate privileges for this action.
	 */
	switch (cmd) {
	case A_FTRACE:
	case A_SHUTDOWN:
	case A_REBOOT:
	case A_REMOUNT:
	case A_FREEZE:
	case A_DUMP:
	case A_SDTTEST:
	case A_CONFIG:
		if (secpolicy_sys_config(credp, B_FALSE) != 0)
			return (EPERM);
		break;

	default:
		return (EINVAL);
	}

	/*
	 * Serialize these operations on ualock.  If it is held, the
	 * system should shut down, reboot, or remount shortly, unless
	 * there is an error.  We need a cv rather than just a mutex
	 * because proper functioning of A_REBOOT relies on being able
	 * to interrupt blocked userland callers.
	 *
	 * We only clear ua_shutdown_thread after A_REMOUNT or A_CONFIG.
	 * Other commands should never return.
	 */
	if (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_REMOUNT ||
	    cmd == A_CONFIG) {
		mutex_enter(&ualock);
		while (ua_shutdown_thread != NULL) {
			if (cv_wait_sig(&uacond, &ualock) == 0) {
				/*
				 * If we were interrupted, leave, and handle
				 * the signal (or exit, depending on what
				 * happened)
				 */
				mutex_exit(&ualock);
				return (EINTR);
			}
		}
		ua_shutdown_thread = curthread;
		mutex_exit(&ualock);
	}

	switch (cmd) {
	case A_SHUTDOWN:
	{
		proc_t *p = ttoproc(curthread);

		/*
		 * If we are called from a user context, release (almost)
		 * all of our own resources.  If kadmin() is called from
		 * a kernel context, these resources are not released.
		 */
		if (p != &p0) {
			proc_is_exiting(p);
			if ((error = exitlwps(0)) != 0) {
				/*
				 * Another thread in this process also called
				 * exitlwps().
				 */
				mutex_enter(&ualock);
				ua_shutdown_thread = NULL;
				cv_signal(&uacond);
				mutex_exit(&ualock);
				return (error);
			}
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOWAIT;
			sigfillset(&p->p_ignore);
			curthread->t_lwp->lwp_cursig = 0;
			curthread->t_lwp->lwp_extsig = 0;
			if (p->p_exec) {
				vnode_t *exec_vp = p->p_exec;
				p->p_exec = NULLVP;
				mutex_exit(&p->p_lock);
				VN_RELE(exec_vp);
			} else {
				mutex_exit(&p->p_lock);
			}

			pollcleanup();
			closeall(P_FINFO(curproc));
			relvm();

		} else {
			/*
			 * Reset t_cred if not set because much of the
			 * filesystem code depends on CRED() being valid.
			 */
			if (curthread->t_cred == NULL)
				curthread->t_cred = kcred;
		}

		/* indicate shutdown in progress */
		sys_shutdown = 1;

		/*
		 * Communicate that init shouldn't be restarted.
		 */
		zone_shutdown_global();

		killall(ALL_ZONES);
		/*
		 * If we are calling kadmin() from a kernel context then we
		 * do not release these resources.
		 */
		if (ttoproc(curthread) != &p0) {
			VN_RELE(PTOU(curproc)->u_cdir);
			if (PTOU(curproc)->u_rdir)
				VN_RELE(PTOU(curproc)->u_rdir);
			if (PTOU(curproc)->u_cwd)
				refstr_rele(PTOU(curproc)->u_cwd);

			PTOU(curproc)->u_cdir = rootdir;
			PTOU(curproc)->u_rdir = NULL;
			PTOU(curproc)->u_cwd = NULL;
		}

		/*
		 * Allow the reboot/halt/poweroff code a chance to do
		 * anything it needs to whilst we still have filesystems
		 * mounted, like loading any modules necessary for later
		 * performing the actual poweroff.
		 */
		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			buf = i_convert_boot_device_name(mdep, NULL, &buflen);
			mdpreboot(cmd, fcn, buf);
		} else
			mdpreboot(cmd, fcn, mdep);

		/*
		 * Allow fsflush to finish running and then prevent it
		 * from ever running again so that vfs_unmountall() and
		 * vfs_syncall() can acquire the vfs locks they need.
		 */
		sema_p(&fsflush_sema);
		(void) callb_execute_class(CB_CL_UADMIN_PRE_VFS, NULL);

		vfs_unmountall();
		(void) VFS_MOUNTROOT(rootvfs, ROOT_UNMOUNT);
		vfs_syncall();

		dump_ereports();
		dump_messages();

		invoke_cb = B_TRUE;

		/* FALLTHROUGH */
	}

	case A_REBOOT:
		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			buf = i_convert_boot_device_name(mdep, NULL, &buflen);
			mdboot(cmd, fcn, buf, invoke_cb);
		} else
			mdboot(cmd, fcn, mdep, invoke_cb);
		/* no return expected */
		break;

	case A_CONFIG:
		switch (fcn) {
		case AD_UPDATE_BOOT_CONFIG:
#ifndef	__sparc
		{
			extern void fastboot_update_config(const char *);

			fastboot_update_config(mdep);
		}
#endif

			break;
		}
		/* Let other threads enter the shutdown path now */
		mutex_enter(&ualock);
		ua_shutdown_thread = NULL;
		cv_signal(&uacond);
		mutex_exit(&ualock);
		break;

	case A_REMOUNT:
		(void) VFS_MOUNTROOT(rootvfs, ROOT_REMOUNT);
		/* Let other threads enter the shutdown path now */
		mutex_enter(&ualock);
		ua_shutdown_thread = NULL;
		cv_signal(&uacond);
		mutex_exit(&ualock);
		break;

	case A_FREEZE:
	{
		/*
		 * This is the entrypoint for all suspend/resume actions.
		 */
		extern int cpr(int, void *);

		if (modload("misc", "cpr") == -1)
			return (ENOTSUP);
		/* Let the CPR module decide what to do with mdep */
		error = cpr(fcn, mdep);
		break;
	}

	case A_FTRACE:
	{
		switch (fcn) {
		case AD_FTRACE_START:
			(void) FTRACE_START();
			break;
		case AD_FTRACE_STOP:
			(void) FTRACE_STOP();
			break;
		default:
			error = EINVAL;
		}
		break;
	}

	case A_DUMP:
	{
		if (fcn == AD_NOSYNC) {
			in_sync = 1;
			break;
		}

		panic_bootfcn = fcn;
		panic_forced = 1;

		if ((mdep != NULL) && (*(char *)mdep == '/')) {
			panic_bootstr = i_convert_boot_device_name(mdep,
			    NULL, &buflen);
		} else
			panic_bootstr = mdep;

#ifndef	__sparc
		extern void fastboot_update_and_load(int, char *);

		fastboot_update_and_load(fcn, mdep);
#endif

		panic("forced crash dump initiated at user request");
		/*NOTREACHED*/
	}

	case A_SDTTEST:
	{
		DTRACE_PROBE7(test, int, 1, int, 2, int, 3, int, 4, int, 5,
		    int, 6, int, 7);
		break;
	}

	default:
		error = EINVAL;
	}

	return (error);
}
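Example #7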
/*
 * As part of file system hardening, this daemon is awakened
 * every second to flush cached data which includes the
 * buffer cache, the inode cache and mapped pages.
 */
void
fsflush()
{
	struct buf *bp, *dwp;
	struct hbuf *hp;
	int autoup;
	unsigned int ix, icount, count = 0;
	callb_cpr_t cprinfo;
	uint_t		bcount;
	kmutex_t	*hmp;
	struct vfssw *vswp;

	proc_fsflush = ttoproc(curthread);
	proc_fsflush->p_cstime = 0;
	proc_fsflush->p_stime =  0;
	proc_fsflush->p_cutime =  0;
	proc_fsflush->p_utime = 0;
	bcopy("fsflush", curproc->p_user.u_psargs, 8);
	bcopy("fsflush", curproc->p_user.u_comm, 7);

	mutex_init(&fsflush_lock, NULL, MUTEX_DEFAULT, NULL);
	sema_init(&fsflush_sema, 0, NULL, SEMA_DEFAULT, NULL);

	/*
	 * Set up page coalescing.
	 */
	fsf_npgsz = page_num_pagesizes();
	ASSERT(fsf_npgsz < MAX_PAGESIZES);
	for (ix = 0; ix < fsf_npgsz - 1; ++ix) {
		fsf_pgcnt[ix] =
		    page_get_pagesize(ix + 1) / page_get_pagesize(ix);
		fsf_mask[ix] = page_get_pagecnt(ix + 1) - 1;
	}

	autoup = v.v_autoup * hz;
	icount = v.v_autoup / tune.t_fsflushr;
	CALLB_CPR_INIT(&cprinfo, &fsflush_lock, callb_generic_cpr, "fsflush");
loop:
	sema_v(&fsflush_sema);
	mutex_enter(&fsflush_lock);
	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	cv_wait(&fsflush_cv, &fsflush_lock);		/* wait for clock */
	CALLB_CPR_SAFE_END(&cprinfo, &fsflush_lock);
	mutex_exit(&fsflush_lock);
	sema_p(&fsflush_sema);

	/*
	 * Write back all old B_DELWRI buffers on the freelist.
	 */
	bcount = 0;
	for (ix = 0; ix < v.v_hbuf; ix++) {

		hp = &hbuf[ix];
		dwp = (struct buf *)&dwbuf[ix];

		bcount += (hp->b_length);

		if (dwp->av_forw == dwp) {
			continue;
		}

		hmp = &hbuf[ix].b_lock;
		mutex_enter(hmp);
		bp = dwp->av_forw;

		/*
		 * Go down only on the delayed write lists.
		 */
		while (bp != dwp) {

			ASSERT(bp->b_flags & B_DELWRI);

			if ((bp->b_flags & B_DELWRI) &&
			    (ddi_get_lbolt() - bp->b_start >= autoup) &&
			    sema_tryp(&bp->b_sem)) {
				bp->b_flags |= B_ASYNC;
				hp->b_length--;
				notavail(bp);
				mutex_exit(hmp);
				if (bp->b_vp == NULL) {
					BWRITE(bp);
				} else {
					UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs,
					    bp);
				}
				mutex_enter(hmp);
				bp = dwp->av_forw;
			} else {
				bp = bp->av_forw;
			}
		}
		mutex_exit(hmp);
	}

	/*
	 * There is no need to wake up any thread waiting on bio_mem_cv
	 * since brelse will wake them up as soon as IO is complete.
	 */
	bfreelist.b_bcount = bcount;

	if (dopageflush)
		fsflush_do_pages();

	if (!doiflush)
		goto loop;

	/*
	 * If the system was not booted to single user mode, skip the
	 * inode flushing until after fsflush_iflush_delay secs have elapsed.
	 */
	if ((boothowto & RB_SINGLE) == 0 &&
	    (ddi_get_lbolt64() / hz) < fsflush_iflush_delay)
		goto loop;

	/*
	 * Flush cached attribute information (e.g. inodes).
	 */
	if (++count >= icount) {
		count = 0;

		/*
		 * Sync back cached data.
		 */
		RLOCK_VFSSW();
		for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
			if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
				vfs_refvfssw(vswp);
				RUNLOCK_VFSSW();
				(void) fsop_sync_by_kind(vswp - vfssw,
				    SYNC_ATTR, kcred);
				vfs_unrefvfssw(vswp);
				RLOCK_VFSSW();
			}
		}
		RUNLOCK_VFSSW();
	}
	goto loop;
}
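The loop above sleeps on fsflush_cv and is woken from the clock interrupt path once per tune.t_fsflushr seconds (see the "wait for clock" comment). A condensed sketch of that wake-up side, under the assumption that the clock code decrements a countdown and signals the cv; the counter and function names are illustrative:

static int fsflushcnt;		/* assumed countdown, reloaded on each signal */

static void
clock_tick_sketch(void)		/* hypothetical condensation of clock() */
{
	if (--fsflushcnt <= 0) {
		fsflushcnt = tune.t_fsflushr;
		cv_signal(&fsflush_cv);	/* wakes fsflush() from cv_wait() */
	}
}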
Example #8
/*
 * Make sure all write-behind blocks on dev (or NODEV for all)
 * are flushed out.
 */
void
bflush(dev_t dev)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *delwri_list = EMPTY_LIST;
	int i, index;
	kmutex_t *hmp;

	mutex_enter(&blist_lock);
	/*
	 * Wait for any invalidates or flushes ahead of us to finish.
	 * We really could split blist_lock up per device for better
	 * parallelism here.
	 */
	while (bio_doinginval || bio_doingflush) {
		bio_flinv_cv_wanted = 1;
		cv_wait(&bio_flushinval_cv, &blist_lock);
	}
	bio_doingflush++;
	/*
	 * Gather all B_DELWRI buffers for the device.
	 * Lock ordering is b_sem > hash lock (brelse).
	 * Since we are finding the buffer via the delayed write list,
	 * it may be busy and we would block trying to get the
	 * b_sem lock while holding the hash lock. So transfer all the
	 * candidates onto delwri_list and then drop the hash locks.
	 */
	for (i = 0; i < v.v_hbuf; i++) {
		vfs_syncprogress();
		hmp = &hbuf[i].b_lock;
		dp = (struct buf *)&dwbuf[i];
		mutex_enter(hmp);
		for (bp = dp->av_forw; bp != dp; bp = bp->av_forw) {
			if (dev == NODEV || bp->b_edev == dev) {
				if (bp->b_list == NULL) {
					bp->b_list = delwri_list;
					delwri_list = bp;
				}
			}
		}
		mutex_exit(hmp);
	}
	mutex_exit(&blist_lock);

	/*
	 * Now that the hash locks have been dropped grab the semaphores
	 * and write back all the buffers that have B_DELWRI set.
	 */
	while (delwri_list != EMPTY_LIST) {
		vfs_syncprogress();
		bp = delwri_list;

		sema_p(&bp->b_sem);	/* may block */
		if ((dev != bp->b_edev && dev != NODEV) ||
		    (panicstr && bp->b_flags & B_BUSY)) {
			sema_v(&bp->b_sem);
			delwri_list = bp->b_list;
			bp->b_list = NULL;
			continue;	/* No longer a candidate */
		}
		if (bp->b_flags & B_DELWRI) {
			index = bio_bhash(bp->b_edev, bp->b_blkno);
			hp = &hbuf[index];
			hmp = &hp->b_lock;
			dp = (struct buf *)hp;

			bp->b_flags |= B_ASYNC;
			mutex_enter(hmp);
			hp->b_length--;
			notavail(bp);
			mutex_exit(hmp);
			if (bp->b_vp == NULL) {		/* !ufs */
				BWRITE(bp);
			} else {			/* ufs */
				UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs, bp);
			}
		} else {
			sema_v(&bp->b_sem);
		}
		delwri_list = bp->b_list;
		bp->b_list = NULL;
	}
	mutex_enter(&blist_lock);
	bio_doingflush--;
	if (bio_flinv_cv_wanted) {
		bio_flinv_cv_wanted = 0;
		cv_broadcast(&bio_flushinval_cv);
	}
	mutex_exit(&blist_lock);
}
Example #9
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk_common(void * arg, dev_t dev, daddr_t blkno, long bsize, int errflg)
{
	ufsvfs_t *ufsvfsp = (struct ufsvfs *)arg;
	struct buf *bp;
	struct buf *dp;
	struct buf *nbp = NULL;
	struct buf *errbp;
	uint_t		index;
	kmutex_t	*hmp;
	struct	hbuf	*hp;

	if (getmajor(dev) >= devcnt)
		cmn_err(CE_PANIC, "blkdev");

	biostats.bio_lookup.value.ui32++;

	index = bio_bhash(dev, blkno);
	hp    = &hbuf[index];
	dp    = (struct buf *)hp;
	hmp   = &hp->b_lock;

	mutex_enter(hmp);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		/*
		 * Avoid holding the hash lock in the event that
		 * the buffer is locked by someone. Since the hash chain
		 * may change when we drop the hash lock
		 * we have to start at the beginning of the chain if the
		 * buffer identity/contents aren't valid.
		 */
		if (!sema_tryp(&bp->b_sem)) {
			biostats.bio_bufbusy.value.ui32++;
			mutex_exit(hmp);
			/*
			 * OK, we are dealing with a busy buffer.
			 * In the case that we are panicking and we
			 * got called from bread(), we have some chance
			 * for error recovery. So better bail out from
			 * here since sema_p() won't block. If we got
			 * called directly from ufs routines, there is
			 * no way to report an error yet.
			 */
			if (panicstr && errflg)
				goto errout;
			/*
			 * For the following line of code to work
			 * correctly never kmem_free the buffer "header".
			 */
			sema_p(&bp->b_sem);
			if (bp->b_blkno != blkno || bp->b_edev != dev ||
			    (bp->b_flags & B_STALE)) {
				sema_v(&bp->b_sem);
				mutex_enter(hmp);
				goto loop;	/* start over */
			}
			mutex_enter(hmp);
		}
		/* Found */
		biostats.bio_hit.value.ui32++;
		bp->b_flags &= ~B_AGE;

		/*
		 * Yank it off the free/delayed write lists
		 */
		hp->b_length--;
		notavail(bp);
		mutex_exit(hmp);

		ASSERT((bp->b_flags & B_NOCACHE) == 0);

		if (nbp == NULL) {
			/*
			 * Make the common path short.
			 */
			ASSERT(SEMA_HELD(&bp->b_sem));
			return (bp);
		}

		biostats.bio_bufdup.value.ui32++;

		/*
		 * A duplicate buffer must have entered the cache while we
		 * dropped the hash lock to allocate, so free the buffer
		 * we allocated and return the one we found.
		 */
		kmem_free(nbp->b_un.b_addr, nbp->b_bufsize);
		nbp->b_un.b_addr = NULL;

		/*
		 * Account for the memory
		 */
		mutex_enter(&bfree_lock);
		bfreelist.b_bufsize += nbp->b_bufsize;
		mutex_exit(&bfree_lock);

		/*
		 * Destroy buf identity, and place on avail list
		 */
		nbp->b_dev = (o_dev_t)NODEV;
		nbp->b_edev = NODEV;
		nbp->b_flags = 0;
		nbp->b_file = NULL;
		nbp->b_offset = -1;

		sema_v(&nbp->b_sem);
		bio_bhdr_free(nbp);

		ASSERT(SEMA_HELD(&bp->b_sem));
		return (bp);
	}

	/*
	 * bio_getfreeblk may block so check the hash chain again.
	 */
	if (nbp == NULL) {
		mutex_exit(hmp);
		nbp = bio_getfreeblk(bsize);
		mutex_enter(hmp);
		goto loop;
	}

	/*
	 * New buffer. Assign nbp and stick it on the hash.
	 */
	nbp->b_flags = B_BUSY;
	nbp->b_edev = dev;
	nbp->b_dev = (o_dev_t)cmpdev(dev);
	nbp->b_blkno = blkno;
	nbp->b_iodone = NULL;
	nbp->b_bcount = bsize;
	/*
	 * If we are given a ufsvfsp and the vfs_root field is NULL
	 * then this must be I/O for a superblock.  A superblock's
	 * buffer is set up in mountfs() and there is no root vnode
	 * at that point.
	 */
	if (ufsvfsp && ufsvfsp->vfs_root) {
		nbp->b_vp = ufsvfsp->vfs_root;
	} else {
		nbp->b_vp = NULL;
	}

	ASSERT((nbp->b_flags & B_NOCACHE) == 0);

	binshash(nbp, dp);
	mutex_exit(hmp);

	ASSERT(SEMA_HELD(&nbp->b_sem));

	return (nbp);


	/*
	 * Come here in case of an internal error. At this point we couldn't
	 * get a buffer, but we have to return one. Hence we allocate some
	 * kind of error reply buffer on the fly. This buffer is marked as
	 * B_NOCACHE | B_AGE | B_ERROR | B_DONE to ensure the following:
	 *	- B_ERROR will indicate the error to the caller.
	 *	- B_DONE will prevent us from reading the buffer from
	 *	  the device.
	 *	- B_NOCACHE will cause this buffer to be freed in
	 *	  brelse().
	 */

errout:
	errbp = geteblk();
	sema_p(&errbp->b_sem);
	errbp->b_flags &= ~B_BUSY;
	errbp->b_flags |= (B_ERROR | B_DONE);
	return (errbp);
}
Example #10
/*
 * Same as binval, except that it can force-invalidate delayed-write
 * buffers (which may not have been flushed because of device errors).
 * Also makes sure that the retry-write flag (B_RETRYWRI) is cleared.
 */
int
bfinval(dev_t dev, int force)
{
	struct buf *dp;
	struct buf *bp;
	struct buf *binval_list = EMPTY_LIST;
	int i, error = 0;
	kmutex_t *hmp;
	uint_t index;
	struct buf **backp;

	mutex_enter(&blist_lock);
	/*
	 * Wait for any flushes ahead of us to finish, it's ok to
	 * do invalidates in parallel.
	 */
	while (bio_doingflush) {
		bio_flinv_cv_wanted = 1;
		cv_wait(&bio_flushinval_cv, &blist_lock);
	}
	bio_doinginval++;

	/* Gather bp's */
	for (i = 0; i < v.v_hbuf; i++) {
		dp = (struct buf *)&hbuf[i];
		hmp = &hbuf[i].b_lock;

		mutex_enter(hmp);
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
			if (bp->b_edev == dev) {
				if (bp->b_list == NULL) {
					bp->b_list = binval_list;
					binval_list = bp;
				}
			}
		}
		mutex_exit(hmp);
	}
	mutex_exit(&blist_lock);

	/* Invalidate all bp's found */
	while (binval_list != EMPTY_LIST) {
		bp = binval_list;

		sema_p(&bp->b_sem);
		if (bp->b_edev == dev) {
			if (force && (bp->b_flags & B_DELWRI)) {
				/* clear B_DELWRI, move to non-dw freelist */
				index = bio_bhash(bp->b_edev, bp->b_blkno);
				hmp = &hbuf[index].b_lock;
				dp = (struct buf *)&hbuf[index];
				mutex_enter(hmp);

				/* remove from delayed write freelist */
				notavail(bp);

				/* add to B_AGE side of non-dw freelist */
				backp = &dp->av_forw;
				(*backp)->av_back = bp;
				bp->av_forw = *backp;
				*backp = bp;
				bp->av_back = dp;

				/*
				 * make sure write retries and busy are cleared
				 */
				bp->b_flags &=
				    ~(B_BUSY | B_DELWRI | B_RETRYWRI);
				mutex_exit(hmp);
			}
			if ((bp->b_flags & B_DELWRI) == 0)
				bp->b_flags |= B_STALE|B_AGE;
			else
				error = EIO;
		}
		sema_v(&bp->b_sem);
		binval_list = bp->b_list;
		bp->b_list = NULL;
	}
	mutex_enter(&blist_lock);
	bio_doinginval--;
	if (bio_flinv_cv_wanted) {
		cv_broadcast(&bio_flushinval_cv);
		bio_flinv_cv_wanted = 0;
	}
	mutex_exit(&blist_lock);
	return (error);
}
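Example #11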
/*
 * iscsi_ioctl_set_config_sess - sets configured session information
 *
 * This function is an ioctl helper function to set the
 * configured session information in the persistent store.
 * In addition it will notify any active sessions of the
 * change so they can update binding information.  It
 * will also destroy sessions that were removed and add
 * new sessions.
 */
int
iscsi_ioctl_set_config_sess(iscsi_hba_t *ihp, iscsi_config_sess_t *ics)
{
	uchar_t *name;
	iscsi_sess_t *isp;

	/* check range information */
	if ((ics->ics_in < ISCSI_MIN_CONFIG_SESSIONS) ||
	    (ics->ics_in > ISCSI_MAX_CONFIG_SESSIONS)) {
		/* invalid range information */
		return (EINVAL);
	}

	if (ics->ics_oid == ISCSI_INITIATOR_OID) {
		name = ihp->hba_name;
	} else {
		/* get target name */
		name = iscsi_targetparam_get_name(ics->ics_oid);
		if (name == NULL) {
			/* invalid node name */
			return (EINVAL);
		}
	}

	/* store the new information */
	if (persistent_set_config_session((char *)name, ics) == B_FALSE) {
		/* failed to store new information */
		return (EINVAL);
	}

	/* notify existing sessions of change */
	rw_enter(&ihp->hba_sess_list_rwlock, RW_READER);
	isp = ihp->hba_sess_list;
	while (isp != NULL) {

		if ((ics->ics_oid == ISCSI_INITIATOR_OID) ||
		    (strncmp((char *)isp->sess_name, (char *)name,
		    ISCSI_MAX_NAME_LEN) == 0)) {

			/*
			 * If the number of configured sessions is now
			 * less than or equal to the least significant
			 * byte of this session's ISID (its session
			 * index), we need to tear down this session.
			 */
			if (ics->ics_in <= isp->sess_isid[5]) {
				/* First attempt to destroy the session */
				if (ISCSI_SUCCESS(iscsi_sess_destroy(isp))) {
					isp = ihp->hba_sess_list;
				} else {
					/*
					 * If we can't destroy it then
					 * at least poke it to disconnect
					 * it.
					 */
					mutex_enter(&isp->sess_state_mutex);
					iscsi_sess_state_machine(isp,
					    ISCSI_SESS_EVENT_N7);
					mutex_exit(&isp->sess_state_mutex);
					isp = isp->sess_next;
				}
			} else {
				isp = isp->sess_next;
			}
		} else {
			isp = isp->sess_next;
		}
	}
	rw_exit(&ihp->hba_sess_list_rwlock);

	/*
	 * The number of targets has changed.  Since we don't expect
	 * this to be a common operation, keep the code simple and
	 * just use a slightly larger hammer: poke discovery.  This
	 * forces reevaluation of this target and all other targets.
	 */
	iscsid_poke_discovery(ihp, iSCSIDiscoveryMethodUnknown);
	/* lock so that only one config operation occurs at a time */
	sema_p(&iscsid_config_semaphore);
	iscsid_config_all(ihp, B_FALSE);
	sema_v(&iscsid_config_semaphore);

	return (0);
}
Example #12
/*ARGSUSED*/
static void
kctl_wr_thread(void *arg)
{
	callb_cpr_t cprinfo;
	kmutex_t cprlock;

	mutex_init(&cprlock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cprinfo, &cprlock, callb_generic_cpr, "kmdb work");

	for (;;) {
		/*
		 * XXX what should I do here for panic?  It'll spin unless I
		 * can figure out a way to park it.  Presumably I don't want to
		 * let it exit.
		 */
		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		mutex_exit(&cprlock);

		sema_p(&kctl.kctl_wr_avail_sem);

		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_END(&cprinfo, &cprlock);
		mutex_exit(&cprlock);

		kctl_dprintf("kctl worker thread - waking up");

		if (kmdb_kdi_get_unload_request() ||
		    kctl.kctl_wr_state != KCTL_WR_ST_RUN) {
			/*
			 * We've either got a debugger-initiated unload (if
			 * unload_request returned true), or we're stopping due
			 * to an error discovered by the driver (if
			 * kctl_wr_state is no longer KCTL_WR_ST_RUN).  Start
			 * cleaning up.
			 */

			/*
			 * The debugger has already deactivated itself, and will
			 * have dumped a bunch of stuff on the queue.  We need
			 * to process it before exiting.
			 */
			(void) kmdb_wr_driver_process(kctl_wr_process_cb,
			    KCTL_WR_PROCESS_UNLOADING);
			break;
		}

		/*
		 * A non-zero return means we've passed messages back to the
		 * debugger for processing, so we need to wake the debugger up.
		 */
		if (kctl_wr_process() > 0)
			kdi_dvec_enter();
	}

	/*
	 * NULL out the dmod search path, so we can send the current one back
	 * to the debugger.  XXX this should probably be somewhere else.
	 */
	kctl_dmod_path_reset();

	/*
	 * The debugger will send us unload notifications for each dmod that it
	 * noticed.  If, for example, the debugger is unloaded before the first
	 * start, it won't have noticed any of the dmods we loaded.  We'll need
	 * to initiate the unloads ourselves.
	 */
	kctl_dmod_unload_all();

	kctl.kctl_wr_state = KCTL_WR_ST_STOPPED;

	/*
	 * Must be last, as it concludes by setting state to INACTIVE.  The
	 * kctl data structure must not be accessed by this thread after that
	 * point.
	 */
	kctl_cleanup();

	mutex_enter(&cprlock);
	CALLB_CPR_EXIT(&cprinfo);
	mutex_destroy(&cprlock);
}
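The worker sleeps on kctl.kctl_wr_avail_sem until the driver posts it when new work is queued. A sketch of that posting side, under the assumption that a simple notification hook exists; the function name is hypothetical:

/* Hypothetical notification hook: called when work is queued for kmdb. */
static void
kctl_wr_work_avail(void)
{
	sema_v(&kctl.kctl_wr_avail_sem);	/* wakes kctl_wr_thread() */
}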
Example #13
/*
 * Similar to sema_p except that it blocks at an interruptible
 * priority.  Returns 1 if a signal is pending, otherwise 0.
 */
int
sema_p_sig(ksema_t *sp)
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	if (lwp == NULL) {
		sema_p(sp);
		return (0);
	}

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		proc_t *p = ttoproc(t);
		thread_lock_high(t);
		t->t_flag |= T_WAKEABLE;
		SEMA_BLOCK(s, sqlp);
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		thread_unlock_nopreempt(t);
		if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
			setrun(t);
		swtch();
		t->t_flag &= ~T_WAKEABLE;
		if (ISSIG(t, FORREAL) ||
		    lwp->lwp_sysabort || MUSTRETURN(p, t)) {
			kthread_t *sq, *tp;
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			disp_lock_enter(sqlp);
			sq = s->s_slpq;
			/*
			 * In case sema_v() and the interrupt happen at
			 * the same time, we must pass the wakeup on to
			 * the next waiting thread.
			 */
			if ((sq != NULL) && (s->s_count > 0)) {
				tp = sq;
				ASSERT(THREAD_LOCK_HELD(tp));
				sq = sq->t_link;
				tp->t_link = NULL;
				DTRACE_SCHED1(wakeup, kthread_t *, tp);
				tp->t_sobj_ops = NULL;
				tp->t_wchan = NULL;
				ASSERT(tp->t_state == TS_SLEEP);
				CL_WAKEUP(tp);
				s->s_slpq = sq;
				disp_lock_exit_high(sqlp);
				thread_unlock(tp);
			} else {
				disp_lock_exit(sqlp);
			}
			return (1);
		}
		lwp->lwp_asleep = 0;
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
	return (0);
}
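A typical caller treats a nonzero return from sema_p_sig() as an interrupted wait and backs out, usually returning EINTR. A minimal sketch of such a caller; the function and semaphore names are hypothetical:

static int
wait_for_resource(ksema_t *sp)
{
	if (sema_p_sig(sp) != 0)
		return (EINTR);	/* a signal interrupted the wait */
	/* ... resource is held; do the work ... */
	sema_v(sp);
	return (0);
}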