Example #1
/*
 * iscsi_door_upcall
 *
 * This function tries to call the iscsi_door.
 */
static
boolean_t
iscsi_door_upcall(door_arg_t *arg)
{
	int	error;

	/*
	 * This semaphore limits the number of simultaneous calls
	 * to the door.
	 */
	sema_p(&iscsi_door_sema);
	/*
	 * Take the reader/writer lock protecting iscsi_door_handle
	 * as a reader.
	 */
	rw_enter(&iscsi_door_lock, RW_READER);

	if (iscsi_door_handle == NULL) {
		/* There's no door handle. */
		rw_exit(&iscsi_door_lock);
		sema_v(&iscsi_door_sema);
		return (B_FALSE);
	}
	error = door_ki_upcall(iscsi_door_handle, arg);

	rw_exit(&iscsi_door_lock);
	sema_v(&iscsi_door_sema);

	if (error != 0) {
		return (B_FALSE);
	} else {
		return (B_TRUE);
	}
}
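For context, a minimal sketch of the state this upcall depends on. The names iscsi_door_sema, iscsi_door_lock, and iscsi_door_handle come from the example above; the init function and the count used are assumptions, not the actual driver code.

static ksema_t		iscsi_door_sema;
static krwlock_t	iscsi_door_lock;
static door_handle_t	iscsi_door_handle;

static void
iscsi_door_sketch_init(void)
{
	/* Bound the number of simultaneous upcalls (count assumed). */
	sema_init(&iscsi_door_sema, 16, NULL, SEMA_DRIVER, NULL);
	rw_init(&iscsi_door_lock, NULL, RW_DRIVER, NULL);
	/* iscsi_door_handle stays NULL until the daemon binds a door. */
}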
Example #2
/*
 * NAMES:	pw_read_done and pw_write_done
 * DESCRIPTION: I/O completion callbacks. Mark the buffer B_DONE, then
 *		either release it (asynchronous I/O) or post b_io to wake
 *		the thread waiting on the buffer (synchronous I/O).
 * PARAMETERS:	buf_t *bp - the buffer whose I/O has completed
 * RETURNS:	0
 */
static int
pw_read_done(buf_t *bp)
{
	ASSERT(SEMA_HELD(&bp->b_sem));
	ASSERT((bp->b_flags & B_DONE) == 0);

	bp->b_flags |= B_DONE;

	if (bp->b_flags & B_ASYNC)
		sema_v(&bp->b_sem);
	else
		/* wakeup the thread waiting on this buf */
		sema_v(&bp->b_io);
	return (0);
}
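The synchronous branch above pairs with a thread blocked on b_io. A hypothetical sketch of that waiting side, assuming the usual b_iodone convention:

	/* Hypothetical synchronous caller: issue the I/O, then block on
	 * b_io until the completion callback posts it. */
	bp->b_iodone = pw_read_done;
	(void) bdev_strategy(bp);
	sema_p(&bp->b_io);	/* woken by pw_read_done's sema_v */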
Example #3
/*ARGSUSED*/
void
kctl_wrintr(void)
{
	kctl.kctl_wr_avail = 0;

	sema_v(&kctl.kctl_wr_avail_sem);
}
Example #4
void
kctl_wr_thr_stop(void)
{
	ASSERT(kctl.kctl_wr_state == KCTL_WR_ST_RUN);
	kctl.kctl_wr_state = KCTL_WR_ST_STOP;
	sema_v(&kctl.kctl_wr_avail_sem);
}
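A sketch of the writer thread these two functions wake. Only kctl_wr_avail, kctl_wr_state, and kctl_wr_avail_sem appear in the examples; the loop body itself is an assumption.

static void
kctl_wr_thread_sketch(void)
{
	for (;;) {
		/* Block until kctl_wrintr() or kctl_wr_thr_stop() posts. */
		sema_p(&kctl.kctl_wr_avail_sem);

		if (kctl.kctl_wr_state == KCTL_WR_ST_STOP)
			break;	/* kctl_wr_thr_stop() asked us to exit */

		/* ... drain whatever work kctl_wrintr() announced ... */
	}
}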
Example #5
int _add_zlog(zlog_level_t lvl,
              const char *file_name,
              const unsigned int line_num,
              const char *func_name,
              const char *format, ...)
{

    char *str_buf = malloc(MAXLEN_EACH_LOG);
    if (str_buf == NULL)
        return S_ERROR;
    memset(str_buf, 0, MAXLEN_EACH_LOG);

    const char *time_format = "%Y-%m-%d %H:%M:%S";
    time_t t_now = time(NULL);
    struct tm tm_now;
#ifdef _WIN32
    if (0 != localtime_s(&tm_now, &t_now)) {
        free(str_buf);      /* don't leak the buffer on the error path */
        return S_ERROR;
    }
#else
    tm_now = *localtime(&t_now);
#endif
    if (0 == strftime(str_buf, (size_t)25, time_format, &tm_now)) {
        free(str_buf);      /* don't leak the buffer on the error path */
        return S_ERROR;
    }

    /* bound the write so long names can't overflow the buffer */
    snprintf(str_buf + strlen(str_buf), MAXLEN_EACH_LOG - strlen(str_buf),
             " [%s](%s, %s:%u): ",
             _get_levlename(lvl), func_name, _get_short_filename(file_name), line_num);

    va_list args;
    va_start(args, format);
#ifdef _WIN32
    _vsnprintf_s(str_buf + strlen(str_buf), MAXLEN_EACH_LOG - strlen(str_buf), _TRUNCATE, format, args);
#else
    vsnprintf(str_buf + strlen(str_buf), MAXLEN_EACH_LOG - strlen(str_buf), format, args);
#endif
    va_end(args);

    str_buf[MAXLEN_EACH_LOG - 1] = '\0';

    zlog_info_t *zlog_info = malloc(sizeof(zlog_info_t));
    if (zlog_info == NULL) {
        free(str_buf);
        return S_ERROR;
    }
    memset(zlog_info, 0, sizeof(zlog_info_t));
    zlog_info->lvl = lvl;
    zlog_info->content = str_buf;

    mutex_lock(gs_zlog.mutex);
    if (gs_zlog.head == NULL) {
        gs_zlog.head = zlog_info;
        gs_zlog.tail = zlog_info;
    } else {
        gs_zlog.tail->next = zlog_info;
        gs_zlog.tail = gs_zlog.tail->next;
    }
    sema_v(gs_zlog.sema);
    mutex_unlock(gs_zlog.mutex);

    return S_OK;
}
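The matching consumer blocks on the same semaphore, one sema_p per queued entry. A minimal sketch, assuming sema_p/mutex_lock are the wait counterparts of the sema_v/mutex_unlock wrappers used above:

static void logger_thread_sketch(void)
{
    for (;;) {
        sema_p(gs_zlog.sema);           /* one post per queued log entry */
        if (gs_zlog.is_to_exit)         /* set by fini_zlog(), Example #9 */
            break;

        mutex_lock(gs_zlog.mutex);
        zlog_info_t *zlog_info = gs_zlog.head;  /* pop the head */
        gs_zlog.head = zlog_info->next;
        if (gs_zlog.head == NULL)
            gs_zlog.tail = NULL;
        mutex_unlock(gs_zlog.mutex);

        fputs(zlog_info->content, gs_zlog.file);
        free(zlog_info->content);
        free(zlog_info);
    }
}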
Example #6
void hello (ChimeraState * chstate, Message * msg)
{
    fprintf (stderr, "hello: %s\n", msg->payload);
    announced++;
    sema_v (sem);
}
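This handler and del() in Example #11 both post the same semaphore; the test driver presumably sends a message and then blocks until the callback fires. A hypothetical sketch of that waiting side:

    /* Hypothetical test driver: send the hello message through the
     * Chimera API, then wait for the hello() callback to post sem. */
    /* ... send message ... */
    sema_p (sem);       /* blocks until hello() runs sema_v (sem) */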
Example #7
void
fuse_queue_request_nowait(fuse_session_t *se, fuse_msg_node_t *req_p)
{
	FUSE_SESSION_MUTEX_LOCK(se);
	req_p->fmn_state = FUSE_MSG_STATE_QUEUE;
	/* the device write call removes the message from the queue */
	list_insert_tail(&(se->msg_list), req_p);
	FUSE_SESSION_MUTEX_UNLOCK(se);
	/* wake up the reader @ device */
	sema_v(&(se->session_sema));
}
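A sketch of the reader at the device that this wakes (assumed; the example only shows the queueing side). It would block on session_sema and then detach the oldest request under the session mutex:

	/* Hypothetical device-side reader. */
	sema_p(&(se->session_sema));	/* wait for a queued request */
	FUSE_SESSION_MUTEX_LOCK(se);
	fuse_msg_node_t *req_p = list_head(&(se->msg_list));
	if (req_p != NULL)
		list_remove(&(se->msg_list), req_p);
	FUSE_SESSION_MUTEX_UNLOCK(se);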
Example #8
/*
 * Ensure that a specified block is up-to-date on disk.
 */
void
blkflush(dev_t dev, daddr_t blkno)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *sbp = NULL;
	uint_t index;
	kmutex_t *hmp;

	index = bio_bhash(dev, blkno);
	hp    = &hbuf[index];
	dp    = (struct buf *)hp;
	hmp   = &hp->b_lock;

	/*
	 * Identify the buffer in the cache belonging to
	 * this device and blkno (if any).
	 */
	mutex_enter(hmp);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		sbp = bp;
		break;
	}
	mutex_exit(hmp);
	if (sbp == NULL)
		return;
	/*
	 * Now check the buffer we have identified and
	 * make sure it still belongs to the device and is B_DELWRI
	 */
	sema_p(&sbp->b_sem);
	if (sbp->b_blkno == blkno && sbp->b_edev == dev &&
	    (sbp->b_flags & (B_DELWRI|B_STALE)) == B_DELWRI) {
		mutex_enter(hmp);
		hp->b_length--;
		notavail(sbp);
		mutex_exit(hmp);
		/*
		 * XXX - There is nothing to guarantee a synchronous
		 * write here if the B_ASYNC flag is set.  This needs
		 * some investigation.
		 */
		if (sbp->b_vp == NULL) {		/* !ufs */
			BWRITE(sbp);	/* synchronous write */
		} else {				/* ufs */
			UFS_BWRITE(VTOI(sbp->b_vp)->i_ufsvfs, sbp);
		}
	} else {
		sema_v(&sbp->b_sem);
	}
}
Example #9
int fini_zlog()
{
    gs_zlog.is_to_exit = TRUE;
    sema_v(gs_zlog.sema);
    thread_stop(gs_zlog.thread);
    sema_delete(gs_zlog.sema);
    mutex_delete(gs_zlog.mutex);
    if (gs_zlog.file_prefix != NULL)
        free(gs_zlog.file_prefix);
    if (gs_zlog.file != NULL)
        fclose(gs_zlog.file);
    return S_OK;
}
Example #10
/*
 * Called from roll thread;
 *	buffer set for reading master
 * Returns
 *	0 - success, can continue with next buffer
 *	1 - failure due to logmap deltas being in use
 */
int
top_read_roll(rollbuf_t *rbp, ml_unit_t *ul)
{
	buf_t		*bp	= &rbp->rb_bh;
	offset_t	mof	= ldbtob(bp->b_blkno);

	/*
	 * get a list of deltas
	 */
	if (logmap_list_get_roll(ul->un_logmap, mof, rbp)) {
		/* logmap deltas are in use */
		return (1);
	}

	/*
	 * no deltas were found, nothing to roll
	 */
	if (rbp->rb_age == NULL) {
		bp->b_flags |= B_INVAL;
		return (0);
	}

	/*
	 * If there is one cached roll buffer that covers all the deltas,
	 * then we can use that instead of copying to a separate roll buffer.
	 */
	if (rbp->rb_crb) {
		rbp->rb_bh.b_blkno = lbtodb(rbp->rb_crb->c_mof);
		return (0);
	}

	/*
	 * Set up the read.
	 * If no read is needed logmap_setup_read() returns 0.
	 */
	if (logmap_setup_read(rbp->rb_age, rbp)) {
		/*
		 * async read the data from master
		 */
		logstats.ls_rreads.value.ui64++;
		bp->b_bcount = MAPBLOCKSIZE;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
	} else {
		sema_v(&bp->b_io); /* mark read as complete */
	}
	return (0);
}
Example #11
void del (ChimeraState * chstate, Message * msg)
{
    Key cur, dest;
    char deststr[64];
    char curstr[64];

    /* bound the reads so they can't overflow the 64-byte buffers */
    sscanf (msg->payload, "%63s %63s", curstr, deststr);
    str_to_key (curstr, &cur);
    str_to_key (deststr, &dest);
    fprintf (stderr, "message to %s routes delivered to %s\n", dest.keystr,
	     cur.keystr);

    if (key_equal (dest, current))
	{
	    sema_v (sem);
	}
}
Example #12
/*
 * Return a buffer w/o sleeping
 */
struct buf *
trygetblk(dev_t dev, daddr_t blkno)
{
	struct buf	*bp;
	struct buf	*dp;
	struct hbuf	*hp;
	kmutex_t	*hmp;
	uint_t		index;

	index = bio_bhash(dev, blkno);
	hp = &hbuf[index];
	hmp = &hp->b_lock;

	if (!mutex_tryenter(hmp))
		return (NULL);

	dp = (struct buf *)hp;
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		/*
		 * Get access to a valid buffer without sleeping
		 */
		if (sema_tryp(&bp->b_sem)) {
			if (bp->b_flags & B_DONE) {
				hp->b_length--;
				notavail(bp);
				mutex_exit(hmp);
				return (bp);
			} else {
				sema_v(&bp->b_sem);
				break;
			}
		}
		break;
	}
	mutex_exit(hmp);
	return (NULL);
}
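A hypothetical caller pattern for this non-blocking lookup: take the fast path when the buffer is cached and idle, otherwise fall back to a blocking lookup.

	struct buf *bp = trygetblk(dev, blkno);
	if (bp != NULL) {
		/* Returned with b_sem held and off the freelist. */
		/* ... consume bp->b_un.b_addr ... */
		brelse(bp);	/* drops b_sem again, see Example #17 */
	} else {
		/* ... fall back to a blocking path (assumed) ... */
	}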
Example #13
int
trans_not_done(struct buf *cb)
{
	sema_v(&cb->b_io);
	return (0);
}
/*
 * As part of file system hardening, this daemon is awakened
 * every second to flush cached data which includes the
 * buffer cache, the inode cache and mapped pages.
 */
void
fsflush()
{
	struct buf *bp, *dwp;
	struct hbuf *hp;
	int autoup;
	unsigned int ix, icount, count = 0;
	callb_cpr_t cprinfo;
	uint_t		bcount;
	kmutex_t	*hmp;
	struct vfssw *vswp;

	proc_fsflush = ttoproc(curthread);
	proc_fsflush->p_cstime = 0;
	proc_fsflush->p_stime =  0;
	proc_fsflush->p_cutime =  0;
	proc_fsflush->p_utime = 0;
	bcopy("fsflush", curproc->p_user.u_psargs, 8);
	bcopy("fsflush", curproc->p_user.u_comm, 7);

	mutex_init(&fsflush_lock, NULL, MUTEX_DEFAULT, NULL);
	sema_init(&fsflush_sema, 0, NULL, SEMA_DEFAULT, NULL);

	/*
	 * Setup page coalescing.
	 */
	fsf_npgsz = page_num_pagesizes();
	ASSERT(fsf_npgsz < MAX_PAGESIZES);
	for (ix = 0; ix < fsf_npgsz - 1; ++ix) {
		fsf_pgcnt[ix] =
		    page_get_pagesize(ix + 1) / page_get_pagesize(ix);
		fsf_mask[ix] = page_get_pagecnt(ix + 1) - 1;
	}

	autoup = v.v_autoup * hz;
	icount = v.v_autoup / tune.t_fsflushr;
	CALLB_CPR_INIT(&cprinfo, &fsflush_lock, callb_generic_cpr, "fsflush");
loop:
	sema_v(&fsflush_sema);
	mutex_enter(&fsflush_lock);
	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	cv_wait(&fsflush_cv, &fsflush_lock);		/* wait for clock */
	CALLB_CPR_SAFE_END(&cprinfo, &fsflush_lock);
	mutex_exit(&fsflush_lock);
	sema_p(&fsflush_sema);

	/*
	 * Write back all old B_DELWRI buffers on the freelist.
	 */
	bcount = 0;
	for (ix = 0; ix < v.v_hbuf; ix++) {

		hp = &hbuf[ix];
		dwp = (struct buf *)&dwbuf[ix];

		bcount += (hp->b_length);

		if (dwp->av_forw == dwp) {
			continue;
		}

		hmp = &hbuf[ix].b_lock;
		mutex_enter(hmp);
		bp = dwp->av_forw;

		/*
		 * Go down only on the delayed write lists.
		 */
		while (bp != dwp) {

			ASSERT(bp->b_flags & B_DELWRI);

			if ((bp->b_flags & B_DELWRI) &&
			    (ddi_get_lbolt() - bp->b_start >= autoup) &&
			    sema_tryp(&bp->b_sem)) {
				bp->b_flags |= B_ASYNC;
				hp->b_length--;
				notavail(bp);
				mutex_exit(hmp);
				if (bp->b_vp == NULL) {
					BWRITE(bp);
				} else {
					UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs,
					    bp);
				}
				mutex_enter(hmp);
				bp = dwp->av_forw;
			} else {
				bp = bp->av_forw;
			}
		}
		mutex_exit(hmp);
	}

	/*
	 * There is no need to wake up any thread waiting on bio_mem_cv
	 * since brelse will wake them up as soon as IO is complete.
	 */
	bfreelist.b_bcount = bcount;

	if (dopageflush)
		fsflush_do_pages();

	if (!doiflush)
		goto loop;

	/*
	 * If the system was not booted to single user mode, skip the
	 * inode flushing until after fsflush_iflush_delay secs have elapsed.
	 */
	if ((boothowto & RB_SINGLE) == 0 &&
	    (ddi_get_lbolt64() / hz) < fsflush_iflush_delay)
		goto loop;

	/*
	 * Flush cached attribute information (e.g. inodes).
	 */
	if (++count >= icount) {
		count = 0;

		/*
		 * Sync back cached data.
		 */
		RLOCK_VFSSW();
		for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
			if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
				vfs_refvfssw(vswp);
				RUNLOCK_VFSSW();
				(void) fsop_sync_by_kind(vswp - vfssw,
				    SYNC_ATTR, kcred);
				vfs_unrefvfssw(vswp);
				RLOCK_VFSSW();
			}
		}
		RUNLOCK_VFSSW();
	}
	goto loop;
}
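The cv_wait() in the loop above pairs with a periodic wakeup from the clock; a sketch of that side, with the caller and timing assumed:

	/* Assumed: invoked from the clock tick, every t_fsflushr seconds. */
	mutex_enter(&fsflush_lock);
	cv_signal(&fsflush_cv);		/* wake fsflush() out of cv_wait() */
	mutex_exit(&fsflush_lock);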
Example #15
/*
 * Make sure all write-behind blocks on dev (or NODEV for all)
 * are flushed out.
 */
void
bflush(dev_t dev)
{
	struct buf *bp, *dp;
	struct hbuf *hp;
	struct buf *delwri_list = EMPTY_LIST;
	int i, index;
	kmutex_t *hmp;

	mutex_enter(&blist_lock);
	/*
	 * Wait for any invalidates or flushes ahead of us to finish.
	 * We really could split blist_lock up per device for better
	 * parallelism here.
	 */
	while (bio_doinginval || bio_doingflush) {
		bio_flinv_cv_wanted = 1;
		cv_wait(&bio_flushinval_cv, &blist_lock);
	}
	bio_doingflush++;
	/*
	 * Gather all B_DELWRI buffer for device.
	 * Lock ordering is b_sem > hash lock (brelse).
	 * Since we are finding the buffer via the delayed write list,
	 * it may be busy and we would block trying to get the
	 * b_sem lock while holding the hash lock. So transfer all the
	 * candidates onto delwri_list and then drop the hash locks.
	 */
	for (i = 0; i < v.v_hbuf; i++) {
		vfs_syncprogress();
		hmp = &hbuf[i].b_lock;
		dp = (struct buf *)&dwbuf[i];
		mutex_enter(hmp);
		for (bp = dp->av_forw; bp != dp; bp = bp->av_forw) {
			if (dev == NODEV || bp->b_edev == dev) {
				if (bp->b_list == NULL) {
					bp->b_list = delwri_list;
					delwri_list = bp;
				}
			}
		}
		mutex_exit(hmp);
	}
	mutex_exit(&blist_lock);

	/*
	 * Now that the hash locks have been dropped grab the semaphores
	 * and write back all the buffers that have B_DELWRI set.
	 */
	while (delwri_list != EMPTY_LIST) {
		vfs_syncprogress();
		bp = delwri_list;

		sema_p(&bp->b_sem);	/* may block */
		if ((dev != bp->b_edev && dev != NODEV) ||
		    (panicstr && bp->b_flags & B_BUSY)) {
			sema_v(&bp->b_sem);
			delwri_list = bp->b_list;
			bp->b_list = NULL;
			continue;	/* No longer a candidate */
		}
		if (bp->b_flags & B_DELWRI) {
			index = bio_bhash(bp->b_edev, bp->b_blkno);
			hp = &hbuf[index];
			hmp = &hp->b_lock;
			dp = (struct buf *)hp;

			bp->b_flags |= B_ASYNC;
			mutex_enter(hmp);
			hp->b_length--;
			notavail(bp);
			mutex_exit(hmp);
			if (bp->b_vp == NULL) {		/* !ufs */
				BWRITE(bp);
			} else {			/* ufs */
				UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs, bp);
			}
		} else {
			sema_v(&bp->b_sem);
		}
		delwri_list = bp->b_list;
		bp->b_list = NULL;
	}
	mutex_enter(&blist_lock);
	bio_doingflush--;
	if (bio_flinv_cv_wanted) {
		bio_flinv_cv_wanted = 0;
		cv_broadcast(&bio_flushinval_cv);
	}
	mutex_exit(&blist_lock);
}
Example #16
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk_common(void * arg, dev_t dev, daddr_t blkno, long bsize, int errflg)
{
	ufsvfs_t *ufsvfsp = (struct ufsvfs *)arg;
	struct buf *bp;
	struct buf *dp;
	struct buf *nbp = NULL;
	struct buf *errbp;
	uint_t		index;
	kmutex_t	*hmp;
	struct	hbuf	*hp;

	if (getmajor(dev) >= devcnt)
		cmn_err(CE_PANIC, "blkdev");

	biostats.bio_lookup.value.ui32++;

	index = bio_bhash(dev, blkno);
	hp    = &hbuf[index];
	dp    = (struct buf *)hp;
	hmp   = &hp->b_lock;

	mutex_enter(hmp);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		/*
		 * Avoid holding the hash lock in the event that
		 * the buffer is locked by someone. Since the hash chain
		 * may change when we drop the hash lock
		 * we have to start at the beginning of the chain if the
		 * buffer identity/contents aren't valid.
		 */
		if (!sema_tryp(&bp->b_sem)) {
			biostats.bio_bufbusy.value.ui32++;
			mutex_exit(hmp);
			/*
			 * OK, we are dealing with a busy buffer.
			 * In the case that we are panicking and we
			 * got called from bread(), we have some chance
			 * for error recovery. So better bail out from
			 * here since sema_p() won't block. If we got
			 * called directly from ufs routines, there is
			 * no way to report an error yet.
			 */
			if (panicstr && errflg)
				goto errout;
			/*
			 * For the following line of code to work
			 * correctly never kmem_free the buffer "header".
			 */
			sema_p(&bp->b_sem);
			if (bp->b_blkno != blkno || bp->b_edev != dev ||
			    (bp->b_flags & B_STALE)) {
				sema_v(&bp->b_sem);
				mutex_enter(hmp);
				goto loop;	/* start over */
			}
			mutex_enter(hmp);
		}
		/* Found */
		biostats.bio_hit.value.ui32++;
		bp->b_flags &= ~B_AGE;

		/*
		 * Yank it off the free/delayed write lists
		 */
		hp->b_length--;
		notavail(bp);
		mutex_exit(hmp);

		ASSERT((bp->b_flags & B_NOCACHE) == 0);

		if (nbp == NULL) {
			/*
			 * Make the common path short.
			 */
			ASSERT(SEMA_HELD(&bp->b_sem));
			return (bp);
		}

		biostats.bio_bufdup.value.ui32++;

		/*
		 * The buffer must have entered the cache during the lock
		 * upgrade, so free the new buffer we allocated and return
		 * the found buffer.
		 */
		kmem_free(nbp->b_un.b_addr, nbp->b_bufsize);
		nbp->b_un.b_addr = NULL;

		/*
		 * Account for the memory
		 */
		mutex_enter(&bfree_lock);
		bfreelist.b_bufsize += nbp->b_bufsize;
		mutex_exit(&bfree_lock);

		/*
		 * Destroy buf identity, and place on avail list
		 */
		nbp->b_dev = (o_dev_t)NODEV;
		nbp->b_edev = NODEV;
		nbp->b_flags = 0;
		nbp->b_file = NULL;
		nbp->b_offset = -1;

		sema_v(&nbp->b_sem);
		bio_bhdr_free(nbp);

		ASSERT(SEMA_HELD(&bp->b_sem));
		return (bp);
	}

	/*
	 * bio_getfreeblk may block so check the hash chain again.
	 */
	if (nbp == NULL) {
		mutex_exit(hmp);
		nbp = bio_getfreeblk(bsize);
		mutex_enter(hmp);
		goto loop;
	}

	/*
	 * New buffer. Assign nbp and stick it on the hash.
	 */
	nbp->b_flags = B_BUSY;
	nbp->b_edev = dev;
	nbp->b_dev = (o_dev_t)cmpdev(dev);
	nbp->b_blkno = blkno;
	nbp->b_iodone = NULL;
	nbp->b_bcount = bsize;
	/*
	 * If we are given a ufsvfsp and the vfs_root field is NULL
	 * then this must be I/O for a superblock.  A superblock's
	 * buffer is set up in mountfs() and there is no root vnode
	 * at that point.
	 */
	if (ufsvfsp && ufsvfsp->vfs_root) {
		nbp->b_vp = ufsvfsp->vfs_root;
	} else {
		nbp->b_vp = NULL;
	}

	ASSERT((nbp->b_flags & B_NOCACHE) == 0);

	binshash(nbp, dp);
	mutex_exit(hmp);

	ASSERT(SEMA_HELD(&nbp->b_sem));

	return (nbp);


	/*
	 * Come here in case of an internal error. At this point we couldn't
	 * get a buffer, but we have to return one. Hence we allocate some
	 * kind of error reply buffer on the fly. This buffer is marked as
	 * B_NOCACHE | B_AGE | B_ERROR | B_DONE to assure the following:
	 *	- B_ERROR will indicate error to the caller.
	 *	- B_DONE will prevent us from reading the buffer from
	 *	  the device.
	 *	- B_NOCACHE will cause this buffer to be freed in
	 *	  brelse().
	 */

errout:
	errbp = geteblk();
	sema_p(&errbp->b_sem);
	errbp->b_flags &= ~B_BUSY;
	errbp->b_flags |= (B_ERROR | B_DONE);
	return (errbp);
}
Example #17
/*
 * Release the buffer, with no I/O implied.
 */
void
brelse(struct buf *bp)
{
	struct buf	**backp;
	uint_t		index;
	kmutex_t	*hmp;
	struct	buf	*dp;
	struct	hbuf	*hp;


	ASSERT(SEMA_HELD(&bp->b_sem));

	/*
	 * Clear the retry write flag if the buffer was written without
	 * error.  The presence of B_DELWRI means the buffer has not yet
	 * been written and the presence of B_ERROR means that an error
	 * is still occurring.
	 */
	if ((bp->b_flags & (B_ERROR | B_DELWRI | B_RETRYWRI)) == B_RETRYWRI) {
		bp->b_flags &= ~B_RETRYWRI;
	}

	/* Check for anomalous conditions */
	if (bp->b_flags & (B_ERROR|B_NOCACHE)) {
		if (bp->b_flags & B_NOCACHE) {
			/* Don't add to the freelist. Destroy it now */
			kmem_free(bp->b_un.b_addr, bp->b_bufsize);
			sema_destroy(&bp->b_sem);
			sema_destroy(&bp->b_io);
			kmem_free(bp, sizeof (struct buf));
			return;
		}
		/*
		 * If a write failed and we are supposed to retry write,
		 * don't toss the buffer.  Keep it around and mark it
		 * delayed write in the hopes that it will eventually
		 * get flushed (and still keep the system running.)
		 */
		if ((bp->b_flags & (B_READ | B_RETRYWRI)) == B_RETRYWRI) {
			bp->b_flags |= B_DELWRI;
			/* keep fsflush from trying continuously to flush */
			bp->b_start = ddi_get_lbolt();
		} else
			bp->b_flags |= B_AGE|B_STALE;
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
	}

	/*
	 * If delayed write is set then put it on the delayed
	 * write list instead of the free buffer list.
	 */
	index = bio_bhash(bp->b_edev, bp->b_blkno);
	hmp   = &hbuf[index].b_lock;

	mutex_enter(hmp);
	hp = &hbuf[index];
	dp = (struct buf *)hp;

	/*
	 * Make sure that the number of entries on this list is within
	 * 0 <= count <= total number of buffers
	 */
	ASSERT(hp->b_length >= 0);
	ASSERT(hp->b_length < nbuf);

	hp->b_length++;		/* We are adding this buffer */

	if (bp->b_flags & B_DELWRI) {
		/*
		 * This buffer goes on the delayed write buffer list
		 */
		dp = (struct buf *)&dwbuf[index];
	}
	ASSERT(bp->b_bufsize > 0);
	ASSERT(bp->b_bcount > 0);
	ASSERT(bp->b_un.b_addr != NULL);

	if (bp->b_flags & B_AGE) {
		backp = &dp->av_forw;
		(*backp)->av_back = bp;
		bp->av_forw = *backp;
		*backp = bp;
		bp->av_back = dp;
	} else {
		backp = &dp->av_back;
		(*backp)->av_forw = bp;
		bp->av_back = *backp;
		*backp = bp;
		bp->av_forw = dp;
	}
	mutex_exit(hmp);

	if (bfreelist.b_flags & B_WANTED) {
		/*
		 * Should come here very very rarely.
		 */
		mutex_enter(&bfree_lock);
		if (bfreelist.b_flags & B_WANTED) {
			bfreelist.b_flags &= ~B_WANTED;
			cv_broadcast(&bio_mem_cv);
		}
		mutex_exit(&bfree_lock);
	}

	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC);
	/*
	 * Don't let anyone get the buffer off the freelist before we
	 * release our hold on it.
	 */
	sema_v(&bp->b_sem);
}
Example #18
/*
 * Same as binval, except this can force-invalidate delayed-write buffers
 * (which may not already have been flushed because of device errors).  Also
 * makes sure that the retry write flag is cleared.
 */
int
bfinval(dev_t dev, int force)
{
	struct buf *dp;
	struct buf *bp;
	struct buf *binval_list = EMPTY_LIST;
	int i, error = 0;
	kmutex_t *hmp;
	uint_t index;
	struct buf **backp;

	mutex_enter(&blist_lock);
	/*
	 * Wait for any flushes ahead of us to finish; it's ok to
	 * do invalidates in parallel.
	 */
	while (bio_doingflush) {
		bio_flinv_cv_wanted = 1;
		cv_wait(&bio_flushinval_cv, &blist_lock);
	}
	bio_doinginval++;

	/* Gather bp's */
	for (i = 0; i < v.v_hbuf; i++) {
		dp = (struct buf *)&hbuf[i];
		hmp = &hbuf[i].b_lock;

		mutex_enter(hmp);
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
			if (bp->b_edev == dev) {
				if (bp->b_list == NULL) {
					bp->b_list = binval_list;
					binval_list = bp;
				}
			}
		}
		mutex_exit(hmp);
	}
	mutex_exit(&blist_lock);

	/* Invalidate all bp's found */
	while (binval_list != EMPTY_LIST) {
		bp = binval_list;

		sema_p(&bp->b_sem);
		if (bp->b_edev == dev) {
			if (force && (bp->b_flags & B_DELWRI)) {
				/* clear B_DELWRI, move to non-dw freelist */
				index = bio_bhash(bp->b_edev, bp->b_blkno);
				hmp = &hbuf[index].b_lock;
				dp = (struct buf *)&hbuf[index];
				mutex_enter(hmp);

				/* remove from delayed write freelist */
				notavail(bp);

				/* add to B_AGE side of non-dw freelist */
				backp = &dp->av_forw;
				(*backp)->av_back = bp;
				bp->av_forw = *backp;
				*backp = bp;
				bp->av_back = dp;

				/*
				 * make sure write retries and busy are cleared
				 */
				bp->b_flags &=
				    ~(B_BUSY | B_DELWRI | B_RETRYWRI);
				mutex_exit(hmp);
			}
			if ((bp->b_flags & B_DELWRI) == 0)
				bp->b_flags |= B_STALE|B_AGE;
			else
				error = EIO;
		}
		sema_v(&bp->b_sem);
		binval_list = bp->b_list;
		bp->b_list = NULL;
	}
	mutex_enter(&blist_lock);
	bio_doinginval--;
	if (bio_flinv_cv_wanted) {
		cv_broadcast(&bio_flushinval_cv);
		bio_flinv_cv_wanted = 0;
	}
	mutex_exit(&blist_lock);
	return (error);
}
Example #19
/*
 * iscsi_ioctl_set_config_sess - sets configured session information
 *
 * This function is an ioctl helper function to set the
 * configured session information in the persistent store.
 * In addition it will notify any active sessions of the
 * change so they can update binding information.  It
 * will also destroy sessions that were removed and add
 * new sessions.
 */
int
iscsi_ioctl_set_config_sess(iscsi_hba_t *ihp, iscsi_config_sess_t *ics)
{
	uchar_t *name;
	iscsi_sess_t *isp;

	/* check range information */
	if ((ics->ics_in < ISCSI_MIN_CONFIG_SESSIONS) ||
	    (ics->ics_in > ISCSI_MAX_CONFIG_SESSIONS)) {
		/* invalid range information */
		return (EINVAL);
	}

	if (ics->ics_oid == ISCSI_INITIATOR_OID) {
		name = ihp->hba_name;
	} else {
		/* get target name */
		name = iscsi_targetparam_get_name(ics->ics_oid);
		if (name == NULL) {
			/* invalid node name */
			return (EINVAL);
		}
	}

	/* store the new information */
	if (persistent_set_config_session((char *)name, ics) == B_FALSE) {
		/* failed to store new information */
		return (EINVAL);
	}

	/* notify existing sessions of change */
	rw_enter(&ihp->hba_sess_list_rwlock, RW_READER);
	isp = ihp->hba_sess_list;
	while (isp != NULL) {

		if ((ics->ics_oid == ISCSI_INITIATOR_OID) ||
		    (strncmp((char *)isp->sess_name, (char *)name,
		    ISCSI_MAX_NAME_LEN) == 0)) {

			/*
			 * If this session's least significant byte
			 * of the isid is less than or equal to
			 * the number of configured sessions
			 * then we need to tear down this session.
			 */
			if (ics->ics_in <= isp->sess_isid[5]) {
				/* First attempt to destroy the session */
				if (ISCSI_SUCCESS(iscsi_sess_destroy(isp))) {
					isp = ihp->hba_sess_list;
				} else {
					/*
					 * If we can't destroy it then
					 * at least poke it to disconnect
					 * it.
					 */
					mutex_enter(&isp->sess_state_mutex);
					iscsi_sess_state_machine(isp,
					    ISCSI_SESS_EVENT_N7);
					mutex_exit(&isp->sess_state_mutex);
					isp = isp->sess_next;
				}
			} else {
				isp = isp->sess_next;
			}
		} else {
			isp = isp->sess_next;
		}
	}
	rw_exit(&ihp->hba_sess_list_rwlock);

	/*
	 * The number of targets has changed.  Since we don't expect
	 * this to be a common operation let's keep the code simple and
	 * just use a slightly larger hammer and poke discovery.  This
	 * forces the reevaluation of this target and all other targets.
	 */
	iscsid_poke_discovery(ihp, iSCSIDiscoveryMethodUnknown);
	/* lock so only one config operation occurs */
	sema_p(&iscsid_config_semaphore);
	iscsid_config_all(ihp, B_FALSE);
	sema_v(&iscsid_config_semaphore);

	return (0);
}