Example #1
/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t          *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));

	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}
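
For context, the handle check above follows a common completion-lookup pattern: the
firmware echoes back the handle assigned at submission time, and the driver uses it
to index a fixed-size table of outstanding commands, treating anything out of range
or already cleared as a fatal firmware error. Below is a minimal standalone sketch
of that pattern; every name in it (outstanding, complete_handle, MAX_OUTSTANDING)
is hypothetical and not part of the qla2xxx driver.

#include <stdio.h>

#define MAX_OUTSTANDING 8		/* hypothetical table size */

struct cmd {
	int id;				/* demo payload */
};

static struct cmd *outstanding[MAX_OUTSTANDING];

/* Look up and clear the slot for a completed handle; NULL if stale or bogus. */
static struct cmd *complete_handle(unsigned int handle)
{
	struct cmd *c;

	if (handle >= MAX_OUTSTANDING)
		return NULL;		/* out-of-range handle from hardware */
	c = outstanding[handle];
	outstanding[handle] = NULL;	/* free the slot for reuse */
	return c;
}

int main(void)
{
	struct cmd c = { .id = 42 };
	struct cmd *done;

	outstanding[3] = &c;
	done = complete_handle(3);
	printf("handle 3 -> %s (id=%d)\n", done ? "found" : "stale",
	    done ? done->id : -1);
	printf("handle 99 -> %s\n", complete_handle(99) ? "found" : "invalid");
	return 0;
}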
Example #2
GLOBAL Int UMF_build_tuples
(
    NumericType *Numeric,
    WorkType *Work
)
{
    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    Int e, nrows, ncols, nel, *Rows, *Cols, row, col, n_row, n_col, *E,
	*Row_tuples, *Row_degree, *Row_tlen,
	*Col_tuples, *Col_degree, *Col_tlen, n1 ;
    Element *ep ;
    Unit *p ;
    Tuple tuple, *tp ;

    /* ---------------------------------------------------------------------- */
    /* get parameters */
    /* ---------------------------------------------------------------------- */

    E = Work->E ;
    Col_degree = Numeric->Cperm ;	/* for NON_PIVOTAL_COL macro */
    Row_degree = Numeric->Rperm ;	/* for NON_PIVOTAL_ROW macro */
    Row_tuples = Numeric->Uip ;
    Row_tlen   = Numeric->Uilen ;
    Col_tuples = Numeric->Lip ;
    Col_tlen   = Numeric->Lilen ;
    n_row = Work->n_row ;
    n_col = Work->n_col ;
    nel = Work->nel ;
    n1 = Work->n1 ;

    DEBUG3 (("BUILD_TUPLES: n_row "ID" n_col "ID" nel "ID"\n",
	n_row, n_col, nel)) ;

    /* ---------------------------------------------------------------------- */
    /* allocate space for the tuple lists */
    /* ---------------------------------------------------------------------- */

    /* Garbage collection and memory reallocation have already attempted to */
    /* ensure that there is enough memory for all the tuple lists.  If */
    /* memory allocation fails here, then there is nothing more to be done. */

    for (row = n1 ; row < n_row ; row++)
    {
	if (NON_PIVOTAL_ROW (row))
	{
	    Row_tuples [row] = UMF_mem_alloc_tail_block (Numeric,
		UNITS (Tuple, TUPLES (Row_tlen [row]))) ;
	    if (!Row_tuples [row])
	    {
		/* :: out of memory for row tuples :: */
		DEBUGm4 (("out of memory: build row tuples\n")) ;
		return (FALSE) ;	/* out of memory for row tuples */
	    }
	    Row_tlen [row] = 0 ;
	}
    }

    /* push on stack in reverse order, so column tuples are in the order */
    /* that they will be deleted. */
    for (col = n_col-1 ; col >= n1 ; col--)
    {
	if (NON_PIVOTAL_COL (col))
	{
	    Col_tuples [col] = UMF_mem_alloc_tail_block (Numeric,
		UNITS (Tuple, TUPLES (Col_tlen [col]))) ;
	    if (!Col_tuples [col])
	    {
		/* :: out of memory for col tuples :: */
		DEBUGm4 (("out of memory: build col tuples\n")) ;
		return (FALSE) ;	/* out of memory for col tuples */
	    }
	    Col_tlen [col] = 0 ;
	}
    }

#ifndef NDEBUG
    UMF_dump_memory (Numeric) ;
#endif

    /* ---------------------------------------------------------------------- */
    /* create the tuple lists (exclude element 0) */
    /* ---------------------------------------------------------------------- */

    /* for all elements, in order of creation */
    for (e = 1 ; e <= nel ; e++)
    {
	DEBUG9 (("Adding tuples for element: "ID" at "ID"\n", e, E [e])) ;
	ASSERT (E [e]) ;	/* no external fragmentation */
	p = Numeric->Memory + E [e] ;
	GET_ELEMENT_PATTERN (ep, p, Cols, Rows, ncols) ;
	nrows = ep->nrows ;
	ASSERT (e != 0) ;
	ASSERT (e == 0 || nrows == ep->nrowsleft) ;
	ASSERT (e == 0 || ncols == ep->ncolsleft) ;
	tuple.e = e ;
	for (tuple.f = 0 ; tuple.f < ncols ; tuple.f++)
	{
	    col = Cols [tuple.f] ;
	    ASSERT (col >= n1 && col < n_col) ;
	    ASSERT (NON_PIVOTAL_COL (col)) ;
	    ASSERT (Col_tuples [col]) ;
	    tp = ((Tuple *) (Numeric->Memory + Col_tuples [col]))
		+ Col_tlen [col]++ ;
	    *tp = tuple ;
#ifndef NDEBUG
	    UMF_dump_rowcol (1, Numeric, Work, col, FALSE) ;
#endif
	}
	for (tuple.f = 0 ; tuple.f < nrows ; tuple.f++)
	{
	    row = Rows [tuple.f] ;
	    ASSERT (row >= n1 && row < n_row) ;
	    ASSERT (NON_PIVOTAL_ROW (row)) ;
	    ASSERT (Row_tuples [row]) ;
	    tp = ((Tuple *) (Numeric->Memory + Row_tuples [row]))
		+ Row_tlen [row]++ ;
	    *tp = tuple ;
#ifndef NDEBUG
	    UMF_dump_rowcol (0, Numeric, Work, row, FALSE) ;
#endif
	}
    }

    /* ---------------------------------------------------------------------- */
    /* the tuple lists are now valid, and can be scanned */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    UMF_dump_memory (Numeric) ;
    UMF_dump_matrix (Numeric, Work, FALSE) ;
#endif
    DEBUG3 (("BUILD_TUPLES: done\n")) ;
    return (TRUE) ;
}
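
To make the data structure concrete: a tuple (e, f) records that element e contains
a given row or column at local position f, so scanning a row's or column's tuple
list visits exactly the elements that contribute to it. The following is a minimal
standalone sketch of the column side, written in the same style as the code above;
the fixed-capacity arrays and the element patterns are invented for illustration,
whereas UMFPACK allocates each list from its own memory pool as shown above.

#include <stdio.h>

#define N_COLS  4			/* hypothetical matrix dimension */
#define N_ELEMS 3			/* hypothetical element count */
#define MAX_TUP 8			/* per-column capacity, assumed ample */

typedef struct { int e, f ; } Tuple ;	/* element id, local column position */

static Tuple col_tuples [N_COLS][MAX_TUP] ;
static int Col_tlen [N_COLS] ;		/* starts at zero, as in the code above */

int main (void)
{
    /* invented element patterns: the columns each element touches */
    static const int pattern [N_ELEMS][2] = { {0, 2}, {1, 2}, {0, 3} } ;
    Tuple tuple ;
    int e, col, i ;

    for (e = 0 ; e < N_ELEMS ; e++)
    {
	tuple.e = e ;
	for (tuple.f = 0 ; tuple.f < 2 ; tuple.f++)
	{
	    col = pattern [e][tuple.f] ;
	    col_tuples [col][Col_tlen [col]++] = tuple ;	/* append */
	}
    }

    for (col = 0 ; col < N_COLS ; col++)
    {
	printf ("col %d:", col) ;
	for (i = 0 ; i < Col_tlen [col] ; i++)
	    printf (" (e=%d,f=%d)", col_tuples [col][i].e,
		col_tuples [col][i].f) ;
	printf ("\n") ;
    }
    return (0) ;
}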
Example #3
/*
 * Do a simple estimate of the space needed to hold the statefile,
 * taking compression into account, but be fairly conservative so
 * we have a better chance of completing; when a dump fails, the
 * retry cost is fairly high.
 *
 * Allocate disk blocks for the state file if no space has been
 * allocated yet.  Since the state file will not be removed,
 * allocation should only be done once.
 */
static int
cpr_statefile_ok(vnode_t *vp, int alloc_retry)
{
	extern size_t cpr_bitmap_size;
	struct inode *ip = VTOI(vp);
	const int UCOMP_RATE = 20; /* comp. ratio*10 for user pages */
	u_longlong_t size, isize, ksize, raw_data;
	char *str, *est_fmt;
	size_t space;
	int error;

	/*
	 * number of pages short for swapping.
	 */
	STAT->cs_nosw_pages = k_anoninfo.ani_mem_resv;
	if (STAT->cs_nosw_pages < 0)
		STAT->cs_nosw_pages = 0;

	str = "cpr_statefile_ok:";

	DEBUG9(errp("Phys swap: max=%lu resv=%lu\n",
	    k_anoninfo.ani_max, k_anoninfo.ani_phys_resv));
	DEBUG9(errp("Mem swap: max=%ld resv=%lu\n",
	    MAX(availrmem - swapfs_minfree, 0),
	    k_anoninfo.ani_mem_resv));
	DEBUG9(errp("Total available swap: %ld\n",
		CURRENT_TOTAL_AVAILABLE_SWAP));

	/*
	 * try increasing filesize by 15%
	 */
	if (alloc_retry) {
		/*
		 * block device doesn't get any bigger
		 */
		if (vp->v_type == VBLK) {
			if (cpr_debug & (LEVEL1 | LEVEL6))
				errp("Retry statefile on special file\n");
			return (ENOMEM);
		} else {
			rw_enter(&ip->i_contents, RW_READER);
			size = (ip->i_size * SIZE_RATE) / INTEGRAL;
			rw_exit(&ip->i_contents);
		}
		if (cpr_debug & (LEVEL1 | LEVEL6))
			errp("Retry statefile size = %lld\n", size);
	} else {
		u_longlong_t cpd_size;
		pgcnt_t npages, nback;
		int ndvram;

		ndvram = 0;
		(void) callb_execute_class(CB_CL_CPR_FB,
		    (int)(uintptr_t)&ndvram);
		if (cpr_debug & (LEVEL1 | LEVEL6))
			errp("ndvram size = %d\n", ndvram);

		/*
		 * estimate 1 cpd_t for every (CPR_MAXCONTIG / 2) pages
		 */
		npages = cpr_count_kpages(REGULAR_BITMAP, cpr_nobit);
		cpd_size = sizeof (cpd_t) * (npages / (CPR_MAXCONTIG / 2));
		raw_data = cpd_size + cpr_bitmap_size;
		ksize = ndvram + mmu_ptob(npages);

		est_fmt = "%s estimated size with "
		    "%scompression %lld, ksize %lld\n";
		nback = mmu_ptob(STAT->cs_nosw_pages);
		if (CPR->c_flags & C_COMPRESSING) {
			size = ((ksize * COMPRESS_PERCENT) / INTEGRAL) +
			    raw_data + ((nback * 10) / UCOMP_RATE);
			DEBUG1(errp(est_fmt, str, "", size, ksize));
		} else {
			size = ksize + raw_data + nback;
			DEBUG1(errp(est_fmt, str, "no ", size, ksize));
		}
	}

	/*
	 * All this is much simpler for a block device
	 */
	if (vp->v_type == VBLK) {
		space = cpr_get_devsize(vp->v_rdev);
		if (cpr_debug & (LEVEL1 | LEVEL6))
			errp("statefile dev size %lu\n", space);

		/*
		 * Export the estimated filesize info; this value will be
		 * compared before dumping out the statefile in the
		 * no-compression case.
		 */
		STAT->cs_est_statefsz = size;
		if (cpr_debug & (LEVEL1 | LEVEL6))
			errp("%s Estimated statefile size %llu, space %lu\n",
			    str, size, space);
		if (size > space) {
			cpr_err(CE_CONT, "Statefile partition too small.");
			return (ENOMEM);
		}
		return (0);
	} else {
		if (CPR->c_alloc_cnt++ > C_MAX_ALLOC_RETRY) {
			cpr_err(CE_CONT, "Statefile allocation retry failed\n");
			return (ENOMEM);
		}

		/*
		 * Estimate space needed for the state file.
		 *
		 * State file size in bytes:
		 * 	kernel size + non-cache pte seg +
		 *	bitmap size + cpr state file headers size
		 * (round up to fs->fs_bsize)
		 */
		size = blkroundup(ip->i_fs, size);

		/*
		 * Export the estimated filesize info; this value will be
		 * compared before dumping out the statefile in the
		 * no-compression case.
		 */
		STAT->cs_est_statefsz = size;
		error = cpr_grow_statefile(vp, size);
		if (cpr_debug & (LEVEL1 | LEVEL6)) {
			rw_enter(&ip->i_contents, RW_READER);
			isize = ip->i_size;
			rw_exit(&ip->i_contents);
			errp("%s Estimated statefile size %lld, i_size %lld\n",
			    str, size, isize);
		}

		return (error);
	}
}
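
The size estimate above reduces to plain arithmetic: kernel pages are scaled by an
assumed compression percentage, user pages that must be swapped back in are scaled
by UCOMP_RATE (a ratio times ten), and the bitmap and descriptor overhead is added
uncompressed. The standalone sketch below reproduces just that arithmetic;
COMPRESS_PERCENT and INTEGRAL are assumed values here, since the excerpt does not
define them, and the input sizes are made up.

#include <stdio.h>

/* assumed values; the real constants live in the cpr headers and may differ */
#define COMPRESS_PERCENT	40	/* assume kernel pages compress to ~40% */
#define INTEGRAL		100
#define UCOMP_RATE		20	/* comp. ratio*10 for user pages, as above */

/* mirror of the two estimate branches in cpr_statefile_ok() */
static unsigned long long
estimate_statefile(unsigned long long ksize, unsigned long long raw_data,
    unsigned long long nback, int compressing)
{
	if (compressing)
		return (((ksize * COMPRESS_PERCENT) / INTEGRAL) +
		    raw_data + ((nback * 10) / UCOMP_RATE));
	return (ksize + raw_data + nback);
}

int main(void)
{
	/* made-up sizes in bytes, chosen only to exercise the arithmetic */
	unsigned long long ksize = 64ULL << 20;	/* kernel pages plus dvram */
	unsigned long long raw = 1ULL << 20;	/* bitmaps + cpd_t descriptors */
	unsigned long long nback = 32ULL << 20;	/* user pages short for swap */

	printf("compressed:   %llu bytes\n",
	    estimate_statefile(ksize, raw, nback, 1));
	printf("uncompressed: %llu bytes\n",
	    estimate_statefile(ksize, raw, nback, 0));
	return (0);
}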