Example #1
/*
 * The first round search is to find:
 * 1) a VGA device.
 * 2) a PCI VGA compatible device whose IO space is enabled
 *    and the VGA Enable bit of any PCI-PCI bridge above it is set.
 * If the first round search succeeds, prune the second round search.
 *
 * The second round search does not check the VGA Enable bit.
 *
 * Return the device path as the console fb path.
 */
char *
plat_fbpath(void)
{
	struct find_fb_dev_param param;
	static char *fbpath = NULL;
	static char fbpath_buf[MAXPATHLEN];

	/* first round search */
	param.found_dip = NULL;
	param.vga_enable = 1;
	ddi_walk_devs(ddi_root_node(), find_fb_dev, &param);

	if (param.found_dip != NULL) {
		(void) ddi_pathname(param.found_dip, fbpath_buf);
		fbpath = fbpath_buf;
		return (fbpath);
	}

	/*
	 * second round search, do not check the
	 * PCI_BCNF_BCNTRL_VGA_ENABLE bit
	 */
	param.found_dip = NULL;
	param.vga_enable = 0;
	ddi_walk_devs(ddi_root_node(), find_fb_dev, &param);

	if (param.found_dip == NULL)
		return (NULL);

	(void) ddi_pathname(param.found_dip, fbpath_buf);
	fbpath = fbpath_buf;
	return (fbpath);
}
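
The find_fb_dev callback itself is not shown here. Below is a minimal sketch of the shape such a ddi_walk_devs(9F) callback takes; node_is_display() and vga_path_enabled() are hypothetical helpers standing in for the real PCI class-code and bridge-control checks.

static int
find_fb_dev(dev_info_t *dip, void *arg)
{
	struct find_fb_dev_param *param = arg;

	/* node_is_display() is an assumed helper, not the original code. */
	if (!node_is_display(dip))
		return (DDI_WALK_CONTINUE);

	/*
	 * First round only: require the VGA Enable bit to be set in every
	 * PCI-PCI bridge above the device (assumed helper).
	 */
	if (param->vga_enable && !vga_path_enabled(dip))
		return (DDI_WALK_CONTINUE);

	param->found_dip = dip;
	return (DDI_WALK_TERMINATE);
}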
Example #2
/*
 * Before suspending devices first mark all device nodes busy. This
 * avoids a deadlock situation when another thread holds a device busy
 * and accesses an already suspended device.
 */
static int
sbdp_suspend_devices(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	int	rv;

	/* assumes dip is ddi_root_node so no ndi_devi_enter required */
	ASSERT(dip == ddi_root_node());
	ddi_walk_devs(dip, sbdp_suspend_devices_enter, NULL);
	rv = sbdp_suspend_devices_(dip, srh);
	ddi_walk_devs(dip, sbdp_suspend_devices_exit, NULL);
	return (rv);
}
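
The enter/exit callbacks are not shown above. A plausible sketch, assuming each node's lock is simply taken on the way in and dropped on the way out (DEVI() and devi_circular come from the private <sys/ddi_impldefs.h>):

static int
sbdp_suspend_devices_enter(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);

	/* Mark this node busy for the duration of the suspend. */
	ndi_devi_enter(dip, &devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}

static int
sbdp_suspend_devices_exit(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);

	ndi_devi_exit(dip, devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}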
Example #3
/*
 * vdds_find_node -- A common function to find a NIU nexus or NIU node.
 */
static dev_info_t *
vdds_find_node(uint64_t cookie, dev_info_t *sdip,
	int (*match_func)(dev_info_t *dip, void *arg))
{
	vdds_cb_arg_t arg;
	dev_info_t *pdip;
	int circ;

	DBG1(NULL, "Called cookie=%lx\n", cookie);

	arg.dip = NULL;
	arg.cookie = cookie;

	if ((pdip = ddi_get_parent(sdip)) != NULL) {
		ndi_devi_enter(pdip, &circ);
	}

	ddi_walk_devs(sdip, match_func, (void *)&arg);
	if (pdip != NULL) {
		ndi_devi_exit(pdip, circ);
	}

	DBG1(NULL, "Returning dip=0x%p", arg.dip);
	return (arg.dip);
}
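
Any match_func passed to vdds_find_node() follows the same contract: record the node in the vdds_cb_arg_t and terminate the walk. A hedged skeleton; the "niu-cookie" property name is an assumption made for illustration.

static int
vdds_match_node(dev_info_t *dip, void *arg)
{
	vdds_cb_arg_t *warg = arg;
	uint64_t cookie;

	/* The property carrying the cookie is assumed, not the original. */
	cookie = (uint64_t)ddi_prop_get_int64(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "niu-cookie", 0);
	if (cookie != warg->cookie)
		return (DDI_WALK_CONTINUE);

	warg->dip = dip;
	return (DDI_WALK_TERMINATE);
}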
Example #4
/*
 * Routine used to setup a newly inserted CPU in preparation for starting
 * it running code.
 */
int
mp_cpu_configure(int cpuid)
{
	extern void fill_cpu_ddi(dev_info_t *);
	extern int setup_cpu_common(int);
	struct mp_find_cpu_arg target;

	ASSERT(MUTEX_HELD(&cpu_lock));

	target.dip = NULL;
	target.cpuid = cpuid;
	ddi_walk_devs(ddi_root_node(), mp_find_cpu, &target);

	if (target.dip == NULL)
		return (ENODEV);

	/*
	 * Note: uses cpu_lock to protect cpunodes and ncpunodes
	 * which will be modified inside of fill_cpu_ddi().
	 */
	fill_cpu_ddi(target.dip);

	/*
	 * sun4v cpu setup may fail; sun4u assumes cpu setup is
	 * always successful, so the return value is ignored.
	 */
	(void) setup_cpu_common(cpuid);

	return (0);
}
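
mp_find_cpu lives elsewhere in the file; here is a hedged sketch of what it plausibly does. Matching cpu nodes by name and reading the id from the "reg" property are assumptions of this sketch.

static int
mp_find_cpu(dev_info_t *dip, void *arg)
{
	struct mp_find_cpu_arg *target = arg;
	int cpuid;

	if (strcmp(ddi_node_name(dip), "cpu") != 0)
		return (DDI_WALK_CONTINUE);

	/* Taking "reg" as the cpu id is an assumption of this sketch. */
	cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", -1);
	if (cpuid != target->cpuid)
		return (DDI_WALK_CONTINUE);

	target->dip = dip;
	return (DDI_WALK_TERMINATE);
}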
Example #5
int
gfxp_pci_device_present(uint16_t vendor, uint16_t device)
{
	gfxp_pci_bsf_t	*pci_bsf;
	int		rv;

	/*
	 * Find a PCI device based on its device and vendor id.
	 */

	if ((pci_bsf = kmem_zalloc(sizeof (gfxp_pci_bsf_t), KM_SLEEP)) == NULL)
	    return (0);

	pci_bsf->vendor = vendor;
	pci_bsf->device = device;
	ddi_walk_devs(ddi_root_node(), gfxp_pci_find_vd, pci_bsf);

	if (pci_bsf->found) {
		rv = 1;
	} else {
		rv = 0;
	}

	kmem_free(pci_bsf, sizeof (gfxp_pci_bsf_t));

	return (rv);
}
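
gfxp_pci_find_vd is the matching callback; a sketch against the standard PCI "vendor-id"/"device-id" node properties. Only the found/vendor/device members are known from the callers here; everything else is assumed.

static int
gfxp_pci_find_vd(dev_info_t *dip, void *arg)
{
	gfxp_pci_bsf_t *pci_bsf = arg;
	int vendor, device;

	vendor = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (vendor != pci_bsf->vendor || device != pci_bsf->device)
		return (DDI_WALK_CONTINUE);

	pci_bsf->found = 1;
	return (DDI_WALK_TERMINATE);
}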
Example #6
int
uadmin(int cmd, int fcn, uintptr_t mdep)
{
	int error = 0, rv = 0;
	size_t nbytes = 0;
	cred_t *credp = CRED();
	char *bootargs = NULL;
	int reset_status = 0;

	if (cmd == A_SHUTDOWN && fcn == AD_FASTREBOOT_DRYRUN) {
		ddi_walk_devs(ddi_root_node(), check_driver_quiesce,
		    &reset_status);
		if (reset_status != 0)
			return (EIO);
		else
			return (0);
	}

	/*
	 * The swapctl system call doesn't have its own entry point: it uses
	 * uadmin as a wrapper so we just call it directly from here.
	 */
	if (cmd == A_SWAPCTL) {
		if (get_udatamodel() == DATAMODEL_NATIVE)
			error = swapctl(fcn, (void *)mdep, &rv);
#if defined(_SYSCALL32_IMPL)
		else
			error = swapctl32(fcn, (void *)mdep, &rv);
#endif /* _SYSCALL32_IMPL */
		return (error ? set_errno(error) : rv);
	}

	/*
	 * Certain subcommands interpret a non-NULL mdep value as a pointer to
	 * a boot string.  We pull that in as bootargs, if applicable.
	 */
	if (mdep != NULL &&
	    (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_DUMP ||
	    cmd == A_FREEZE || cmd == A_CONFIG)) {
		bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
		if ((error = copyinstr((const char *)mdep, bootargs,
		    BOOTARGS_MAX, &nbytes)) != 0) {
			kmem_free(bootargs, BOOTARGS_MAX);
			return (set_errno(error));
		}
	}

	/*
	 * Invoke the appropriate kadmin() routine.
	 */
	if (getzoneid() != GLOBAL_ZONEID)
		error = zone_kadmin(cmd, fcn, bootargs, credp);
	else
		error = kadmin(cmd, fcn, bootargs, credp);

	if (bootargs != NULL)
		kmem_free(bootargs, BOOTARGS_MAX);
	return (error ? set_errno(error) : 0);
}
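
For reference, the dry-run path above is reachable from userland through uadmin(2); a minimal sketch (privileges permitting):

#include <sys/uadmin.h>
#include <stdio.h>

int
main(void)
{
	/* Ask the kernel whether every driver can quiesce for fast reboot. */
	if (uadmin(A_SHUTDOWN, AD_FASTREBOOT_DRYRUN, 0) != 0)
		perror("fast reboot dry run");
	return (0);
}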
Example #7
gfxp_acc_handle_t
gfxp_pci_init_handle(uint8_t bus, uint8_t slot, uint8_t function,
	uint16_t *vendor, uint16_t *device)
{
	dev_info_t	*dip;
	gfxp_pci_bsf_t	*pci_bsf;

	/*
	 * Find a PCI device based on its address, and return a unique handle
	 * to be used in subsequent calls to read from or write to the config
	 * space of this device.
	 */

	if ((pci_bsf = kmem_zalloc(sizeof (gfxp_pci_bsf_t), KM_SLEEP))
			== NULL) {
		return (NULL);
	}

	pci_bsf->bus = bus;
	pci_bsf->slot = slot;
	pci_bsf->function = function;

	ddi_walk_devs(ddi_root_node(), gfxp_pci_find_bsf, pci_bsf);

	if (pci_bsf->found) {
		dip = pci_bsf->dip;

		if (vendor)
			*vendor = pci_bsf->vendor;
		if (device)
			*device = pci_bsf->device;
	} else {
		dip = NULL;
		if (vendor)
			*vendor = 0x0000;
		if (device)
			*device = 0x0000;
	}

	kmem_free(pci_bsf, sizeof (gfxp_pci_bsf_t));

	return ((gfxp_acc_handle_t)dip);
}
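
gfxp_pci_find_bsf presumably decodes bus/slot/function from the first "reg" entry of each node. A sketch using the PCI_REG_*_G macros from <sys/pci.h>; this is an illustration, not the original source.

static int
gfxp_pci_find_bsf(dev_info_t *dip, void *arg)
{
	gfxp_pci_bsf_t *pci_bsf = arg;
	pci_regspec_t *regs;
	uint_t nelem;
	int match;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regs, &nelem) != DDI_SUCCESS)
		return (DDI_WALK_CONTINUE);

	match = PCI_REG_BUS_G(regs[0].pci_phys_hi) == pci_bsf->bus &&
	    PCI_REG_DEV_G(regs[0].pci_phys_hi) == pci_bsf->slot &&
	    PCI_REG_FUNC_G(regs[0].pci_phys_hi) == pci_bsf->function;
	ddi_prop_free(regs);
	if (!match)
		return (DDI_WALK_CONTINUE);

	pci_bsf->found = 1;
	pci_bsf->dip = dip;
	pci_bsf->vendor = (uint16_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "vendor-id", 0);
	pci_bsf->device = (uint16_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "device-id", 0);
	return (DDI_WALK_TERMINATE);
}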
Example #8
/*
 * Return the devinfo node for a boot device
 */
static dev_info_t *
path_to_devinfo(char *path)
{
	struct i_path_findnode fn;
	extern dev_info_t *top_devinfo;

	/*
	 * Get the nodeid of the given pathname, if such a mapping exists.
	 */
	fn.dip = NULL;
	fn.nodeid = prom_finddevice(path);
	if (fn.nodeid != OBP_BADNODE) {
		/*
		 * Find the nodeid in our copy of the device tree and return
		 * whatever name we used to bind this node to a driver.
		 */
		ddi_walk_devs(top_devinfo, i_path_find_node, (void *)(&fn));
	}

#ifdef	DEBUG
	/*
	 * If we're bound to something other than the nodename,
	 * note that in the message buffer and system log.
	 */
	if (fn.dip) {
		char *p, *q;

		p = ddi_binding_name(fn.dip);
		q = ddi_node_name(fn.dip);
		if (p && q && (strcmp(p, q) != 0)) {
			BMDPRINTF(("path_to_devinfo: %s bound to %s\n",
			    path, p));
		}
	}
#endif	/* DEBUG */

	return (fn.dip);
}
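
i_path_find_node only needs to compare PROM nodeids. A sketch; ddi_get_nodeid(9F) is the real interface, and the struct layout follows the usage above.

static int
i_path_find_node(dev_info_t *dip, void *arg)
{
	struct i_path_findnode *f = arg;

	if (ddi_get_nodeid(dip) == f->nodeid) {
		f->dip = dip;
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}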
Example #9
int
dr_suspend(dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	int		force;
	int		dev_errs_idx;
	uint64_t	dev_errs[DR_MAX_ERR_INT];
	int		rc = DDI_SUCCESS;

	handle = srh->sr_dr_handlep;

	force = dr_cmd_flags(handle) & SBD_FLAG_FORCE;

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	i_ndi_block_device_tree_changes(&handle->h_ndi);

	prom_printf("\nDR: suspending user threads...\n");
	srh->sr_suspend_state = DR_SRSTATE_USER;
	if (((rc = dr_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    dr_check_user_stop_result) {
		dr_resume(srh);
		return (rc);
	}

	if (!force) {
		struct dr_ref drc = {0};

		prom_printf("\nDR: checking devices...\n");
		dev_errs_idx = 0;

		drc.arr = dev_errs;
		drc.idx = &dev_errs_idx;
		drc.len = DR_MAX_ERR_INT;

		/*
		 * Since the root node can never go away, it
		 * doesn't have to be held.
		 */
		ddi_walk_devs(ddi_root_node(), dr_check_unsafe_major, &drc);
		if (dev_errs_idx) {
			handle->h_err = drerr_int(ESBD_UNSAFE, dev_errs,
				dev_errs_idx, 1);
			dr_resume(srh);
			return (DDI_FAILURE);
		}
		PR_QR("done\n");
	} else {
		prom_printf("\nDR: dr_suspend invoked with force flag\n");
	}

#ifndef	SKIP_SYNC
	/*
	 * This sync swaps out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef	SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	prom_printf("DR: suspending drivers...\n");
	srh->sr_suspend_state = DR_SRSTATE_DRIVER;
	srh->sr_err_idx = 0;
	/* No parent to hold busy */
	if ((rc = dr_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		if (srh->sr_err_idx && srh->sr_dr_handlep) {
			(srh->sr_dr_handlep)->h_err = drerr_int(ESBD_SUSPEND,
				srh->sr_err_ints, srh->sr_err_idx, 1);
		}
		dr_resume(srh);
		return (rc);
	}

	drmach_suspend_last();

	/*
	 * finally, grab all cpus
	 */
	srh->sr_suspend_state = DR_SRSTATE_FULL;

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		srh->sr_flags |= SR_FLAG_WATCHDOG;
	} else {
		srh->sr_flags &= ~(SR_FLAG_WATCHDOG);
	}

	/*
	 * Update the signature block.
	 * This must be done before cpus are paused, since on Starcat the
	 * cpu signature update acquires an adaptive mutex in the iosram driver.
	 * Blocking with cpus paused can lead to deadlock.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	dr_stop_intr();

	return (rc);
}
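
dr_check_unsafe_major collects the majors of bound, DR-unsafe drivers into the caller's array. A hedged sketch; dr_is_unsafe_major() stands in for whatever unsafe-driver test the real code applies.

static int
dr_check_unsafe_major(dev_info_t *dip, void *arg)
{
	struct dr_ref *rp = arg;
	major_t major = ddi_driver_major(dip);

	if (major == DDI_MAJOR_T_NONE)	/* node has no bound driver */
		return (DDI_WALK_CONTINUE);

	/* dr_is_unsafe_major() is an assumed helper for this sketch. */
	if (dr_is_unsafe_major(major) && *rp->idx < rp->len)
		rp->arr[(*rp->idx)++] = (uint64_t)major;

	return (DDI_WALK_CONTINUE);
}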
Example #10
/*ARGSUSED*/
static void
fipe_ioat_alloc(void *arg)
{
	int idx, flags, rc = 0;
	uint64_t physaddr;
	boolean_t fatal = B_FALSE;
	dcopy_query_t info;
	dcopy_handle_t handle;
	dcopy_cmd_t cmds[FIPE_IOAT_CMD_NUM + 1];

	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	/*
	 * fipe_ioat_alloc() is called in DEVICE ATTACH context when loaded.
	 * In DEVICE ATTACH context, it can't call ddi_walk_devs(), so just
	 * schedule a timer and exit.
	 */
	if (fipe_ioat_ctrl.ioat_try_alloc == B_FALSE) {
		fipe_ioat_ctrl.ioat_try_alloc = B_TRUE;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		goto out_error;
	}

	/*
	 * Check whether device has been initialized or if it encountered
	 * some permanent error.
	 */
	if (fipe_ioat_ctrl.ioat_ready || fipe_ioat_ctrl.ioat_failed ||
	    fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}

	if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
		/* Find dev_info_t for IOAT engine. */
		ddi_walk_devs(ddi_root_node(), fipe_search_ioat_dev, NULL);
		if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
			cmn_err(CE_NOTE,
			    "!fipe: no IOAT hardware found, disable pm.");
			mutex_exit(&fipe_ioat_ctrl.ioat_lock);
			fatal = B_TRUE;
			goto out_error;
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);

	/* Check, allocate and initialize IOAT resources with lock released. */
	dcopy_query(&info);
	if (info.dq_version < DCOPY_QUERY_V0) {
		/* Permanent error, give up. */
		cmn_err(CE_WARN, "!fipe: IOAT driver version mismatch.");
		fatal = B_TRUE;
		goto out_error;
	} else if (info.dq_num_channels == 0) {
		/* IOAT driver hasn't been loaded, keep trying. */
		goto out_error;
	}

	/* Allocate IOAT channel. */
	rc = dcopy_alloc(DCOPY_NOSLEEP, &handle);
	if (rc == DCOPY_NORESOURCES) {
		/* Resource temporarily not available, keep trying. */
		goto out_error;
	} else if (rc != DCOPY_SUCCESS) {
		/* Permanent error, give up. */
		cmn_err(CE_WARN, "!fipe: failed to allocate IOAT channel.");
		fatal = B_TRUE;
		goto out_error;
	}

	/*
	 * Allocate multiple IOAT commands and organize them into a ring to
 * loop forever. The number of commands is determined by the IOAT
 * descriptor size and the memory interleave pattern.
 * cmd[0] is used to break the loop and disable IOAT operation.
	 * cmd[1, FIPE_IOAT_CMD_NUM] are grouped into a ring and cmd[1] is the
	 * list head.
	 */
	bzero(cmds, sizeof (cmds));
	physaddr = fipe_ioat_ctrl.ioat_buf_physaddr;
	for (idx = FIPE_IOAT_CMD_NUM; idx >= 0; idx--) {
		/* Allocate IOAT commands. */
		if (idx == 0 || idx == FIPE_IOAT_CMD_NUM) {
			flags = DCOPY_NOSLEEP;
		} else {
			/*
			 * To link commands into a list, the initial value of
			 * each cmd needs to be set to the next cmd on the list.
			 */
			flags = DCOPY_NOSLEEP | DCOPY_ALLOC_LINK;
			cmds[idx] = cmds[idx + 1];
		}
		rc = dcopy_cmd_alloc(handle, flags, &cmds[idx]);
		if (rc == DCOPY_NORESOURCES) {
			goto out_freecmd;
		} else if (rc != DCOPY_SUCCESS) {
			/* Permanent error, give up. */
			cmn_err(CE_WARN,
			    "!fipe: failed to allocate IOAT command.");
			fatal = B_TRUE;
			goto out_freecmd;
		}

		/* Disable src/dst snoop to improve CPU cache efficiency. */
		cmds[idx]->dp_flags = DCOPY_CMD_NOSRCSNP | DCOPY_CMD_NODSTSNP;
		/* Specially handle commands on the list. */
		if (idx != 0) {
			/* Disable IOAT status. */
			cmds[idx]->dp_flags |= DCOPY_CMD_NOSTAT;
			/* Disable waiting for resources. */
			cmds[idx]->dp_flags |= DCOPY_CMD_NOWAIT;
			if (idx == 1) {
				/* The list head, chain command into loop. */
				cmds[idx]->dp_flags |= DCOPY_CMD_LOOP;
			} else {
				/* Queue all other commands except head. */
				cmds[idx]->dp_flags |= DCOPY_CMD_QUEUE;
			}
		}
		cmds[idx]->dp_cmd = DCOPY_CMD_COPY;
		cmds[idx]->dp.copy.cc_source = physaddr;
		cmds[idx]->dp.copy.cc_dest = physaddr + FIPE_MC_MEMORY_OFFSET;
		if (idx == 0) {
			/*
			 * Command 0 is used to cancel memory copy by breaking
			 * the ring created in fipe_ioat_trigger().
			 * For efficiency, use the smallest memory copy size.
			 */
			cmds[idx]->dp.copy.cc_size = 1;
		} else {
			cmds[idx]->dp.copy.cc_size = FIPE_MC_MEMORY_SIZE;
		}
	}

	/* Update IOAT control status if it hasn't been initialized yet. */
	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_handle = handle;
		for (idx = 0; idx <= FIPE_IOAT_CMD_NUM; idx++) {
			fipe_ioat_ctrl.ioat_cmds[idx] = cmds[idx];
		}
		fipe_ioat_ctrl.ioat_ready = B_TRUE;
		fipe_ioat_ctrl.ioat_failed = B_FALSE;
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
	/* Initialized by another thread, fall through to free resources. */

out_freecmd:
	if (cmds[0] != NULL) {
		dcopy_cmd_free(&cmds[0]);
	}
	/* Only need to free head, dcopy will free all commands on the list. */
	for (idx = 1; idx <= FIPE_IOAT_CMD_NUM; idx++) {
		if (cmds[idx] != NULL) {
			dcopy_cmd_free(&cmds[idx]);
			break;
		}
	}
	dcopy_free(&handle);

out_error:
	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	fipe_ioat_ctrl.ioat_timerid = 0;
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		if (fatal) {
			/* Mark permanent error and give up. */
			fipe_ioat_ctrl.ioat_failed = B_TRUE;
			/* Release reference count held by ddi_find_devinfo. */
			if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
				ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
				fipe_ioat_ctrl.ioat_dev_info = NULL;
			}
		} else {
			/*
			 * Schedule another timer to keep on trying.
			 * timeout() should always succeed, no need to check.
			 */
			fipe_ioat_ctrl.ioat_timerid = timeout(fipe_ioat_alloc,
			    NULL, drv_usectohz(FIPE_IOAT_RETRY_INTERVAL));
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
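
fipe_search_ioat_dev, used by both versions of fipe_ioat_alloc() shown here, matches the IOAT engine and holds the node. A hedged sketch; fipe_is_ioat_id() is an assumed helper and the property-based test is illustrative.

static int
fipe_search_ioat_dev(dev_info_t *dip, void *arg)
{
	int vendor, device;

	vendor = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	if (!fipe_is_ioat_id(vendor, device))	/* assumed helper */
		return (DDI_WALK_CONTINUE);

	/* Hold the node; it is released later via ndi_rele_devi(). */
	ndi_hold_devi(dip);
	fipe_ioat_ctrl.ioat_dev_info = dip;
	return (DDI_WALK_TERMINATE);
}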
Example #11
/*ARGSUSED*/
static void
fipe_ioat_alloc(void *arg)
{
	int rc = 0, nregs;
	dev_info_t *dip;
	ddi_device_acc_attr_t attr;
	boolean_t fatal = B_FALSE;

	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	/*
	 * fipe_ioat_alloc() is called in DEVICE ATTACH context when loaded.
	 * In DEVICE ATTACH context, it can't call ddi_walk_devs(), so just
	 * schedule a timer and exit.
	 */
	if (fipe_ioat_ctrl.ioat_try_alloc == B_FALSE) {
		fipe_ioat_ctrl.ioat_try_alloc = B_TRUE;
		goto out_error;
	}

	/* Check whether it has been initialized or hit a permanent error. */
	if (fipe_ioat_ctrl.ioat_ready || fipe_ioat_ctrl.ioat_failed ||
	    fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}

	if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
		/* Find dev_info_t for IOAT engine. */
		ddi_walk_devs(ddi_root_node(), fipe_search_ioat_dev, NULL);
		if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
			cmn_err(CE_NOTE,
			    "!fipe: no IOAT hardware found, disable pm.");
			fatal = B_TRUE;
			goto out_error;
		}
	}

	/* Map in IOAT control register window. */
	ASSERT(fipe_ioat_ctrl.ioat_dev_info != NULL);
	ASSERT(fipe_ioat_ctrl.ioat_reg_mapped == B_FALSE);
	dip = fipe_ioat_ctrl.ioat_dev_info;
	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS || nregs < 2) {
		cmn_err(CE_WARN, "!fipe: ioat does not have enough register bars.");
		fatal = B_TRUE;
		goto out_error;
	}
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	rc = ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&fipe_ioat_ctrl.ioat_reg_addr,
	    0, 0, &attr, &fipe_ioat_ctrl.ioat_reg_handle);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!fipe: failed to map IOAT registers.");
		fatal = B_TRUE;
		goto out_error;
	}

	/* Mark IOAT status. */
	fipe_ioat_ctrl.ioat_reg_mapped = B_TRUE;
	fipe_ioat_ctrl.ioat_ready = B_TRUE;
	fipe_ioat_ctrl.ioat_failed = B_FALSE;
	fipe_ioat_ctrl.ioat_timerid = 0;
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);

	return;

out_error:
	fipe_ioat_ctrl.ioat_timerid = 0;
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		if (fatal) {
			/* Mark permanent error and give up. */
			fipe_ioat_ctrl.ioat_failed = B_TRUE;
			/* Release reference count held by ddi_find_devinfo. */
			if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
				ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
				fipe_ioat_ctrl.ioat_dev_info = NULL;
			}
		} else {
			/*
			 * Schedule another timer to keep on trying.
			 * timeout() should always succeed, no need to check
			 * return.
			 */
			fipe_ioat_ctrl.ioat_timerid = timeout(fipe_ioat_alloc,
			    NULL, drv_usectohz(FIPE_IOAT_RETRY_INTERVAL));
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
Example #12
File: busra.c  Project: andreiw/polaris
/*
 * Setup resource map for the pci bus node based on the "available"
 * property and "bus-range" property.
 */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct bus_range pci_bus_range;
	struct busnum_ctrl ctrl;
	int circular_count;
	int rval = NDI_SUCCESS;

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci/pci-ex bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from pci nexus driver.
	 * However that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
	 * are updated to call pci_resource_setup this portion of the
	 * code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}


	/*
	 * Create empty resource maps first.
	 *
	 * NOTE: If all the allocated resources are already assigned to
	 * device(s) in the hot plug slot then "available" property may not
	 * be present. But, subsequent hot plug operation may unconfigure
	 * the device in the slot and try to free up its resources. So,
	 * at the minimum we should create empty maps here.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
		/*
		 * create the available resource list for both memory and
		 * io space
		 */
		rcount = rlen / sizeof (pci_regspec_t);
		for (i = 0; i < rcount; i++) {
			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
				(void) ndi_ra_free(dip,
				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
				    ((uint64_t)(regs[i].pci_phys_low)),
				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
				    ((uint64_t)(regs[i].pci_size_low)),
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_IO):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    NDI_RA_TYPE_IO,
				    0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
				break;
			default:
				cmn_err(CE_WARN,
				    "pci_resource_setup: bad addr type: %x\n",
				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
				break;
			}
		}
		kmem_free((caddr_t)regs, rlen);
	}

	/*
	 * update resource map for available bus numbers if the node
	 * has available-bus-range or bus-range property.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property. If, instead,
		 * we have a bus-range property we add all the bus numbers
		 * in that range to the free list but we must then scan
		 * for pci-pci bridges on this bus to find out if any of
		 * those bus numbers are already in use. If so, we reclaim
		 * them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
		    &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
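
claim_pci_busnum looks for pci-pci bridges among the children and pulls each bridge's bus range back out of the free list. A sketch of the idea using ndi_ra_alloc() with a specified address; the property handling is simplified and this is not the original source.

static int
claim_pci_busnum(dev_info_t *dip, void *arg)
{
	struct busnum_ctrl *ctrl = arg;
	struct bus_range range;
	ndi_ra_request_t req;
	uint64_t base, len;
	int plen;

	/* Only pci-pci bridge nodes carry a "bus-range" property. */
	plen = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "bus-range", (caddr_t)&range, &plen) != DDI_SUCCESS)
		return (DDI_WALK_CONTINUE);

	/* Claim (allocate) exactly the range this bridge already uses. */
	bzero(&req, sizeof (req));
	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
	req.ra_addr = (uint64_t)range.lo;
	req.ra_len = (uint64_t)range.hi - (uint64_t)range.lo + 1;
	if (ndi_ra_alloc(ctrl->dip, &req, &base, &len,
	    NDI_RA_TYPE_PCI_BUSNUM, 0) != NDI_SUCCESS) {
		ctrl->rv = DDI_FAILURE;
		return (DDI_WALK_TERMINATE);
	}

	/* Children of a claimed bridge draw from the bridge's range. */
	return (DDI_WALK_PRUNECHILD);
}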
Example #13
void
main(void)
{
	proc_t		*p = ttoproc(curthread);	/* &p0 */
	int		(**initptr)();
	extern void	sched();
	extern void	fsflush();
	extern int	(*init_tbl[])();
	extern int	(*mp_init_tbl[])();
	extern id_t	syscid, defaultcid;
	extern int	swaploaded;
	extern int	netboot;
	extern ib_boot_prop_t *iscsiboot_prop;
	extern void	vm_init(void);
	extern void	cbe_init_pre(void);
	extern void	cbe_init(void);
	extern void	clock_tick_init_pre(void);
	extern void	clock_tick_init_post(void);
	extern void	clock_init(void);
	extern void	physio_bufs_init(void);
	extern void	pm_cfb_setup_intr(void);
	extern int	pm_adjust_timestamps(dev_info_t *, void *);
	extern void	start_other_cpus(int);
	extern void	sysevent_evc_thrinit();
	extern kmutex_t	ualock;
#if defined(__x86)
	extern void	fastboot_post_startup(void);
	extern void	progressbar_start(void);
#endif
	/*
	 * In the horrible world of x86 in-lines, you can't get symbolic
	 * structure offsets a la genassym.  This assertion is here so
	 * that the next poor slob who innocently changes the offset of
	 * cpu_thread doesn't waste as much time as I just did finding
	 * out that it's hard-coded in i86/ml/i86.il.  Similarly for
	 * curcpup.  You're welcome.
	 */
	ASSERT(CPU == CPU->cpu_self);
	ASSERT(curthread == CPU->cpu_thread);
	ASSERT_STACK_ALIGNED();

	/*
	 * We take the ualock until we have completed the startup
	 * to prevent kadmin() from disrupting this work. In particular,
	 * we don't want kadmin() to bring the system down while we are
	 * trying to start it up.
	 */
	mutex_enter(&ualock);

	/*
	 * Setup root lgroup and leaf lgroup for CPU 0
	 */
	lgrp_init(LGRP_INIT_STAGE2);

	/*
	 * Once 'startup()' completes, the thread_reaper() daemon is
	 * created (in thread_init()). After that, it is safe to create
	 * threads that can exit. These exited threads will get reaped.
	 */
	startup();
	segkmem_gc();
	callb_init();
	cbe_init_pre();	/* x86 must initialize gethrtimef before timer_init */
	ddi_periodic_init();
	cbe_init();
	callout_init();	/* callout table MUST be init'd after cyclics */
	clock_tick_init_pre();
	clock_init();

#if defined(__x86)
	/*
	 * The progressbar thread uses cv_reltimedwait() and hence needs to be
	 * started after the callout mechanism has been initialized.
	 */
	progressbar_start();
#endif
	/*
	 * On some platforms, clkinitf() changes the timing source that
	 * gethrtime_unscaled() uses to generate timestamps.  cbe_init() calls
	 * clkinitf(), so re-initialize the microstate counters after the
	 * timesource has been chosen.
	 */
	init_mstate(&t0, LMS_SYSTEM);
	init_cpu_mstate(CPU, CMS_SYSTEM);

	/*
	 * May need to probe to determine latencies from CPU 0 after
	 * gethrtime() comes alive in cbe_init() and before enabling interrupts
	 * and copy and release any temporary memory allocated with BOP_ALLOC()
	 * before release_bootstrap() frees boot memory
	 */
	lgrp_init(LGRP_INIT_STAGE3);

	/*
	 * Call all system initialization functions.
	 */
	for (initptr = &init_tbl[0]; *initptr; initptr++)
		(**initptr)();
	/*
	 * Load iSCSI boot properties
	 */
	ld_ib_prop();
	/*
	 * initialize vm related stuff.
	 */
	vm_init();

	/*
	 * initialize buffer pool for raw I/O requests
	 */
	physio_bufs_init();

	ttolwp(curthread)->lwp_error = 0; /* XXX kludge for SCSI driver */

	/*
	 * Drop the interrupt level and allow interrupts.  At this point
	 * the DDI guarantees that interrupts are enabled.
	 */
	(void) spl0();
	interrupts_unleashed = 1;

	/*
	 * Create kmem cache for proc structures
	 */
	process_cache = kmem_cache_create("process_cache", sizeof (proc_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	vfs_mountroot();	/* Mount the root file system */
	errorq_init();		/* after vfs_mountroot() so DDI root is ready */
	cpu_kstat_init(CPU);	/* after vfs_mountroot() so TOD is valid */
	ddi_walk_devs(ddi_root_node(), pm_adjust_timestamps, NULL);
				/* after vfs_mountroot() so hrestime is valid */

	post_startup();
	swaploaded = 1;

	/*
	 * Initialize Solaris Audit Subsystem
	 */
	audit_init();

	/*
	 * Plumb the protocol modules and drivers only if we are not
	 * network booted; in that case we already did it in rootconf().
	 */
	if (netboot == 0 && iscsiboot_prop == NULL)
		(void) strplumb();

	gethrestime(&PTOU(curproc)->u_start);
	curthread->t_start = PTOU(curproc)->u_start.tv_sec;
	p->p_mstart = gethrtime();

	/*
	 * Perform setup functions that can only be done after root
	 * and swap have been set up.
	 */
	consconfig();
#ifndef	__sparc
	release_bootstrap();
#endif

	/*
	 * Attach drivers with the ddi-forceattach property.
	 * This must be done early enough to load hotplug drivers (e.g.
	 * the pcmcia nexus) so that devices enumerated via hotplug are
	 * available before the I/O subsystem is fully initialized.
	 */
	i_ddi_forceattach_drivers();

	/*
	 * Set the scan rate and other parameters of the paging subsystem.
	 */
	setupclock(0);

	/*
	 * Initialize process 0's lwp directory and lwpid hash table.
	 */
	p->p_lwpdir = p->p_lwpfree = p0_lwpdir;
	p->p_lwpdir->ld_next = p->p_lwpdir + 1;
	p->p_lwpdir_sz = 2;
	p->p_tidhash = p0_tidhash;
	p->p_tidhash_sz = 2;
	p0_lep.le_thread = curthread;
	p0_lep.le_lwpid = curthread->t_tid;
	p0_lep.le_start = curthread->t_start;
	lwp_hash_in(p, &p0_lep, p0_tidhash, 2, 0);

	/*
	 * Initialize extended accounting.
	 */
	exacct_init();

	/*
	 * Initialize threads of sysevent event channels
	 */
	sysevent_evc_thrinit();

	/*
	 * This must be done after post_startup() but before
	 * start_other_cpus()
	 */
	lgrp_init(LGRP_INIT_STAGE4);

	/*
	 * Perform MP initialization, if any.
	 */
	start_other_cpus(0);

#ifdef	__sparc
	/*
	 * Release bootstrap here since PROM interfaces are
	 * used to start other CPUs above.
	 */
	release_bootstrap();
#endif

	/*
	 * Finish lgrp initialization after all CPUS are brought online.
	 */
	lgrp_init(LGRP_INIT_STAGE5);

	/*
	 * After mp_init(), the number of cpus is known (this is
	 * true for the time being; once there are actually
	 * hot-pluggable cpus this scheme will not do).
	 * Any per-cpu initialization is done here.
	 */
	kmem_mp_init();
	vmem_update(NULL);

	clock_tick_init_post();

	for (initptr = &mp_init_tbl[0]; *initptr; initptr++)
		(**initptr)();

	/*
	 * These must be called after start_other_cpus
	 */
	pm_cfb_setup_intr();
#if defined(__x86)
	fastboot_post_startup();
#endif

	/*
	 * Make init process; enter scheduling loop with system process.
	 *
	 * Note that we manually assign the pids for these processes, for
	 * historical reasons.  If more pre-assigned pids are needed,
	 * FAMOUS_PIDS will have to be updated.
	 */

	/* create init process */
	if (newproc(start_init, NULL, defaultcid, 59, NULL,
	    FAMOUS_PID_INIT))
		panic("main: unable to fork init.");

	/* create pageout daemon */
	if (newproc(pageout, NULL, syscid, maxclsyspri - 1, NULL,
	    FAMOUS_PID_PAGEOUT))
		panic("main: unable to fork pageout()");

	/* create fsflush daemon */
	if (newproc(fsflush, NULL, syscid, minclsyspri, NULL,
	    FAMOUS_PID_FSFLUSH))
		panic("main: unable to fork fsflush()");

	/* create cluster process if we're a member of one */
	if (cluster_bootflags & CLUSTER_BOOTED) {
		if (newproc(cluster_wrapper, NULL, syscid, minclsyspri,
		    NULL, 0)) {
			panic("main: unable to fork cluster()");
		}
	}

	/*
	 * Create system threads (threads are associated with p0)
	 */

	/* create module uninstall daemon */
	/* BugID 1132273. If swapping over NFS we need a bigger stack */
	(void) thread_create(NULL, 0, (void (*)())mod_uninstall_daemon,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	(void) thread_create(NULL, 0, seg_pasync_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	pid_setmin();

	/* system is now ready */
	mutex_exit(&ualock);

	bcopy("sched", PTOU(curproc)->u_psargs, 6);
	bcopy("sched", PTOU(curproc)->u_comm, 5);
	sched();
	/* NOTREACHED */
}
Example #14
/*
 * Top level routine to direct suspend/resume of a domain.
 */
void
xen_suspend_domain(void)
{
	extern void rtcsync(void);
	extern void ec_resume(void);
	extern kmutex_t ec_lock;
	struct xen_add_to_physmap xatp;
	ulong_t flags;
	int err;

	cmn_err(CE_NOTE, "Domain suspending for save/migrate");

	SUSPEND_DEBUG("xen_suspend_domain\n");

	/*
	 * We only want to suspend the PV devices, since the emulated devices
	 * are suspended by saving the emulated device state.  The PV devices
	 * are all children of the xpvd nexus device.  So we search the
	 * device tree for the xpvd node to use as the root of the tree to
	 * be suspended.
	 */
	if (xpvd_dip == NULL)
		ddi_walk_devs(ddi_root_node(), check_xpvd, NULL);

	/*
	 * suspend interrupts and devices
	 */
	if (xpvd_dip != NULL)
		(void) xen_suspend_devices(ddi_get_child(xpvd_dip));
	else
		cmn_err(CE_WARN, "No PV devices found to suspend");
	SUSPEND_DEBUG("xenbus_suspend\n");
	xenbus_suspend();

	mutex_enter(&cpu_lock);

	/*
	 * Suspend on vcpu 0
	 */
	thread_affinity_set(curthread, 0);
	kpreempt_disable();

	if (ncpus > 1)
		pause_cpus(NULL, NULL);
	/*
	 * We can grab the ec_lock as it's a spinlock with a high SPL. Hence
	 * any holder would have dropped it to get through pause_cpus().
	 */
	mutex_enter(&ec_lock);

	/*
	 * From here on in, we can't take locks.
	 */

	flags = intr_clear();

	SUSPEND_DEBUG("HYPERVISOR_suspend\n");
	/*
	 * At this point we suspend and sometime later resume.
	 * Note that this call may return with an indication of a cancelled
	 * suspend; for now, no matter what the return value is, we do a
	 * full resume of all suspended drivers, etc.
	 */
	(void) HYPERVISOR_shutdown(SHUTDOWN_suspend);

	/*
	 * Point HYPERVISOR_shared_info to the proper place.
	 */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = xen_shared_info_frame;
	if ((err = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) != 0)
		panic("Could not set shared_info page. error: %d", err);

	SUSPEND_DEBUG("gnttab_resume\n");
	gnttab_resume();

	SUSPEND_DEBUG("ec_resume\n");
	ec_resume();

	intr_restore(flags);

	if (ncpus > 1)
		start_cpus();

	mutex_exit(&ec_lock);
	mutex_exit(&cpu_lock);

	/*
	 * Now we can take locks again.
	 */

	rtcsync();

	SUSPEND_DEBUG("xenbus_resume\n");
	xenbus_resume();
	SUSPEND_DEBUG("xen_resume_devices\n");
	if (xpvd_dip != NULL)
		(void) xen_resume_devices(ddi_get_child(xpvd_dip), 0);

	thread_affinity_clear(curthread);
	kpreempt_enable();

	SUSPEND_DEBUG("finished xen_suspend_domain\n");

	cmn_err(CE_NOTE, "domain restore/migrate completed");
}
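
check_xpvd just latches the xpvd nexus into the xpvd_dip global by node name; a plausible sketch:

static int
check_xpvd(dev_info_t *dip, void *arg)
{
	if (strcmp(ddi_node_name(dip), "xpvd") != 0)
		return (DDI_WALK_CONTINUE);

	xpvd_dip = dip;
	return (DDI_WALK_TERMINATE);
}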