Code example #1
/*
 * Establish an interrupt handler.
 * Called by driver attach functions.
 */
void *
isrlink(int (*func)(void *), void *arg, int ipl, int priority)
{
	struct isr *newisr, *curisr;
	isr_list_t *list;

	if ((ipl < 0) || (ipl >= NISR))
		panic("isrlink: bad ipl %d", ipl);

	newisr = (struct isr *)malloc(sizeof(struct isr), M_DEVBUF, M_NOWAIT);
	if (newisr == NULL)
		panic("isrlink: can't allocate space for isr");

	/* Fill in the new entry. */
	newisr->isr_func = func;
	newisr->isr_arg = arg;
	newisr->isr_ipl = ipl;
	newisr->isr_priority = priority;

	/*
	 * Some devices are particularly sensitive to interrupt
	 * handling latency.  The DCA, for example, can lose many
	 * characters if its interrupt isn't handled with reasonable
	 * speed.
	 *
	 * To work around this problem, each device can give itself a
	 * "priority".  An unbuffered DCA would give itself a higher
	 * priority than a SCSI device, for example.
	 *
	 * This is necessary because of the flat spl scheme employed by
	 * the hp300.  Each device can be set from ipl 3 to ipl 5, which
	 * in turn means that splbio, splnet, and spltty must all be at
	 * spl5.
	 *
	 * Don't blame me...I just work here.
	 */

	/*
	 * Get the appropriate ISR list.  If the list is empty, no
	 * additional work is necessary; we simply insert ourselves
	 * at the head of the list.
	 */
	list = &isr_list[ipl];
	if (list->lh_first == NULL) {
		LIST_INSERT_HEAD(list, newisr, isr_link);
		goto done;
	}

	/*
	 * A little extra work is required.  We traverse the list
	 * and place ourselves after any ISRs with the same (or
	 * higher) priority.
	 */
	for (curisr = list->lh_first; curisr->isr_link.le_next != NULL;
	    curisr = curisr->isr_link.le_next) {
		if (newisr->isr_priority > curisr->isr_priority) {
			LIST_INSERT_BEFORE(curisr, newisr, isr_link);
			goto done;
		}
	}

	/*
	 * We're the least important entry, it seems.  We just go
	 * on the end.
	 */
	LIST_INSERT_AFTER(curisr, newisr, isr_link);

 done:
	return (newisr);
}
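The insertion idiom above works with any BSD-style <sys/queue.h> LIST. A minimal, self-contained user-space sketch of the same three cases (empty list, insert-before, append) follows; the node type and priorities are made up for illustration and are not part of the hp300 code:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int priority;
	LIST_ENTRY(node) link;
};

LIST_HEAD(nodelist, node);

/* Insert n so the list stays sorted by descending priority. */
static void
insert_by_priority(struct nodelist *list, struct node *n)
{
	struct node *cur;

	if (LIST_EMPTY(list)) {
		LIST_INSERT_HEAD(list, n, link);
		return;
	}
	for (cur = LIST_FIRST(list); LIST_NEXT(cur, link) != NULL;
	    cur = LIST_NEXT(cur, link)) {
		if (n->priority > cur->priority) {
			LIST_INSERT_BEFORE(cur, n, link);
			return;
		}
	}
	/* cur is now the final entry; compare against it too. */
	if (n->priority > cur->priority)
		LIST_INSERT_BEFORE(cur, n, link);
	else
		LIST_INSERT_AFTER(cur, n, link);
}

int
main(void)
{
	struct nodelist list = LIST_HEAD_INITIALIZER(list);
	int prios[] = { 2, 5, 1, 5, 3 };
	struct node *n;
	size_t i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		n = malloc(sizeof(*n));
		if (n == NULL)
			abort();
		n->priority = prios[i];
		insert_by_priority(&list, n);
	}
	LIST_FOREACH(n, &list, link)
		printf("%d ", n->priority);	/* prints: 5 5 3 2 1 */
	printf("\n");
	return 0;
}

Note one subtlety in isrlink() itself: its loop condition stops at the last entry without comparing against it, so a handler added to a one-element list is always appended, whatever its priority. The sketch above compares against the final entry as well.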
Code example #2
File: doi.c Project: ebichu/dd-wrt
void
doi_register (struct doi *doi)
{
  LIST_INSERT_HEAD (&doi_tab, doi, link);
}
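A one-line registration function like this presupposes a list head and an entry field declared elsewhere. A hedged sketch of what those declarations and a matching lookup could look like (the id field and doi_lookup() are illustrative assumptions, not taken from the dd-wrt/isakmpd source):

#include <stddef.h>
#include <sys/queue.h>

struct doi {
	int id;			/* hypothetical key field */
	LIST_ENTRY(doi) link;	/* linkage used by doi_register() */
};

static LIST_HEAD(doi_head, doi) doi_tab = LIST_HEAD_INITIALIZER(doi_tab);

/* Find a registered DOI by id; NULL if absent. */
static struct doi *
doi_lookup(int id)
{
	struct doi *doi;

	LIST_FOREACH(doi, &doi_tab, link)
		if (doi->id == id)
			return doi;
	return NULL;
}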
Code example #3
uint8_t portpilot_helpers_create_dev(libusb_device *device,
        struct portpilot_ctx *pp_ctx, uint16_t max_packet_size,
        uint8_t input_endpoint, uint8_t intf_num, uint8_t *dev_path,
        uint8_t dev_path_len)
{
    int32_t retval;
    struct portpilot_dev *pp_dev = NULL;

    //TODO: Split into new function?
    //All info is ready, time to create struct, open device and add to list
    pp_dev = calloc(sizeof(struct portpilot_dev), 1);

    if (!pp_dev) {
        fprintf(stderr, "Failed to allocate memory for PortPilot device\n");
        return RETVAL_FAILURE;
    }

    if (pp_ctx->output_interval) {
        pp_dev->agg_data = calloc(sizeof(struct portpilot_data), 1);

        if (!pp_dev->agg_data) {
            fprintf(stderr, "Failed to allocate memory for agg. data\n");
            free(pp_dev);
            return RETVAL_FAILURE;
        }
    }

    pp_dev->max_packet_size = max_packet_size;
    pp_dev->input_endpoint = input_endpoint;
    pp_dev->intf_num = intf_num;

    retval = libusb_open(device, &(pp_dev->handle));

    if (retval) {
        fprintf(stderr, "Failed to open device: %s\n",
                libusb_error_name(retval));
        free(pp_dev);
        return RETVAL_FAILURE;
    }

    if (libusb_kernel_driver_active(pp_dev->handle, intf_num) == 1) {
        retval = libusb_detach_kernel_driver(pp_dev->handle, intf_num);

        if (retval) {
            fprintf(stderr, "Failed to detach kernel driver: %s\n",
                    libusb_error_name(retval));
            libusb_close(pp_dev->handle);
            free(pp_dev);
            return RETVAL_FAILURE;
        }
    }

    retval = libusb_claim_interface(pp_dev->handle, intf_num);

    if (retval) {
        fprintf(stderr, "Failed to claim interface: %s\n",
                libusb_error_name(retval));
        libusb_close(pp_dev->handle);
        free(pp_dev);
        return RETVAL_FAILURE;
    }

    //Try to read serial number from device. It is not critical if it is not
    //present, the check for matching a desired serial has been done by the time
    //we get here
    portpilot_helpers_get_serial_num(device, pp_dev->serial_number,
            MAX_USB_STR_LEN);

    memcpy(pp_dev->path, dev_path, dev_path_len);
    pp_dev->path_len = dev_path_len;

    pp_dev->pp_ctx = pp_ctx;
    LIST_INSERT_HEAD(&(pp_dev->pp_ctx->dev_head), pp_dev, next_dev);
    ++pp_ctx->dev_list_len;

    fprintf(stdout, "Ready to start reading on device %s\n",
            pp_dev->serial_number);

    portpilot_helpers_start_reading_data(pp_dev);

    return RETVAL_SUCCESS;
}
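The create path above has a natural mirror image when a device disappears. A hedged sketch of the teardown, inferred purely from the fields used in portpilot_helpers_create_dev() (the function name is an assumption, not the actual PortPilot source):

/* Hypothetical teardown mirroring portpilot_helpers_create_dev(). */
static void
portpilot_helpers_destroy_dev(struct portpilot_dev *pp_dev)
{
    /* Unlink from the context's device list first. */
    LIST_REMOVE(pp_dev, next_dev);
    --pp_dev->pp_ctx->dev_list_len;

    /* Release the interface and hand the device back to the kernel. */
    libusb_release_interface(pp_dev->handle, pp_dev->intf_num);
    libusb_attach_kernel_driver(pp_dev->handle, pp_dev->intf_num);
    libusb_close(pp_dev->handle);

    free(pp_dev->agg_data);     /* free(NULL) is a no-op */
    free(pp_dev);
}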
Code example #4
File: e1000phy.c Project: UnitedMarsupials/kame
static int
e1000phy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	const char *sep = "";

	getenv_int("e1000phy_debug", &e1000phy_debug);

	sc = device_get_softc(dev);
	ma = device_get_ivars(dev);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_service = e1000phy_service;
	sc->mii_pdata = mii;

	sc->mii_flags |= MIIF_NOISOLATE;
	mii->mii_instance++;
	e1000phy_reset(sc);

#define	ADD(m, c)	ifmedia_add(&mii->mii_media, (m), (c), NULL)
#define PRINT(s)	printf("%s%s", sep, s); sep = ", "

#if	0
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst),
	    E1000_CR_ISOLATE);
#endif

	device_printf(dev, " ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_TX, IFM_FDX, sc->mii_inst),
			E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
	PRINT("1000baseTX-FDX");
	/*
	TODO - apparently 1000BT-simplex not supported?
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_TX, 0, sc->mii_inst),
			E1000_CR_SPEED_1000);
	PRINT("1000baseTX");
	*/
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
			E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX);
	PRINT("100baseTX-FDX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
			E1000_CR_SPEED_100);
	PRINT("100baseTX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
			E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX);
	PRINT("10baseTX-FDX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
			E1000_CR_SPEED_10);
	PRINT("10baseTX");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	PRINT("auto");

	printf("\n");
#undef ADD
#undef PRINT

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return(0);
}
Code example #5
File: ikev2_config.c Project: hmatyschok/MeshBSD
static int
ikev2_process_cfg_request_attribs(struct ikev2_sa *ike_sa,
				  struct ikev2_child_sa *child_sa,
				  struct ikev2payl_config *cfg,
				  struct ikev2_child_param *param)
{
	struct ikev2cfg_attrib *attr;
	size_t bytes;
	int attr_type;
	unsigned int attr_len;
	int ip4_address = 0;
	int ip6_address = 0;
	int af;
	size_t addrsize;
	uint8_t *addrbits;
	struct rcf_address *addr;
	int address_fail = 0;
	int address_success = 0;
#ifdef DEBUG_TRACE
	char addrstr[INET6_ADDRSTRLEN];
#endif

	for (bytes = get_payload_length(cfg) - sizeof(*cfg),
		 attr = (struct ikev2cfg_attrib *)(cfg + 1);
	     bytes > 0;
	     bytes -= IKEV2CFG_ATTR_TOTALLENGTH(attr),
		 attr = IKEV2CFG_ATTR_NEXT(attr)) {
		attr_type = IKEV2CFG_ATTR_TYPE(attr);
		attr_len = IKEV2CFG_ATTR_LENGTH(attr);
		TRACE((PLOGLOC, "attribute type %d length %d\n",
		       attr_type, attr_len));
		assert(bytes >= sizeof(struct ikev2cfg_attrib));

		switch (attr_type) {
		case IKEV2_CFG_INTERNAL_IP4_ADDRESS:
			TRACE((PLOGLOC, "INTERNAL_IP4_ADDRESS\n"));
			if (++ip4_address > ike_max_ip4_alloc(ike_sa->rmconf)) {
				TRACE((PLOGLOC,
				       "INTERNAL_IP4_ADDRESS request exceeds allocation limit (%d)\n",
				       ike_max_ip4_alloc(ike_sa->rmconf)));
				++address_fail;
				break;
			}

			af = AF_INET;
			addrsize = sizeof(struct in_addr);
			addrbits = (uint8_t *)(attr + 1);
			if (attr_len != 0 && attr_len < addrsize) {
				TRACE((PLOGLOC,
				       "bogus attribute length %d, ignoring content\n",
				       attr_len));
				goto alloc_addr;
			}

			if (attr_len == 0 ||
			    get_uint32((uint32_t *)addrbits) == INADDR_ANY)
				goto alloc_addr;

		    try_assign:
			TRACE((PLOGLOC, "trying peer-specified address %s\n",
			       inet_ntop(af, addrbits, addrstr, sizeof(addrstr))));
			addr = rc_addrpool_assign(ikev2_addresspool(ike_sa->rmconf),
					       af, addrbits);
			if (addr) {
				TRACE((PLOGLOC, "OK.\n"));
				goto alloc_success;
			}

			TRACE((PLOGLOC, "failed, trying to allocate different address\n"));
			/* go on */
		    alloc_addr:
			addr = rc_addrpool_alloc_any(ikev2_addresspool(ike_sa->rmconf), af);
			if (!addr) {
				TRACE((PLOGLOC, "no address available for lease\n"));
				++address_fail;
				break;
			}

			TRACE((PLOGLOC, "allocated %s\n",
			       inet_ntop(af, addr->address, addrstr, sizeof(addrstr))));
		    alloc_success:
			++address_success;
			LIST_INSERT_HEAD(&child_sa->lease_list, addr, link_sa);
			break;

		case IKEV2_CFG_INTERNAL_IP6_ADDRESS:
			if (++ip6_address > ike_max_ip6_alloc(ike_sa->rmconf)) {
				TRACE((PLOGLOC,
				       "INTERNAL_IP6_ADDRESS request exceeds allocation limit (%d)\n",
				       ike_max_ip6_alloc(ike_sa->rmconf)));
				++address_fail;
				break;
			}
			af = AF_INET6;
			addrsize = sizeof(struct in6_addr);
			addrbits = (uint8_t *)(attr + 1);

			if (attr_len != 0 &&
			    attr_len < sizeof(struct ikev2cfg_ip6addr)) {
				TRACE((PLOGLOC,
				       "bogus attribute length %d, ignoring content\n",
				       attr_len));
				goto alloc_addr;
			}

			/* :: */
			if (attr_len == 0 ||
			    IN6_IS_ADDR_UNSPECIFIED((struct in6_addr *)addrbits))
				goto alloc_addr;

			/* ::xxxx:yyyy:zzzz:qqqq/64 */
			/* XXX not sure about prefix, ignore for now */
			if (/* ((struct ikev2cfg_ip6addr *)(attr + 1))->prefixlen == 64 && */
			    memcmp(addrbits, &in6addr_any, 64/8) == 0) {
				TRACE((PLOGLOC,
				       "peer-specified interface identifier %s/%d\n",
				       inet_ntop(af, addrbits, addrstr, sizeof(addrstr)),
				       64));
				addr = rc_addrpool_assign_ip6intf(ikev2_addresspool(ike_sa->rmconf), addrbits);
				if (addr)
					goto alloc_success;

				TRACE((PLOGLOC, "assign failed\n"));
			}

			/* aaaa:bbbb:cccc:dddd:eeee:ffff:gggg:hhhh/64 */
			/* XXX again, prefix ignored */
			if (!IN6_IS_ADDR_UNSPECIFIED((struct in6_addr *)addrbits))
				goto try_assign;

			goto alloc_addr;

		case IKEV2_CFG_APPLICATION_VERSION:
			++param->cfg_application_version;
			break;
		case IKEV2_CFG_INTERNAL_IP4_DNS:
			++param->cfg_ip4_dns;
			break;
		case IKEV2_CFG_INTERNAL_IP6_DNS:
			++param->cfg_ip6_dns;
			break;
		case IKEV2_CFG_INTERNAL_IP4_DHCP:
			++param->cfg_ip4_dhcp;
			break;
		case IKEV2_CFG_INTERNAL_IP6_DHCP:
			++param->cfg_ip6_dhcp;
			break;
		case IKEV2_CFG_SUPPORTED_ATTRIBUTES:
			++param->cfg_supported_attributes;
			break;
		case IKEV2_CFG_MIP6_HOME_PREFIX:
			++param->cfg_mip6_home_prefix;
			break;
		default:
			TRACE((PLOGLOC, "ignored\n"));
			break;
		}
	}
	if (address_fail > 0 && address_success == 0)
		return IKEV2_INTERNAL_ADDRESS_FAILURE;
	return 0;
}
Code example #6
/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a 
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 * 
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
Code example #7
File: scsi_pt.c Project: UnitedMarsupials/kame
static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct bio *bp;
	int s;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * See if there is a buf with work for us to do..
	 */
	s = splbio();
	bp = bioq_first(&softc->bio_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
				("queuing for immediate ccb\n"));
		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (bp == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		int oldspl;

		bioq_remove(&softc->bio_queue, bp);

		devstat_start_transaction(&softc->device_stats);

		scsi_send_receive(&start_ccb->csio,
				  /*retries*/4,
				  ptdone,
				  MSG_SIMPLE_Q_TAG,
				  bp->bio_cmd == BIO_READ,
				  /*byte2*/0,
				  bp->bio_bcount,
				  bp->bio_data,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/softc->io_timeout);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
				 periph_links.le);
		splx(oldspl);

		start_ccb->ccb_h.ccb_bp = bp;
		bp = bioq_first(&softc->bio_queue);
		splx(s);

		xpt_action(start_ccb);
		
		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
	}
}
Code example #8
File: bsd_init.c Project: Prajna/xnu
/*
 * This function is called very early on in the Mach startup, from the
 * function start_kernel_threads() in osfmk/kern/startup.c.  It's called
 * in the context of the current (startup) task using a call to the
 * function kernel_thread_create() to jump into start_kernel_threads().
 * Internally, kernel_thread_create() calls thread_create_internal(),
 * which calls uthread_alloc().  The function of uthread_alloc() is
 * normally to allocate a uthread structure, and fill out the uu_sigmask,
 * uu_context fields.  It skips filling these out in the case of the "task"
 * being "kernel_task", because the order of operation is inverted.  To
 * account for that, we need to manually fill in at least the contents
 * of the uu_context.vc_ucred field so that the uthread structure can be
 * used like any other.
 */
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
#if __i386__ || __x86_64__
	int error;
#endif	
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	kernel_flock = funnel_alloc(KERNEL_FUNNEL);
	if (kernel_flock == (funnel_t *)0 ) {
		panic("bsd_init: Failed to allocate kernel funnel");
	}
        
	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

#if CONFIG_LCTX
	kernproc->p_lctx = NULL;
#endif

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;

	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;

	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
	mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task);
#endif

	/* Create the file descriptor table. */
	filedesc0.fd_refcnt = 1+1;	/* +1 so shutdown will not _FREE_ZONE */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for two  processes: init and mach_init.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE,
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_wait_queue_init\n");
	select_wait_queue_init();

	/* Stack snapshot facility lock */
	stackshot_lock_init();
	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
	/* Initialise background hibernation */
	bsd_init_kprintf("calling kern_hibernation_init\n");
	kern_hibernation_init();
#endif

#if CONFIG_EMBEDDED
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling kern_memorystatus_init\n");
	kern_memorystatus_init();
#endif

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	/* kick off timeout driven events by calling first time */
	thread_wakeup(&lbolt);
	timeout(lightning_bolt, 0, hz);

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
	/* register user tunnel kernel control handler */
	utun_register_control();
    netsrc_init();
	
	/* wait for network domain to finish */
	domainfin();
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();
#if 0
	/* XXX Hack for early debug stop */
	printf("\nabout to sleep for 10 seconds\n");
	IOSleep( 10 * 1000 );
	/* Debugger("hello"); */
#endif

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is  already zero'ed */
	microtime(&kernproc->p_start);
	kernproc->p_stats->p_start = kernproc->p_start;	/* for compat */

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */
	
	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
	printf("Kernel is LP64\n");
#endif

	pal_kernel_announce();

#if __i386__ || __x86_64__
	/* this should be done after the root filesystem is mounted */
	error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	if (error) /* XXX make more generic */
		exec_archhandler_ppc.path[0] = 0;
#endif	

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif

	bsd_init_kprintf("done\n");
}
Code example #9
/*
 * Bind port from sin6 to in6p.
 */
static int
in6_pcbbind_port(struct in6pcb *in6p, struct sockaddr_in6 *sin6, struct lwp *l)
{
	struct inpcbtable *table = in6p->in6p_table;
	struct socket *so = in6p->in6p_socket;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
	int error;

	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0 &&
	   ((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0 ||
	    (so->so_options & SO_ACCEPTCONN) == 0))
		wild = 1;

	if (sin6->sin6_port != 0) {
		enum kauth_network_req req;

#ifndef IPNOPRIVPORTS
		if (ntohs(sin6->sin6_port) < IPV6PORT_RESERVED)
			req = KAUTH_REQ_NETWORK_BIND_PRIVPORT;
		else
#endif /* IPNOPRIVPORTS */
			req = KAUTH_REQ_NETWORK_BIND_PORT;

		error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_BIND,
		    req, so, sin6, NULL);
		if (error)
			return (EACCES);
	}

	if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
		/*
		 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
		 * allow complete duplication of binding if
		 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
		 * and a multicast address is bound on both
		 * new and duplicated sockets.
		 */
		if (so->so_options & SO_REUSEADDR)
			reuseport = SO_REUSEADDR|SO_REUSEPORT;
	}

	if (sin6->sin6_port != 0) {
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
#ifdef INET
			struct inpcb *t;
			struct vestigial_inpcb vestige;

			t = in_pcblookup_port(table,
			    *(struct in_addr *)&sin6->sin6_addr.s6_addr32[3],
			    sin6->sin6_port, wild, &vestige);
			if (t && (reuseport & t->inp_socket->so_options) == 0)
				return (EADDRINUSE);
			if (!t
			    && vestige.valid
			    && !(reuseport && vestige.reuse_port))
			    return EADDRINUSE;
#else
			return (EADDRNOTAVAIL);
#endif
		}

		{
			struct in6pcb *t;
			struct vestigial_inpcb vestige;

			t = in6_pcblookup_port(table, &sin6->sin6_addr,
			    sin6->sin6_port, wild, &vestige);
			if (t && (reuseport & t->in6p_socket->so_options) == 0)
				return (EADDRINUSE);
			if (!t
			    && vestige.valid
			    && !(reuseport && vestige.reuse_port))
			    return EADDRINUSE;
		}
	}

	if (sin6->sin6_port == 0) {
		int e;
		e = in6_pcbsetport(sin6, in6p, l);
		if (e != 0)
			return (e);
	} else {
		in6p->in6p_lport = sin6->sin6_port;
		in6_pcbstate(in6p, IN6P_BOUND);
	}

	LIST_REMOVE(&in6p->in6p_head, inph_lhash);
	LIST_INSERT_HEAD(IN6PCBHASH_PORT(table, in6p->in6p_lport),
	    &in6p->in6p_head, inph_lhash);

	return (0);
}
Code example #10
File: mac.c Project: edgar-pek/PerspicuOS
static int
mac_add_type(const char *name, const char *labels)
{
	struct label_default *ld, *ld_new;
	char *name_dup, *labels_dup;

	/*
	 * Speculatively allocate all the memory now to avoid allocating
	 * later when we will someday hold a mutex.
	 */
	name_dup = strdup(name);
	if (name_dup == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	labels_dup = strdup(labels);
	if (labels_dup == NULL) {
		free(name_dup);
		errno = ENOMEM;
		return (-1);
	}
	ld_new = malloc(sizeof(*ld));
	if (ld_new == NULL) {
		free(name_dup);
		free(labels_dup);
		errno = ENOMEM;
		return (-1);
	}

	/*
	 * If the type is already present, replace the current entry
	 * rather than add a new instance.
	 */
	for (ld = LIST_FIRST(&label_default_head); ld != NULL;
	    ld = LIST_NEXT(ld, ld_entries)) {
		if (strcmp(name, ld->ld_name) == 0)
			break;
	}

	if (ld != NULL) {
		free(ld->ld_labels);
		ld->ld_labels = labels_dup;
		labels_dup = NULL;
	} else {
		ld = ld_new;
		ld->ld_name = name_dup;
		ld->ld_labels = labels_dup;

		ld_new = NULL;
		name_dup = NULL;
		labels_dup = NULL;

		LIST_INSERT_HEAD(&label_default_head, ld, ld_entries);
	}

	if (name_dup != NULL)
		free(name_dup);
	if (labels_dup != NULL)
		free(labels_dup);
	if (ld_new != NULL)
		free(ld_new);

	return (0);
}
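The traversal in the middle of mac_add_type() doubles as the lookup pattern for this table. A hedged sketch of a read-side companion (the function name is hypothetical; only the list and field names come from the code above):

/* Return the default labels registered for a type, or NULL. */
static const char *
mac_get_type_labels(const char *name)
{
	struct label_default *ld;

	for (ld = LIST_FIRST(&label_default_head); ld != NULL;
	    ld = LIST_NEXT(ld, ld_entries)) {
		if (strcmp(name, ld->ld_name) == 0)
			return (ld->ld_labels);
	}
	return (NULL);
}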
Code example #11
File: bthidev.c Project: goroutines/rumprun
static void
bthidev_attach(device_t parent, device_t self, void *aux)
{
	struct bthidev_softc *sc = device_private(self);
	prop_dictionary_t dict = aux;
	prop_object_t obj;
	device_t dev;
	struct bthidev_attach_args bha;
	struct bthidev *hidev;
	struct hid_data *d;
	struct hid_item h;
	const void *desc;
	int locs[BTHIDBUSCF_NLOCS];
	int maxid, rep, dlen;
	int vendor, product;
	int err;

	/*
	 * Init softc
	 */
	sc->sc_dev = self;
	LIST_INIT(&sc->sc_list);
	MBUFQ_INIT(&sc->sc_inq);
	callout_init(&sc->sc_reconnect, 0);
	callout_setfunc(&sc->sc_reconnect, bthidev_timeout, sc);
	sc->sc_state = BTHID_CLOSED;
	sc->sc_flags = BTHID_CONNECTING;
	sc->sc_ctlpsm = L2CAP_PSM_HID_CNTL;
	sc->sc_intpsm = L2CAP_PSM_HID_INTR;

	sockopt_init(&sc->sc_mode, BTPROTO_L2CAP, SO_L2CAP_LM, 0);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_cv, device_xname(self));

	/*
	 * extract config from proplist
	 */
	obj = prop_dictionary_get(dict, BTDEVladdr);
	bdaddr_copy(&sc->sc_laddr, prop_data_data_nocopy(obj));

	obj = prop_dictionary_get(dict, BTDEVraddr);
	bdaddr_copy(&sc->sc_raddr, prop_data_data_nocopy(obj));

	obj = prop_dictionary_get(dict, BTDEVvendor);
	vendor = (int)prop_number_integer_value(obj);

	obj = prop_dictionary_get(dict, BTDEVproduct);
	product = (int)prop_number_integer_value(obj);

	obj = prop_dictionary_get(dict, BTDEVmode);
	if (prop_object_type(obj) == PROP_TYPE_STRING) {
		if (prop_string_equals_cstring(obj, BTDEVauth))
			sockopt_setint(&sc->sc_mode, L2CAP_LM_AUTH);
		else if (prop_string_equals_cstring(obj, BTDEVencrypt))
			sockopt_setint(&sc->sc_mode, L2CAP_LM_ENCRYPT);
		else if (prop_string_equals_cstring(obj, BTDEVsecure))
			sockopt_setint(&sc->sc_mode, L2CAP_LM_SECURE);
		else  {
			aprint_error(" unknown %s\n", BTDEVmode);
			return;
		}

		aprint_verbose(" %s %s", BTDEVmode,
					 prop_string_cstring_nocopy(obj));
	} else
		sockopt_setint(&sc->sc_mode, 0);

	obj = prop_dictionary_get(dict, BTHIDEVcontrolpsm);
	if (prop_object_type(obj) == PROP_TYPE_NUMBER) {
		sc->sc_ctlpsm = prop_number_integer_value(obj);
		if (L2CAP_PSM_INVALID(sc->sc_ctlpsm)) {
			aprint_error(" invalid %s\n", BTHIDEVcontrolpsm);
			return;
		}
	}

	obj = prop_dictionary_get(dict, BTHIDEVinterruptpsm);
	if (prop_object_type(obj) == PROP_TYPE_NUMBER) {
		sc->sc_intpsm = prop_number_integer_value(obj);
		if (L2CAP_PSM_INVALID(sc->sc_intpsm)) {
			aprint_error(" invalid %s\n", BTHIDEVinterruptpsm);
			return;
		}
	}

	obj = prop_dictionary_get(dict, BTHIDEVdescriptor);
	if (prop_object_type(obj) == PROP_TYPE_DATA) {
		dlen = prop_data_size(obj);
		desc = prop_data_data_nocopy(obj);
	} else {
		aprint_error(" no %s\n", BTHIDEVdescriptor);
		return;
	}

	obj = prop_dictionary_get(dict, BTHIDEVreconnect);
	if (prop_object_type(obj) == PROP_TYPE_BOOL
	    && !prop_bool_true(obj))
		sc->sc_flags |= BTHID_RECONNECT;

	/*
	 * Parse the descriptor and attach child devices, one per report.
	 */
	maxid = -1;
	h.report_ID = 0;
	d = hid_start_parse(desc, dlen, hid_none);
	while (hid_get_item(d, &h)) {
		if (h.report_ID > maxid)
			maxid = h.report_ID;
	}
	hid_end_parse(d);

	if (maxid < 0) {
		aprint_error(" no reports found\n");
		return;
	}

	aprint_normal("\n");

	if (kthread_create(PRI_NONE, KTHREAD_MUSTJOIN, NULL, bthidev_process,
	    sc, &sc->sc_lwp, "%s", device_xname(self)) != 0) {
		aprint_error_dev(self, "failed to create input thread\n");
		return;
	}

	for (rep = 0 ; rep <= maxid ; rep++) {
		if (hid_report_size(desc, dlen, hid_feature, rep) == 0
		    && hid_report_size(desc, dlen, hid_input, rep) == 0
		    && hid_report_size(desc, dlen, hid_output, rep) == 0)
			continue;

		bha.ba_vendor = vendor;
		bha.ba_product = product;
		bha.ba_desc = desc;
		bha.ba_dlen = dlen;
		bha.ba_input = bthidev_null;
		bha.ba_feature = bthidev_null;
		bha.ba_output = bthidev_output;
		bha.ba_id = rep;

		locs[BTHIDBUSCF_REPORTID] = rep;

		dev = config_found_sm_loc(self, "bthidbus",
					locs, &bha, bthidev_print, config_stdsubmatch);
		if (dev != NULL) {
			hidev = device_private(dev);
			hidev->sc_dev = dev;
			hidev->sc_parent = self;
			hidev->sc_id = rep;
			hidev->sc_input = bha.ba_input;
			hidev->sc_feature = bha.ba_feature;
			LIST_INSERT_HEAD(&sc->sc_list, hidev, sc_next);
		}
	}

	pmf_device_register(self, NULL, NULL);

	/*
	 * start bluetooth connections
	 */
	mutex_enter(bt_lock);
	if ((sc->sc_flags & BTHID_RECONNECT) == 0
	    && (err = bthidev_listen(sc)) != 0)
		aprint_error_dev(self, "failed to listen (%d)\n", err);

	if (sc->sc_flags & BTHID_CONNECTING)
		bthidev_connect(sc);
	mutex_exit(bt_lock);
}
Code example #12
File: ieee80211.c Project: sofuture/bitrig
void
ieee80211_ifattach(struct ifnet *ifp)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ieee80211_channel *c;
	int i;

	memcpy(((struct arpcom *)ifp)->ac_enaddr, ic->ic_myaddr,
		ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	ifp->if_output = ieee80211_output;

#if NBPFILTER > 0
	bpfattach(&ic->ic_rawbpf, ifp, DLT_IEEE802_11,
	    sizeof(struct ieee80211_frame_addr4));
#endif
	ieee80211_crypto_attach(ifp);

	/*
	 * Fill in 802.11 available channel set, mark
	 * all available channels as active, and pick
	 * a default channel if not already specified.
	 */
	memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
	ic->ic_modecaps |= 1<<IEEE80211_MODE_AUTO;
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_flags) {
			/*
			 * Verify driver passed us valid data.
			 */
			if (i != ieee80211_chan2ieee(ic, c)) {
				printf("%s: bad channel ignored; "
					"freq %u flags %x number %u\n",
					ifp->if_xname, c->ic_freq, c->ic_flags,
					i);
				c->ic_flags = 0;	/* NB: remove */
				continue;
			}
			setbit(ic->ic_chan_avail, i);
			/*
			 * Identify mode capabilities.
			 */
			if (IEEE80211_IS_CHAN_A(c))
				ic->ic_modecaps |= 1<<IEEE80211_MODE_11A;
			if (IEEE80211_IS_CHAN_B(c))
				ic->ic_modecaps |= 1<<IEEE80211_MODE_11B;
			if (IEEE80211_IS_CHAN_PUREG(c))
				ic->ic_modecaps |= 1<<IEEE80211_MODE_11G;
			if (IEEE80211_IS_CHAN_T(c))
				ic->ic_modecaps |= 1<<IEEE80211_MODE_TURBO;
		}
	}
	/* validate ic->ic_curmode */
	if ((ic->ic_modecaps & (1<<ic->ic_curmode)) == 0)
		ic->ic_curmode = IEEE80211_MODE_AUTO;
	ic->ic_des_chan = IEEE80211_CHAN_ANYC;	/* any channel is ok */
	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;

	/* IEEE 802.11 defines a MTU >= 2290 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ieee80211_setbasicrates(ic);
	(void)ieee80211_setmode(ic, ic->ic_curmode);

	if (ic->ic_lintval == 0)
		ic->ic_lintval = 100;		/* default sleep */
	ic->ic_bmisstimeout = 7*ic->ic_lintval;	/* default 7 beacons */
	ic->ic_dtim_period = 1;	/* all TIMs are DTIMs */

	LIST_INSERT_HEAD(&ieee80211com_head, ic, ic_list);
	ieee80211_node_attach(ifp);
	ieee80211_proto_attach(ifp);

	if_addgroup(ifp, "wlan");
	ifp->if_priority = IF_WIRELESS_DEFAULT_PRIORITY;
}
Code example #13
File: ng_ksocket.c Project: ornarium/freebsd
/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
	struct socket *const head = priv->so;
	struct socket *so;
	struct sockaddr *sa = NULL;
	struct ng_mesg *resp;
	struct ng_ksocket_accept *resp_data;
	node_p node;
	priv_p priv2;
	int len;
	int error;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {	/* Should never happen */
		ACCEPT_UNLOCK();
		return;
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	SOCK_LOCK(so);
	soref(so);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	/* XXX KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); */

	soaccept(so, &sa);

	len = OFFSETOF(struct ng_ksocket_accept, addr);
	if (sa != NULL)
		len += sa->sa_len;

	NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
	    M_NOWAIT);
	if (resp == NULL) {
		soclose(so);
		goto out;
	}
	resp->header.flags |= NGF_RESP;
	resp->header.token = priv->response_token;

	/* Clone a ksocket node to wrap the new socket */
        error = ng_make_node_common(&ng_ksocket_typestruct, &node);
        if (error) {
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	if (ng_ksocket_constructor(node) != 0) {
		NG_NODE_UNREF(node);
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	priv2 = NG_NODE_PRIVATE(node);
	priv2->so = so;
	priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

	/*
	 * Insert the cloned node into a list of embryonic children
	 * on the parent node.  When a hook is created on the cloned
	 * node it will be removed from this list.  When the parent
	 * is destroyed it will destroy any embryonic children it has.
	 */
	LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	soupcall_set(so, SO_SND, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Fill in the response data and send it or return it to the caller */
	resp_data = (struct ng_ksocket_accept *)resp->data;
	resp_data->nodeid = NG_NODE_ID(node);
	if (sa != NULL)
		bcopy(sa, &resp_data->addr, sa->sa_len);
	NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
	if (sa != NULL)
		free(sa, M_SONAME);
}
Code example #14
File: if_pflog.c Project: BillTheBest/libuinet
#ifdef __FreeBSD__
static int
pflog_clone_create(struct if_clone *ifc, int unit, caddr_t params)
#else
int
pflog_clone_create(struct if_clone *ifc, int unit)
#endif
{
	struct ifnet *ifp;
	struct pflog_softc *pflogif;
	int s;

	if (unit >= PFLOGIFS_MAX)
		return (EINVAL);

	if ((pflogif = malloc(sizeof(*pflogif),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	pflogif->sc_unit = unit;
#ifdef __FreeBSD__
	ifp = pflogif->sc_ifp = if_alloc(IFT_PFLOG);
	if (ifp == NULL) {
		free(pflogif, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, ifc->ifc_name, unit);
#else
	ifp = &pflogif->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pflog%d", unit);
#endif
	ifp->if_softc = pflogif;
	ifp->if_mtu = PFLOGMTU;
	ifp->if_ioctl = pflogioctl;
	ifp->if_output = pflogoutput;
	ifp->if_start = pflogstart;
#ifndef __FreeBSD__
	ifp->if_type = IFT_PFLOG;
#endif
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = PFLOG_HDRLEN;
	if_attach(ifp);
#ifndef __FreeBSD__
	if_alloc_sadl(ifp);
#endif

#if NBPFILTER > 0
#ifdef __FreeBSD__
	bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
#else
	bpfattach(&pflogif->sc_if.if_bpf, ifp, DLT_PFLOG, PFLOG_HDRLEN);
#endif
#endif

	s = splnet();
#ifdef __FreeBSD__
	/* XXX: Why pf(4) lock?! Better add a pflog lock?! */
	PF_LOCK();
#endif
	LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
	pflogifs[unit] = ifp;
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (0);
}
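Cloned interfaces need a matching destroy path that undoes the list insertion under the same protection. A hedged sketch modelled on the OpenBSD side of this file (details differ between the FreeBSD and OpenBSD branches):

int
pflog_clone_destroy(struct ifnet *ifp)
{
	struct pflog_softc *pflogif = ifp->if_softc;
	int s;

	s = splnet();
	pflogifs[pflogif->sc_unit] = NULL;
	LIST_REMOVE(pflogif, sc_list);
	splx(s);

	if_detach(ifp);		/* also detaches bpf */
	free(pflogif, M_DEVBUF);
	return (0);
}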
Code example #15
void *
alloc(size_t size)
{
	struct ml *f, *bestf;
#ifndef ALLOC_FIRST_FIT
	unsigned bestsize = 0xffffffff;	/* greater than any real size */
#endif
	char *help;
	int failed;

#ifdef ALLOC_TRACE
	printf("alloc(%zu)", size);
#endif

	/*
	 * Account for overhead now, so that we don't get an
	 * "exact fit" which doesn't have enough space.
	 */
	size = ALIGN(size) + OVERHEAD;

#ifdef ALLOC_FIRST_FIT
	/* scan freelist */
	for (f = freelist.lh_first; f != NULL && (size_t)f->size < size;
	    f = f->list.le_next)
		/* noop */ ;
	bestf = f;
	failed = (bestf == NULL);
#else
	/* scan freelist */
	bestf = NULL;		/* XXXGCC: -Wuninitialized */
	f = freelist.lh_first;
	while (f != NULL) {
		if ((size_t)f->size >= size) {
			if ((size_t)f->size == size)	/* exact match */
				goto found;

			if (f->size < bestsize) {
				/* keep best fit */
				bestf = f;
				bestsize = f->size;
			}
		}
		f = f->list.le_next;
	}

	/* no match in freelist if bestsize unchanged */
	failed = (bestsize == 0xffffffff);
#endif

	if (failed) {	/* nothing found */
		/*
		 * Allocate memory from the OpenFirmware, rounded
		 * to page size, and record the chunk size.
		 */
		size = roundup(size, NBPG);
		help = OF_claim(NULL, (unsigned)size, NBPG);
		if (help == (char *)-1)
			panic("alloc: out of memory");

		f = (struct ml *)help;
		f->size = (unsigned)size;
#ifdef ALLOC_TRACE
		printf("=%lx (new chunk size %u)\n",
		    (u_long)(help + OVERHEAD), f->size);
#endif
		goto out;
	}

	/* we take the best fit */
	f = bestf;

#ifndef ALLOC_FIRST_FIT
 found:
#endif
	/* remove from freelist */
	LIST_REMOVE(f, list);
	help = (char *)f;
#ifdef ALLOC_TRACE
	printf("=%lx (origsize %u)\n", (u_long)(help + OVERHEAD), f->size);
#endif
 out:
	/* place on allocated list */
	LIST_INSERT_HEAD(&allocatedlist, f, list);
	return (help + OVERHEAD);
}
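Because alloc() records the chunk size in the header it hides before the returned pointer, the matching release routine only needs to move the chunk from the allocated list back to the freelist. A sketch of the companion dealloc(), closely following the libsa convention (the size cross-check message is illustrative):

void
dealloc(void *start, size_t size)
{
	struct ml *f = (struct ml *)((char *)start - OVERHEAD);

	if (size > (size_t)f->size)
		printf("dealloc %zu bytes @%lx, should be <=%u\n",
		    size, (u_long)start, f->size);

	/* off the allocated list, back onto the freelist */
	LIST_REMOVE(f, list);
	LIST_INSERT_HEAD(&freelist, f, list);
}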
Code example #16
File: rthread.c Project: mikekmv/aeriebsd-src
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	pthread_t thread;
	pid_t tid;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	thread = malloc(sizeof(*thread));
	if (!thread)
		return (errno);
	memset(thread, 0, sizeof(*thread));
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	if (attr)
		thread->attr = *(*attr);
	else {
		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
		thread->attr.stack_size -= thread->attr.guard_size;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	_spinlock(&_thread_lock);

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
	    thread->stack->sp, _rthread_start, thread);
	if (tid == -1) {
		rc = errno;
		goto fail2;
	}
	/* the new thread will appear in _rthread_start */
	thread->tid = tid;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	*threadp = thread;

	/*
	 * Since _rthread_start() acquires the thread lock and due to the way
	 * signal delivery is implemented, this is not a race.
	 */
	if (thread->attr.create_suspended)
		kill(thread->tid, SIGSTOP);

	_spinunlock(&_thread_lock);

	return (0);

fail2:
	_rthread_free_stack(thread->stack);
	LIST_REMOVE(thread, threads);
fail1:
	_spinunlock(&_thread_lock);
	_rthread_free(thread);

	return (rc);
}
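Calling this pthread_create() looks the same as with any POSIX threads library; a minimal standalone caller for reference (standard API only, nothing rthread-specific assumed):

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("hello from worker, arg=%d\n", *(int *)arg);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	int id = 1;

	if (pthread_create(&t, NULL, worker, &id) != 0)
		return 1;
	pthread_join(t, NULL);	/* detached threads would skip this */
	return 0;
}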
Code example #17
File: usdf_domain.c Project: brminich/ompi-mirror
int
usdf_domain_open(struct fid_fabric *fabric, struct fi_info *info,
	   struct fid_domain **domain, void *context)
{
	struct usdf_fabric *fp;
	struct usdf_domain *udp;
	struct sockaddr_in *sin;
	size_t addrlen;
	int ret;

	udp = calloc(1, sizeof *udp);
	if (udp == NULL) {
		USDF_DEBUG("unable to alloc mem for domain\n");
		ret = -FI_ENOMEM;
		goto fail;
	}

	fp = fab_fidtou(fabric);

	USDF_DEBUG("uda_devname=%s\n", fp->fab_dev_attrs->uda_devname);

	/*
	 * Make sure address format is good and matches this fabric
	 */
	switch (info->addr_format) {
	case FI_SOCKADDR:
		addrlen = sizeof(struct sockaddr);
		break;
	case FI_SOCKADDR_IN:
		addrlen = sizeof(struct sockaddr_in);
		break;
	default:
		ret = -FI_EINVAL;
		goto fail;
	}
	sin = info->src_addr;
	if (info->src_addrlen != addrlen || sin->sin_family != AF_INET ||
	    sin->sin_addr.s_addr != fp->fab_dev_attrs->uda_ipaddr_be) {
		ret = -FI_EINVAL;
		goto fail;
	}

	ret = usd_open(fp->fab_dev_attrs->uda_devname, &udp->dom_dev);
	if (ret != 0) {
		goto fail;
	}

	udp->dom_fid.fid.fclass = FI_CLASS_DOMAIN;
	udp->dom_fid.fid.context = context;
	udp->dom_fid.fid.ops = &usdf_fid_ops;
	udp->dom_fid.ops = &usdf_domain_ops;
	udp->dom_fid.mr = &usdf_domain_mr_ops;

	ret = pthread_spin_init(&udp->dom_progress_lock,
			PTHREAD_PROCESS_PRIVATE);
	if (ret != 0) {
		ret = -ret;
		goto fail;
	}
	TAILQ_INIT(&udp->dom_tx_ready);
	TAILQ_INIT(&udp->dom_hcq_list);

	udp->dom_info = fi_dupinfo(info);
	if (udp->dom_info == NULL) {
		ret = -FI_ENOMEM;
		goto fail;
	}
	if (udp->dom_info->dest_addr != NULL) {
		free(udp->dom_info->dest_addr);
		udp->dom_info->dest_addr = NULL;
	}

	ret = usdf_dom_rdc_alloc_data(udp);
	if (ret != 0) {
		goto fail;
	}

	udp->dom_fabric = fp;
	LIST_INSERT_HEAD(&fp->fab_domain_list, udp, dom_link);
	atomic_init(&udp->dom_refcnt, 0);
	atomic_inc(&fp->fab_refcnt);

	*domain = &udp->dom_fid;
	return 0;

fail:
	if (udp != NULL) {
		if (udp->dom_info != NULL) {
			fi_freeinfo(udp->dom_info);
		}
		if (udp->dom_dev != NULL) {
			usd_close(udp->dom_dev);
		}
		usdf_dom_rdc_free_data(udp);
		free(udp);
	}
	return ret;
}
コード例 #18
0
ファイル: minidlna.c プロジェクト: edwacode/r6300v2
/* process HTTP or SSDP requests */
int
main(int argc, char * * argv)
{
	int i;
	int sudp = -1, shttpl = -1;
	int snotify[MAX_LAN_ADDR];
	LIST_HEAD(httplisthead, upnphttp) upnphttphead;
	struct upnphttp * e = 0;
	struct upnphttp * next;
	fd_set readset;	/* for select() */
	fd_set writeset;
	struct timeval timeout, timeofday, lastnotifytime = {0, 0}, lastupdatetime = {0, 0};
	int max_fd = -1;
	int last_changecnt = 0;
	short int new_db = 0;
	pid_t scanner_pid = 0;
	pthread_t inotify_thread = 0;
#ifdef TIVO_SUPPORT
	unsigned short int beacon_interval = 5;
	int sbeacon = -1;
	struct sockaddr_in tivo_bcast;
	struct timeval lastbeacontime = {0, 0};
#endif

	if(init(argc, argv) != 0)
		return 1;

#ifdef READYNAS
	DPRINTF(E_WARN, L_GENERAL, "Starting ReadyDLNA version " MINIDLNA_VERSION ".\n");
	unlink("/ramfs/.upnp-av_scan");
#else
	DPRINTF(E_WARN, L_GENERAL, "Starting MiniDLNA version " MINIDLNA_VERSION " [SQLite %s].\n", sqlite3_libversion());
	if( !sqlite3_threadsafe() )
	{
		DPRINTF(E_ERROR, L_GENERAL, "SQLite library is not threadsafe!  "
		                            "Scanning must be finished before file serving can begin, "
		                            "and inotify will be disabled.\n");
	}
	if( sqlite3_libversion_number() < 3005001 )
	{
		DPRINTF(E_WARN, L_GENERAL, "SQLite library is old.  Please use version 3.5.1 or newer.\n");
	}
#endif
	LIST_INIT(&upnphttphead);

	new_db = open_db();
	if( !new_db )
	{
		updateID = sql_get_int_field(db, "SELECT UPDATE_ID from SETTINGS");
	}
	if( sql_get_int_field(db, "pragma user_version") != DB_VERSION )
	{
		if( new_db )
		{
			DPRINTF(E_WARN, L_GENERAL, "Creating new database...\n");
		}
		else
		{
			DPRINTF(E_WARN, L_GENERAL, "Database version mismatch; need to recreate...\n");
		}
		sqlite3_close(db);
		char *cmd;
		asprintf(&cmd, "rm -rf %s/files.db %s/art_cache", db_path, db_path);
		system(cmd);
		free(cmd);
		open_db();
		if( CreateDatabase() != 0 )
		{
			DPRINTF(E_FATAL, L_GENERAL, "ERROR: Failed to create sqlite database!  Exiting...\n");
		}
#if USE_FORK
		scanning = 1;
		sqlite3_close(db);
		scanner_pid = fork();
		open_db();
		if( !scanner_pid ) // child (scanner) process
		{
			start_scanner();
			sqlite3_close(db);
			exit(EXIT_SUCCESS);
		}
#else
		start_scanner();
#endif
	}
	if( sqlite3_threadsafe() && sqlite3_libversion_number() >= 3005001 &&
	    GETFLAG(INOTIFY_MASK) && pthread_create(&inotify_thread, NULL, start_inotify, NULL) )
	{
		DPRINTF(E_FATAL, L_GENERAL, "ERROR: pthread_create() failed for start_inotify.\n");
	}

	sudp = OpenAndConfSSDPReceiveSocket(n_lan_addr, lan_addr);
	if(sudp < 0)
	{
		DPRINTF(E_FATAL, L_GENERAL, "Failed to open socket for receiving SSDP. EXITING\n");
	}
	/* open socket for HTTP connections. Listen on the 1st LAN address */
	shttpl = OpenAndConfHTTPSocket(runtime_vars.port);
	if(shttpl < 0)
	{
		DPRINTF(E_FATAL, L_GENERAL, "Failed to open socket for HTTP. EXITING\n");
	}
	DPRINTF(E_WARN, L_GENERAL, "HTTP listening on port %d\n", runtime_vars.port);

	/* open socket for sending notifications */
	if(OpenAndConfSSDPNotifySockets(snotify) < 0)
	{
		DPRINTF(E_FATAL, L_GENERAL, "Failed to open sockets for sending SSDP notify "
	                "messages. EXITING\n");
	}

#ifdef TIVO_SUPPORT
	if( GETFLAG(TIVO_MASK) )
	{
		DPRINTF(E_WARN, L_GENERAL, "TiVo support is enabled.\n");
		/* Add TiVo-specific randomize function to sqlite */
		if( sqlite3_create_function(db, "tivorandom", 1, SQLITE_UTF8, NULL, &TiVoRandomSeedFunc, NULL, NULL) != SQLITE_OK )
		{
			DPRINTF(E_ERROR, L_TIVO, "ERROR: Failed to add sqlite randomize function for TiVo!\n");
		}
		/* open socket for sending Tivo notifications */
		sbeacon = OpenAndConfTivoBeaconSocket();
		if(sbeacon < 0)
		{
			DPRINTF(E_FATAL, L_GENERAL, "Failed to open sockets for sending Tivo beacon notify "
		                "messages. EXITING\n");
		}
		tivo_bcast.sin_family = AF_INET;
		tivo_bcast.sin_addr.s_addr = htonl(getBcastAddress());
		tivo_bcast.sin_port = htons(2190);
	}
	else
	{
		sbeacon = -1;
	}
#endif

	SendSSDPGoodbye(snotify, n_lan_addr);

	/* main loop */
	while(!quitting)
	{
		/* Check if we need to send SSDP NOTIFY messages and do it if
		 * needed */
		if(gettimeofday(&timeofday, 0) < 0)
		{
			DPRINTF(E_ERROR, L_GENERAL, "gettimeofday(): %s\n", strerror(errno));
			timeout.tv_sec = runtime_vars.notify_interval;
			timeout.tv_usec = 0;
		}
		else
		{
			/* the comparison is not very precise but who cares? */
			if(timeofday.tv_sec >= (lastnotifytime.tv_sec + runtime_vars.notify_interval))
			{
				SendSSDPNotifies2(snotify,
			                  (unsigned short)runtime_vars.port,
			                  (runtime_vars.notify_interval << 1)+10);
				memcpy(&lastnotifytime, &timeofday, sizeof(struct timeval));
				timeout.tv_sec = runtime_vars.notify_interval;
				timeout.tv_usec = 0;
			}
			else
			{
				timeout.tv_sec = lastnotifytime.tv_sec + runtime_vars.notify_interval
				                 - timeofday.tv_sec;
				if(timeofday.tv_usec > lastnotifytime.tv_usec)
				{
					timeout.tv_usec = 1000000 + lastnotifytime.tv_usec
					                  - timeofday.tv_usec;
					timeout.tv_sec--;
				}
				else
				{
					timeout.tv_usec = lastnotifytime.tv_usec - timeofday.tv_usec;
				}
			}
#ifdef TIVO_SUPPORT
			if( GETFLAG(TIVO_MASK) )
			{
				if(timeofday.tv_sec >= (lastbeacontime.tv_sec + beacon_interval))
				{
					sendBeaconMessage(sbeacon, &tivo_bcast, sizeof(struct sockaddr_in), 1);
					memcpy(&lastbeacontime, &timeofday, sizeof(struct timeval));
					if( timeout.tv_sec > beacon_interval )
					{
						timeout.tv_sec = beacon_interval;
						timeout.tv_usec = 0;
					}
					/* Beacons should be sent every 5 seconds or so for the first minute,
					 * then every minute or so thereafter. */
					if( beacon_interval == 5 && (timeofday.tv_sec - startup_time) > 60 )
					{
						beacon_interval = 60;
					}
				}
				else if( timeout.tv_sec > (lastbeacontime.tv_sec + beacon_interval + 1 - timeofday.tv_sec) )
				{
					timeout.tv_sec = lastbeacontime.tv_sec + beacon_interval - timeofday.tv_sec;
				}
			}
#endif
		}

		if( scanning )
		{
			if( !scanner_pid || kill(scanner_pid, 0) )
				scanning = 0;
		}

		/* select open sockets (SSDP, HTTP listen, and all HTTP soap sockets) */
		FD_ZERO(&readset);

		if (sudp >= 0) 
		{
			FD_SET(sudp, &readset);
			max_fd = MAX( max_fd, sudp);
		}
		
		if (shttpl >= 0) 
		{
			FD_SET(shttpl, &readset);
			max_fd = MAX( max_fd, shttpl);
		}

		i = 0;	/* active HTTP connections count */
		for(e = upnphttphead.lh_first; e != NULL; e = e->entries.le_next)
		{
			if((e->socket >= 0) && (e->state <= 2))
			{
				FD_SET(e->socket, &readset);
				max_fd = MAX( max_fd, e->socket);
				i++;
			}
		}
		/* for debug */
#ifdef DEBUG
		if(i > 1)
		{
			DPRINTF(E_DEBUG, L_GENERAL, "%d active incoming HTTP connections\n", i);
		}
#endif

		FD_ZERO(&writeset);
		upnpevents_selectfds(&readset, &writeset, &max_fd);

		if(select(max_fd+1, &readset, &writeset, 0, &timeout) < 0)
		{
			if(quitting) goto shutdown;
			DPRINTF(E_ERROR, L_GENERAL, "select(all): %s\n", strerror(errno));
			DPRINTF(E_FATAL, L_GENERAL, "Failed to select open sockets. EXITING\n");
		}
		upnpevents_processfds(&readset, &writeset);
		/* process SSDP packets */
		if(sudp >= 0 && FD_ISSET(sudp, &readset))
		{
			/*DPRINTF(E_DEBUG, L_GENERAL, "Received UDP Packet\n");*/
			ProcessSSDPRequest(sudp, (unsigned short)runtime_vars.port);
		}
		/* increment SystemUpdateID if the content database has changed,
		 * and if there is an active HTTP connection, at most once every 2 seconds */
		if( i && (time(NULL) >= (lastupdatetime.tv_sec + 2)) )
		{
			if( sqlite3_total_changes(db) != last_changecnt )
			{
				updateID++;
				last_changecnt = sqlite3_total_changes(db);
				upnp_event_var_change_notify(EContentDirectory);
				memcpy(&lastupdatetime, &timeofday, sizeof(struct timeval));
			}
		}
		/* process active HTTP connections */
		for(e = upnphttphead.lh_first; e != NULL; e = e->entries.le_next)
		{
			if(  (e->socket >= 0) && (e->state <= 2)
				&&(FD_ISSET(e->socket, &readset)) )
			{
				Process_upnphttp(e);
			}
		}
		/* process incoming HTTP connections */
		if(shttpl >= 0 && FD_ISSET(shttpl, &readset))
		{
			int shttp;
			socklen_t clientnamelen;
			struct sockaddr_in clientname;
			clientnamelen = sizeof(struct sockaddr_in);
			shttp = accept(shttpl, (struct sockaddr *)&clientname, &clientnamelen);
			if(shttp<0)
			{
				DPRINTF(E_ERROR, L_GENERAL, "accept(http): %s\n", strerror(errno));
			}
			else
			{
				struct upnphttp * tmp = 0;
				DPRINTF(E_DEBUG, L_GENERAL, "HTTP connection from %s:%d\n",
					inet_ntoa(clientname.sin_addr),
					ntohs(clientname.sin_port) );
				/*if (fcntl(shttp, F_SETFL, O_NONBLOCK) < 0) {
					DPRINTF(E_ERROR, L_GENERAL, "fcntl F_SETFL, O_NONBLOCK");
				}*/
				/* Create a new upnphttp object and add it to
				 * the active upnphttp object list */
				tmp = New_upnphttp(shttp);
				if(tmp)
				{
					tmp->clientaddr = clientname.sin_addr;
					LIST_INSERT_HEAD(&upnphttphead, tmp, entries);
				}
				else
				{
					DPRINTF(E_ERROR, L_GENERAL, "New_upnphttp() failed\n");
					close(shttp);
				}
			}
		}
		/* delete finished HTTP connections */
		for(e = upnphttphead.lh_first; e != NULL; )
		{
			next = e->entries.le_next;
			if(e->state >= 100)
			{
				LIST_REMOVE(e, entries);
				Delete_upnphttp(e);
			}
			e = next;
		}
	}

shutdown:
	/* kill the scanner */
	if( scanning && scanner_pid )
	{
		kill(scanner_pid, 9);
	}
	/* close out open sockets */
	while(upnphttphead.lh_first != NULL)
	{
		e = upnphttphead.lh_first;
		LIST_REMOVE(e, entries);
		Delete_upnphttp(e);
	}

	if (sudp >= 0) close(sudp);
	if (shttpl >= 0) close(shttpl);
	#ifdef TIVO_SUPPORT
	if (sbeacon >= 0) close(sbeacon);
	#endif
	
	if(SendSSDPGoodbye(snotify, n_lan_addr) < 0)
	{
		DPRINTF(E_ERROR, L_GENERAL, "Failed to broadcast good-bye notifications\n");
	}
	for(i=0; i<n_lan_addr; i++)
		close(snotify[i]);

	if( inotify_thread )
		pthread_join(inotify_thread, NULL);

	sql_exec(db, "UPDATE SETTINGS set UPDATE_ID = %u", updateID);
	sqlite3_close(db);

	struct media_dir_s * media_path = media_dirs;
	struct media_dir_s * last_path;
	while( media_path )
	{
		free(media_path->path);
		last_path = media_path;
		media_path = media_path->next;
		free(last_path);
	}
	struct album_art_name_s * art_names = album_art_names;
	struct album_art_name_s * last_name;
	while( art_names )
	{
		free(art_names->name);
		last_name = art_names;
		art_names = art_names->next;
		free(last_name);
	}

	if(unlink(pidfilename) < 0)
	{
		DPRINTF(E_ERROR, L_GENERAL, "Failed to remove pidfile %s: %s\n", pidfilename, strerror(errno));
	}

	freeoptions();

	exit(EXIT_SUCCESS);
}
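The notify scheduling in the main loop above is easier to see in isolation: fire a periodic event, then hand select() the time remaining until the next one as its timeout. Below is a minimal, self-contained sketch of that pattern; it is not minidlna code, and the interval and names are invented for illustration.

/* periodic.c -- select()-as-timer sketch (illustrative, not minidlna) */
#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>

#define INTERVAL 5	/* seconds between events; an assumed value */

int
main(void)
{
	struct timeval now, last, timeout;

	gettimeofday(&last, NULL);
	for (int fired = 0; fired < 3; )
	{
		gettimeofday(&now, NULL);
		if (now.tv_sec >= last.tv_sec + INTERVAL)
		{
			printf("periodic event %d\n", ++fired); /* the SendSSDPNotifies2() slot */
			last = now;
			timeout.tv_sec = INTERVAL;
			timeout.tv_usec = 0;
		}
		else
		{
			/* remaining time until the next event, same math as above */
			timeout.tv_sec = last.tv_sec + INTERVAL - now.tv_sec;
			if (now.tv_usec > last.tv_usec)
			{
				timeout.tv_usec = 1000000 + last.tv_usec - now.tv_usec;
				timeout.tv_sec--;
			}
			else
			{
				timeout.tv_usec = last.tv_usec - now.tv_usec;
			}
		}
		/* with no descriptors to watch, select() is just a precise sleep */
		select(0, NULL, NULL, NULL, &timeout);
	}
	return 0;
}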
Code example #19
File: eval.c  Project: oswjk/lispish
struct atom *eval(struct atom *expr, struct env *env)
{
    // Symbols are resolved via the environment; non-lists evaluate to themselves.

    if (IS_SYM(expr))
    {
        struct atom *atom = env_lookup(env, expr->str.str);

        if (atom)
        {
            return atom;
        }
        else
        {
            printf("error: undefined variable: %s\n",
                expr->str.str);
            return &nil_atom;
        }
    }

    if (!IS_LIST(expr))
        return expr;

    struct list *list = expr->list;
    struct atom *op = LIST_FIRST(list);

    // Check if the first elem is not a symbol or a closure. If it's
    // not, then we'll evaluate it (it could be a lambda form).

    if (!IS_SYM(op) && !IS_CLOSURE(op))
    {
        struct atom *evaluated_op = eval(op, env);
        // Splice the evaluated operator back into the list as the new head.
        LIST_REMOVE(op, entries);
        LIST_INSERT_HEAD(list, evaluated_op, entries);
        op = evaluated_op;
    }

    // If the first elem is a symbol, it should be a name for a builtin
    // function or a closure bound to that name by the user. If the
    // first argument is directly a closure, eval that with the args.

    if (IS_SYM(op))
    {
        struct builtin_function_def *def = builtin_function_defs;
        while (def->name && def->fn)
        {
            if (strcmp(op->str.str, def->name) == 0)
            {
                return def->fn(expr, env);
            }

            ++def;
        }

        struct atom *closure = env_lookup(env, op->str.str);

        if (closure)
        {
            return eval_closure(closure, CDR(op), env);
        }

        printf("error: unknown function %s\n", op->str.str);
    }
    else if (IS_CLOSURE(op))
    {
        return eval_closure(op, CDR(op), env);
    }

    printf("error: cannot evaluate\n");

    return &nil_atom;
}
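In eval() above, the LIST_REMOVE()/LIST_INSERT_HEAD() pair is the standard queue(3) idiom for replacing a list's head element in place. The same idiom in a stand-alone sketch (illustrative types, not from lispish):

#include <stdio.h>
#include <sys/queue.h>

struct node {
    int value;
    LIST_ENTRY(node) entries;
};

LIST_HEAD(node_list, node);

int main(void)
{
    struct node_list list = LIST_HEAD_INITIALIZER(list);
    struct node a = { 1 }, b = { 2 }, c = { 3 };
    struct node *n;

    LIST_INSERT_HEAD(&list, &c, entries);
    LIST_INSERT_HEAD(&list, &a, entries);  /* list is now: a c */

    /* Swap the head out for a replacement, leaving the tail intact. */
    LIST_REMOVE(&a, entries);
    LIST_INSERT_HEAD(&list, &b, entries);  /* list is now: b c */

    LIST_FOREACH(n, &list, entries)
        printf("%d\n", n->value);          /* prints 2, then 3 */
    return 0;
}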
Code example #20
File: sys_sig.c  Project: ryo/netbsd-src
int
sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
    register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
    copyout_t storets)
{
	/* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */
	struct proc *p = l->l_proc;
	int error, signum, timo;
	struct timespec ts, tsstart, tsnow;
	ksiginfo_t ksi;

	/*
	 * Calculate timeout, if it was specified.
	 *
	 * NULL pointer means an infinite timeout.
	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
	 */
	if (SCARG(uap, timeout)) {
		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			return error;

		if ((error = itimespecfix(&ts)) != 0)
			return error;

		timo = tstohz(&ts);
		if (timo == 0) {
			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
				timo = -1; /* do not block */
			else
				timo = 1; /* the shortest possible timeout */
		}

		/*
		 * Remember the current uptime; it will be used in the
		 * ECANCELED/ERESTART case.
		 */
		getnanouptime(&tsstart);
	} else {
		memset(&tsstart, 0, sizeof(tsstart)); /* XXXgcc */
		timo = 0; /* infinite timeout */
	}

	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
	    sizeof(l->l_sigwaitset));
	if (error)
		return error;

	/*
	 * Silently ignore SA_CANTMASK signals: psignal1() would ignore
	 * SA_CANTMASK signals in waitset anyway; we do this only for the
	 * siglist check below.
	 */
	sigminusset(&sigcantmask, &l->l_sigwaitset);

	mutex_enter(p->p_lock);

	/* Check for pending signals in the process; if none, then in the LWP. */
	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);

	if (signum != 0) {
		/* If found a pending signal, just copy it out to the user. */
		mutex_exit(p->p_lock);
		goto out;
	}

	if (timo < 0) {
		/* If not allowed to block, return an error */
		mutex_exit(p->p_lock);
		return EAGAIN;
	}

	/*
	 * Set up the sigwait list and wait for signal to arrive.
	 * We can either be woken up or time out.
	 */
	l->l_sigwaited = &ksi;
	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);

	/*
	 * Need to find out if we woke as a result of _lwp_wakeup() or a
	 * signal outside our wait set.
	 */
	if (l->l_sigwaited != NULL) {
		if (error == EINTR) {
			/* Wakeup via _lwp_wakeup(). */
			error = ECANCELED;
		} else if (!error) {
			/* Spurious wakeup - arrange for syscall restart. */
			error = ERESTART;
		}
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
	}
	mutex_exit(p->p_lock);

	/*
	 * If the sleep was interrupted (either by signal or wakeup), update
	 * the timeout and copy the new value back out.  It will be used if
	 * the syscall is restarted or called again.
	 */
	if (timo && (error == ERESTART || error == ECANCELED)) {
		getnanouptime(&tsnow);

		/* Compute how much time has passed since start. */
		timespecsub(&tsnow, &tsstart, &tsnow);

		/* Subtract the elapsed time from the timeout. */
		timespecsub(&ts, &tsnow, &ts);

		if (ts.tv_sec < 0)
			error = EAGAIN;
		else {
			/* Copy updated timeout to userland. */
			error = (*storets)(&ts, SCARG(uap, timeout),
			    sizeof(ts));
		}
	}
out:
	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo, the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 */
	if (error == 0 && SCARG(uap, info)) {
		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
		    sizeof(ksi.ksi_info));
	}
	if (error == 0) {
		*retval = ksi.ksi_info._signo;
		SDT_PROBE(proc, kernel, , signal__clear, *retval,
		    &ksi, 0, 0, 0);
	}
	return error;
}
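The timespecsub() bookkeeping above has a direct userland analog: measure how long an interrupted wait actually ran, subtract it from the budget, and retry (or fail with EAGAIN once the budget is exhausted). A sketch under that assumption, not NetBSD code:

#include <stdio.h>
#include <time.h>

static void
timespec_sub(const struct timespec *a, const struct timespec *b,
    struct timespec *res)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (res->tv_nsec < 0) {
		res->tv_sec--;
		res->tv_nsec += 1000000000L;
	}
}

int
main(void)
{
	struct timespec start, now, elapsed;
	struct timespec budget = { 5, 0 };	/* a 5 second timeout */

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... the wait gets interrupted somewhere in here ... */
	clock_gettime(CLOCK_MONOTONIC, &now);

	timespec_sub(&now, &start, &elapsed);		/* how long we waited */
	timespec_sub(&budget, &elapsed, &budget);	/* what is left */

	if (budget.tv_sec < 0)
		puts("timeout already expired -> EAGAIN");
	else
		printf("remaining: %lld.%09ld s\n",
		    (long long)budget.tv_sec, budget.tv_nsec);
	return 0;
}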
Code example #21
File: ufs_extattr.c  Project: AhmadTux/freebsd
/*
 * Enable a named attribute on the specified filesystem; provide an
 * unlocked backing vnode to hold the attribute data.
 */
static int
ufs_extattr_enable(struct ufsmount *ump, int attrnamespace,
    const char *attrname, struct vnode *backing_vnode, struct thread *td)
{
	struct ufs_extattr_list_entry *attribute;
	struct iovec aiov;
	struct uio auio;
	int error = 0;

	if (!ufs_extattr_valid_attrname(attrnamespace, attrname))
		return (EINVAL);
	if (backing_vnode->v_type != VREG)
		return (EINVAL);

	attribute = malloc(sizeof(struct ufs_extattr_list_entry),
	    M_UFS_EXTATTR, M_WAITOK);
	if (attribute == NULL)
		return (ENOMEM);

	if (!(ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)) {
		error = EOPNOTSUPP;
		goto free_exit;
	}

	if (ufs_extattr_find_attr(ump, attrnamespace, attrname)) {
		error = EEXIST;
		goto free_exit;
	}

	strncpy(attribute->uele_attrname, attrname,
	    UFS_EXTATTR_MAXEXTATTRNAME);
	attribute->uele_attrnamespace = attrnamespace;
	bzero(&attribute->uele_fileheader,
	    sizeof(struct ufs_extattr_fileheader));
	
	attribute->uele_backing_vnode = backing_vnode;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t) &attribute->uele_fileheader;
	aiov.iov_len = sizeof(struct ufs_extattr_fileheader);
	auio.uio_resid = sizeof(struct ufs_extattr_fileheader);
	auio.uio_offset = (off_t) 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	vn_lock(backing_vnode, LK_SHARED | LK_RETRY);
	error = VOP_READ(backing_vnode, &auio, IO_NODELOCKED,
	    ump->um_extattr.uepm_ucred);

	if (error)
		goto unlock_free_exit;

	if (auio.uio_resid != 0) {
		printf("ufs_extattr_enable: malformed attribute header\n");
		error = EINVAL;
		goto unlock_free_exit;
	}

	if (attribute->uele_fileheader.uef_magic != UFS_EXTATTR_MAGIC) {
		printf("ufs_extattr_enable: invalid attribute header magic\n");
		error = EINVAL;
		goto unlock_free_exit;
	}

	if (attribute->uele_fileheader.uef_version != UFS_EXTATTR_VERSION) {
		printf("ufs_extattr_enable: incorrect attribute header "
		    "version\n");
		error = EINVAL;
		goto unlock_free_exit;
	}

	ASSERT_VOP_LOCKED(backing_vnode, "ufs_extattr_enable");
	LIST_INSERT_HEAD(&ump->um_extattr.uepm_list, attribute,
	    uele_entries);

	VOP_UNLOCK(backing_vnode, 0);
	return (0);

unlock_free_exit:
	VOP_UNLOCK(backing_vnode, 0);

free_exit:
	free(attribute, M_UFS_EXTATTR);
	return (error);
}
Code example #22
File: ufs_quota.c  Project: jamesbjackson/src
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/*
	 * XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
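Underneath the vnode and hash details, dqget() follows a classic cache discipline: search under the lock, drop the lock for an allocation that may sleep, then retake the lock and search again before inserting, discarding the fresh object if another thread won the race. The pthread sketch below shows just that skeleton; all names are invented.

#include <pthread.h>
#include <stdlib.h>

struct entry {
	int key;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *
cache_find(int key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->key == key)
			return (e);
	return (NULL);
}

struct entry *
cache_get(int key)
{
	struct entry *e, *fresh;

	pthread_mutex_lock(&cache_lock);
	e = cache_find(key);
	pthread_mutex_unlock(&cache_lock);
	if (e != NULL)
		return (e);			/* fast path: cache hit */

	/* Allocate without holding the lock; this may block. */
	fresh = calloc(1, sizeof(*fresh));
	if (fresh == NULL)
		return (NULL);
	fresh->key = key;

	pthread_mutex_lock(&cache_lock);
	e = cache_find(key);	/* recheck: someone may have raced us */
	if (e != NULL) {
		pthread_mutex_unlock(&cache_lock);
		free(fresh);	/* lost the race, discard our copy */
		return (e);
	}
	fresh->next = head;	/* won the race, insert at the head */
	head = fresh;
	pthread_mutex_unlock(&cache_lock);
	return (fresh);
}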
Code example #23
int
vcons_init_screen(struct vcons_data *vd, struct vcons_screen *scr,
    int existing, long *defattr)
{
	struct rasops_info *ri = &scr->scr_ri;
	int cnt, i;
#ifdef VCONS_DRAW_INTR
	int size;
#endif

	scr->scr_cookie = vd->cookie;
	scr->scr_vd = scr->scr_origvd = vd;
	scr->scr_busy = 0;
	
	/*
	 * call the driver-supplied init_screen function which is expected
	 * to set up rasops_info, override cursor() and probably others
	 */
	vd->init_screen(vd->cookie, scr, existing, defattr);

	/* 
	 * save the non virtual console aware rasops and replace them with 
	 * our wrappers
	 */
	vd->eraserows = ri->ri_ops.eraserows;
	vd->erasecols = ri->ri_ops.erasecols;
	vd->putchar   = ri->ri_ops.putchar;
	vd->cursor    = ri->ri_ops.cursor;

	if (scr->scr_flags & VCONS_NO_COPYCOLS) {
		vd->copycols  = vcons_copycols_noread;
	} else {
		vd->copycols = ri->ri_ops.copycols;
	}

	if (scr->scr_flags & VCONS_NO_COPYROWS) {
		vd->copyrows  = vcons_copyrows_noread;
	} else {
		vd->copyrows = ri->ri_ops.copyrows;
	}

	ri->ri_ops.eraserows = vcons_eraserows;	
	ri->ri_ops.erasecols = vcons_erasecols;	
	ri->ri_ops.putchar   = vcons_putchar;
	ri->ri_ops.cursor    = vcons_cursor;
	ri->ri_ops.copycols  = vcons_copycols;
	ri->ri_ops.copyrows  = vcons_copyrows;


	ri->ri_hw = scr;

	/* 
	 * we allocate both chars and attributes in one chunk, attributes first 
	 * because they have the (potentially) bigger alignment 
	 */
#ifdef WSDISPLAY_SCROLLSUPPORT
	cnt = (ri->ri_rows + WSDISPLAY_SCROLLBACK_LINES) * ri->ri_cols;
	scr->scr_lines_in_buffer = WSDISPLAY_SCROLLBACK_LINES;
	scr->scr_current_line = 0;
	scr->scr_line_wanted = 0;
	scr->scr_offset_to_zero = ri->ri_cols * WSDISPLAY_SCROLLBACK_LINES;
	scr->scr_current_offset = scr->scr_offset_to_zero;
#else
	cnt = ri->ri_rows * ri->ri_cols;
#endif
	scr->scr_attrs = (long *)malloc(cnt * (sizeof(long) + 
	    sizeof(uint32_t)), M_DEVBUF, M_WAITOK);
	if (scr->scr_attrs == NULL)
		return ENOMEM;

	scr->scr_chars = (uint32_t *)&scr->scr_attrs[cnt];

	ri->ri_ops.allocattr(ri, WS_DEFAULT_FG, WS_DEFAULT_BG, 0, defattr);
	scr->scr_defattr = *defattr;

	/*
	 * Fill the attribute buffer with *defattr and the chars with 0x20.
	 * Since we don't know whether the driver tries to mimic firmware
	 * output or resets everything, we do nothing to VRAM here; any
	 * driver that feels the need to clear the screen will have to do it
	 * on its own.  Additional screens start out in the background
	 * anyway, so clearing (or not) only really affects the initial
	 * console screen.
	 */
	for (i = 0; i < cnt; i++) {
		scr->scr_attrs[i] = *defattr;
		scr->scr_chars[i] = 0x20;
	}

#ifdef VCONS_DRAW_INTR
	size = ri->ri_cols * ri->ri_rows;
	if (size > vd->cells) {
		if (vd->chars != NULL) free(vd->chars, M_DEVBUF);
		if (vd->attrs != NULL) free(vd->attrs, M_DEVBUF);
		vd->cells = size;
		vd->chars = malloc(size * sizeof(uint32_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		vd->attrs = malloc(size * sizeof(long), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		vcons_invalidate_cache(vd);
	}
#endif

	if(vd->active == NULL) {
		vd->active = scr;
		SCREEN_VISIBLE(scr);
	}
	
	if (existing) {
		SCREEN_VISIBLE(scr);
		vd->active = scr;
	} else {
		SCREEN_INVISIBLE(scr);
	}
	
	LIST_INSERT_HEAD(&vd->screens, scr, next);
	return 0;
}
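The single malloc() above works because the long attributes come first: an array of long is at least as strictly aligned as uint32_t on the usual ABIs, so the character array can start right at its end. The same layout in a userland sketch (invented names):

#include <stdint.h>
#include <stdlib.h>

struct screen_buf {
	long *attrs;
	uint32_t *chars;
};

static int
screen_buf_alloc(struct screen_buf *sb, size_t cells)
{
	/* one chunk: attrs[cells] immediately followed by chars[cells] */
	sb->attrs = malloc(cells * (sizeof(long) + sizeof(uint32_t)));
	if (sb->attrs == NULL)
		return -1;
	/* &attrs[cells] is long-aligned, hence also uint32_t-aligned */
	sb->chars = (uint32_t *)&sb->attrs[cells];
	return 0;
}

static void
screen_buf_free(struct screen_buf *sb)
{
	free(sb->attrs);	/* releases both arrays at once */
	sb->attrs = NULL;
	sb->chars = NULL;
}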
Code example #24
File: kern_fork.c  Project: 2trill2spill/freebsd
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make sense
		 * for a runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
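The bzero()/bcopy() calls over __rangeof(struct proc, p_startzero, p_endzero) work because marker fields delimit contiguous sections of the structure, so fork does not have to name every member; the end marker is the first field *not* touched. A sketch of the same trick with an invented struct, using offsetof() in place of FreeBSD's __rangeof():

#include <stddef.h>
#include <string.h>

#define rangeof(type, start, end) \
	(offsetof(type, end) - offsetof(type, start))

struct fake_proc {
	int pid;		/* identity: never inherited */

	int start_zero;		/* marker: zeroed section begins here */
	int pending_sigs;
	int end_zero;		/* marker: first field NOT zeroed */

	int start_copy;		/* marker: copied section begins here */
	int nice_value;
	int end_copy;		/* marker: first field NOT copied */
};

static void
fake_fork_copy(const struct fake_proc *parent, struct fake_proc *child)
{
	memset(&child->start_zero, 0,
	    rangeof(struct fake_proc, start_zero, end_zero));
	memcpy(&child->start_copy, &parent->start_copy,
	    rangeof(struct fake_proc, start_copy, end_copy));
}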
Code example #25
static int
e1000phy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	const char *sep = "";
	int fast_ether = 0;

	sc = device_get_softc(dev);
	ma = device_get_ivars(dev);
	mii_softc_init(sc, ma);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	sc->mii_inst = mii->mii_instance;
	sc->mii_service = e1000phy_service;
	sc->mii_reset = e1000phy_reset;
	sc->mii_anegticks = MII_ANEGTICKS_GIGE;
	sc->mii_pdata = mii;

	sc->mii_flags |= MIIF_NOISOLATE;

	switch (sc->mii_model) {
	case MII_MODEL_MARVELL_E1011:
	case MII_MODEL_MARVELL_E1112:
		if (PHY_READ(sc, E1000_ESSR) & E1000_ESSR_FIBER_LINK)
			sc->mii_flags |= MIIF_HAVEFIBER;
		break;
	case MII_MODEL_MARVELL_E3082:
		/* 88E3082 10/100 Fast Ethernet PHY. */
		sc->mii_anegticks = MII_ANEGTICKS;
		fast_ether = 1;
		break;
	}

	mii->mii_instance++;

	e1000phy_reset(sc);

#define	ADD(m, c)	ifmedia_add(&mii->mii_media, (m), (c), NULL)
#define PRINT(s)	kprintf("%s%s", sep, s); sep = ", "

	device_printf(dev, "%s", "");

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst),
	    E1000_CR_ISOLATE);
	if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) {
		if (!fast_ether) {
			/*
			 * 1000T-simplex not supported; driver must ignore
			 * this entry, but it must be present in order to
			 * manually set full-duplex.
			 */
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0,
			    sc->mii_inst), E1000_CR_SPEED_1000);
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX,
			    sc->mii_inst),
			    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
			PRINT("1000baseT-FDX");
		}
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
		    E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX);
		PRINT("100baseTX-FDX");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
		    E1000_CR_SPEED_100);
		PRINT("100baseTX");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
		    E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX);
		PRINT("10baseTX-FDX");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
		    E1000_CR_SPEED_10);
		PRINT("10baseTX");
	} else {
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, sc->mii_inst),
		    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
		PRINT("1000baseSX-FDX");
	}

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	PRINT("auto\n");

#undef ADD
#undef PRINT

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return(0);
}
Code example #26
void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
    struct	scb *scb;
    struct	scsi_xfer *xs;
    struct	ahd_softc *ahd;
    struct	ahd_initiator_tinfo *tinfo;
    struct	ahd_tmode_tstate *tstate;
    u_int	mask;
    int	s;

    scb = (struct scb *)arg;
    xs = scb->xs;
    xs->error = CAM_REQ_INPROG;
    xs->status = 0;
    ahd = (struct ahd_softc *)xs->sc_link->adapter_softc;

    if (nsegments != 0) {
        void *sg;
        int op;
        u_int i;

        ahd_setup_data_scb(ahd, scb);

        /* Copy the segments into our SG list */
        for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

            sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
                              dm_segs->ds_len,
                              /*last*/i == 1);
            dm_segs++;
        }

        if ((xs->flags & SCSI_DATA_IN) != 0)
            op = BUS_DMASYNC_PREREAD;
        else
            op = BUS_DMASYNC_PREWRITE;

        bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
                        scb->dmamap->dm_mapsize, op);

    }

    ahd_lock(ahd, &s);

    /*
     * Last chance to check whether this SCB needs to
     * be aborted.
     */
    if (xs->flags & ITSDONE) {
        if (nsegments != 0)
            bus_dmamap_unload(ahd->parent_dmat,
                              scb->dmamap);
        ahd_free_scb(ahd, scb);
        ahd_unlock(ahd, &s);
        return;
    }

    tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
                                SCSIID_OUR_ID(scb->hscb->scsiid),
                                SCSIID_TARGET(ahd, scb->hscb->scsiid),
                                &tstate);

    mask = SCB_GET_TARGET_MASK(ahd, scb);

    if ((tstate->discenable & mask) != 0)
        scb->hscb->control |= DISCENB;

    if ((tstate->tagenable & mask) != 0)
        scb->hscb->control |= TAG_ENB;

    if ((tinfo->curr.ppr_options & MSG_EXT_PPR_PROT_IUS) != 0) {
        scb->flags |= SCB_PACKETIZED;
        if (scb->hscb->task_management != 0)
            scb->hscb->control &= ~MK_MESSAGE;
    }

    if ((tstate->auto_negotiate & mask) != 0) {
        scb->flags |= SCB_AUTO_NEGOTIATE;
        scb->hscb->control |= MK_MESSAGE;
    }

    /* XXX with ahc there was some bus_dmamap_sync(PREREAD|PREWRITE); */

    LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

    if (!(xs->flags & SCSI_POLL))
        timeout_add_msec(&xs->stimeout, xs->timeout);

    scb->flags |= SCB_ACTIVE;

    if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
        /* Define a mapping from our tag to the SCB. */
        ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
        ahd_pause(ahd);
        ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
        ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
        ahd_unpause(ahd);
    } else {
        ahd_queue_scb(ahd, scb);
    }

    if (!(xs->flags & SCSI_POLL)) {
        int target = xs->sc_link->target;
        int lun = SCB_GET_LUN(scb);

        if (ahd->inited_target[target] == 0) {
            struct  ahd_devinfo devinfo;

            ahd_adapter_req_set_xfer_mode(ahd, scb);
            ahd_compile_devinfo(&devinfo, ahd->our_id, target, lun,
                                'A', /*XXX milos*/ROLE_UNKNOWN);
            ahd_scb_devinfo(ahd, &devinfo, scb);
            ahd_update_neg_request(ahd, &devinfo, tstate, tinfo,
                                   AHD_NEG_IF_NON_ASYNC);
            ahd->inited_target[target] = 1;
        }

        ahd_unlock(ahd, &s);
        return;
    }

    /*
     * If we can't use interrupts, poll for completion
     */
    SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_poll\n"));

    do {
        if (ahd_poll(ahd, xs->timeout)) {
            if (!(xs->flags & SCSI_SILENT))
                printf("cmd fail\n");
            ahd_timeout(scb);
            break;
        }
    } while (!(xs->flags & ITSDONE));

    ahd_unlock(ahd, &s);
}
Code example #27
File: test_list.c  Project: mysidia/snservices1
int
main(int argc, char **argv)
{
  /*
   * initialize the list heads
   */
  LIST_INIT(&tl);
  LIST_INIT(&tl2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * add the elements in the order a1, a2, a3.  Since each is inserted at
   * the list head, walking the list forward will generate "a3, a2, a1"
   * ordering.
   */

  printf("TL:   a1\n");
  printf("TL2:  a3\n");

  LIST_INSERT_HEAD(&tl, &a1, ent);
  LIST_INSERT_HEAD(&tl2, &a3, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  printf("TL:   a2 a1\n");
  printf("TL2:  a2 a3\n");

  LIST_INSERT_HEAD(&tl, &a2, ent);
  LIST_INSERT_HEAD(&tl2, &a2, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  printf("TL:   a3 a2 a1\n");
  printf("TL2:  a1 a2 a3\n");

  LIST_INSERT_HEAD(&tl, &a3, ent);
  LIST_INSERT_HEAD(&tl2, &a1, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the middle element, list 2
   */

  LIST_REMOVE(&a2, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the list head, list 2
   */

  LIST_REMOVE(&a1, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the only member.  This sets the head to NULL as well, list 2
   */

  LIST_REMOVE(&a3, ent2);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the middle element.
   */

  LIST_REMOVE(&a2, ent);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the list head
   */

  LIST_REMOVE(&a3, ent);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  /*
   * remove the only member.  This sets the head to NULL as well.
   */

  LIST_REMOVE(&a1, ent);

  dump_list("tl", &tl);
  dump_list("tl2", &tl2);

  return 0;
}
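The listing above omits the declarations it needs. A plausible reconstruction (assumed, not taken from the original file) that makes the test compile: elements carry two LIST_ENTRY fields so each can sit on both lists at once.

#include <stdio.h>
#include <sys/queue.h>

struct test_elem {
	int id;
	LIST_ENTRY(test_elem) ent;	/* linkage for tl */
	LIST_ENTRY(test_elem) ent2;	/* linkage for tl2 */
};

LIST_HEAD(test_list, test_elem) tl, tl2;

static struct test_elem a1 = { 1 }, a2 = { 2 }, a3 = { 3 };

/*
 * The original presumably prints each list through its own linkage;
 * this sketch follows 'ent' only, which is exact for tl and merely
 * indicative for tl2.
 */
static void
dump_list(const char *name, struct test_list *head)
{
	struct test_elem *e;

	printf("%s:", name);
	LIST_FOREACH(e, head, ent)
		printf(" a%d", e->id);
	printf("\n");
}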
Code example #28
int                     
checklaunchd()                  
{               
	launch_data_t checkin_response = NULL; 
#ifdef LION_TEST
    launch_data_t checkin_request = NULL;
#endif
	launch_data_t sockets_dict, listening_fd_array;
	launch_data_t listening_fd;
	struct sockaddr_storage fdsockaddr;
	socklen_t fdsockaddrlen = sizeof(fdsockaddr);
	int socketct;
	int i;
	int listenerct;
	int returnval = 0;
	int fd;
	
	/* check in with launchd */
#ifdef LION_TEST
    if ((checkin_request = launch_data_new_string(LAUNCH_KEY_CHECKIN)) == NULL) {
#else
	if ((checkin_response = launch_socket_service_check_in()) == NULL) {
#endif
		plog(ASL_LEVEL_ERR, 
			 "failed to launch_socket_service_check_in.\n");
		goto done;
	}
#ifdef LION_TEST
    if ((checkin_response = launch_msg(checkin_request)) == NULL) {
        plog(ASL_LEVEL_ERR, "failed to launch_msg.\n");
        goto done;
    }
#endif
	if (LAUNCH_DATA_ERRNO == launch_data_get_type(checkin_response)) {
		plog(ASL_LEVEL_ERR, 
			 "launch_data_get_type error %d\n",
			 launch_data_get_errno(checkin_response));
		goto done;
	}
	if ( (sockets_dict = launch_data_dict_lookup(checkin_response, LAUNCH_JOBKEY_SOCKETS)) == NULL){
		plog(ASL_LEVEL_ERR, 
			 "failed to launch_data_dict_lookup.\n");
		goto done;
	}
	if ( !(socketct = launch_data_dict_get_count(sockets_dict))){
		plog(ASL_LEVEL_ERR, 
			 "launch_data_dict_get_count returns no socket defined.\n");
		goto done;
	}
	
	if ( (listening_fd_array = launch_data_dict_lookup(sockets_dict, "Listeners")) == NULL ){
		plog(ASL_LEVEL_ERR, 
			 "failed to launch_data_dict_lookup.\n");
		goto done;
	}
	listenerct = launch_data_array_get_count(listening_fd_array);
	for (i = 0; i < listenerct; i++) {
		listening_fd = launch_data_array_get_index(listening_fd_array, i);
		fd = launch_data_get_fd( listening_fd );
		if ( getsockname( fd , (struct sockaddr *)&fdsockaddr, &fdsockaddrlen)){
			continue;
		}
		
		/* Is this the VPN control socket? */ 
		if ( fdsockaddr.ss_family == AF_UNIX && 
				(!(strcmp(vpncontrolsock_path, ((struct sockaddr_un *)&fdsockaddr)->sun_path))))
		{       
			plog(ASL_LEVEL_INFO, 
				 "found launchd socket.\n");
			returnval = fd;
			break;
		}
	}
	// TODO: check if we have any leaked fd
	if ( listenerct == i){
		plog(ASL_LEVEL_ERR, 
			 "failed to find launchd socket\n");               
		returnval = 0;
	}
	
done:   
	if (checkin_response)
		launch_data_free(checkin_response);
	return(returnval);
}

		
void
vpncontrol_handler(void *unused)
{
	struct sockaddr_storage from;
	socklen_t fromlen = sizeof(from);
    int sock;

	struct vpnctl_socket_elem *sock_elem;
	
    sock_elem = racoon_malloc(sizeof(struct vpnctl_socket_elem));
	if (sock_elem == NULL) {
		plog(ASL_LEVEL_ERR, 
			"memory error: %s\n", strerror(errno));
		return; //%%%%%% terminate
	}
	LIST_INIT(&sock_elem->bound_addresses);
    
	sock_elem->sock = accept(lcconf->sock_vpncontrol, (struct sockaddr *)&from, &fromlen);
	if (sock_elem->sock < 0) {
		plog(ASL_LEVEL_ERR, 
			"failed to accept vpn_control command: %s\n", strerror(errno));
		racoon_free(sock_elem);
		return; //%%%%% terminate
	}
	LIST_INSERT_HEAD(&lcconf->vpnctl_comm_socks, sock_elem, chain);
    
    sock_elem->source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, sock_elem->sock, 0, dispatch_get_main_queue());
    if (sock_elem->source == NULL) {
		plog(ASL_LEVEL_ERR, "could not create comm socket source.");
		racoon_free(sock_elem);
		return; //%%%%% terminate
    }
    dispatch_source_set_event_handler(sock_elem->source, 
                                        ^{
                                                vpncontrol_comm_handler(sock_elem);
                                        });
    sock = sock_elem->sock;
	
    dispatch_source_t the_source = sock_elem->source;
    dispatch_source_set_cancel_handler(sock_elem->source,
                                       ^{
                                           close(sock);
                                           dispatch_release(the_source); /* Release the source on cancel */
                                       });
    dispatch_resume(sock_elem->source); /* sources are created suspended */
}
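The handler above follows the standard libdispatch source lifecycle: a source is created suspended, its event and cancel handlers are attached, and dispatch_resume() starts delivery; a later dispatch_source_cancel() runs the cancel handler, which is the one safe place to close the descriptor. A minimal stand-alone sketch (macOS, illustrative names):

#include <dispatch/dispatch.h>
#include <stdio.h>
#include <unistd.h>

static dispatch_source_t
watch_fd(int fd)
{
	dispatch_source_t src;

	src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0,
	    dispatch_get_main_queue());
	if (src == NULL)
		return NULL;

	dispatch_source_set_event_handler(src, ^{
		char buf[512];
		ssize_t n = read(fd, buf, sizeof(buf));

		printf("readable: %zd bytes\n", n);
		if (n <= 0)
			dispatch_source_cancel(src);	/* EOF or error */
	});
	dispatch_source_set_cancel_handler(src, ^{
		close(fd);		/* the source owns the descriptor */
		dispatch_release(src);	/* released on cancel, as above */
	});
	dispatch_resume(src);		/* sources are created suspended */
	return src;
}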
Code example #29
File: undefined.c  Project: coolgoose85/FreeBSD
void
install_coproc_handler_static(int coproc, struct undefined_handler *uh)
{

	LIST_INSERT_HEAD(&undefined_handlers[coproc], uh, uh_link);
}
Code example #30
File: if_vether.c  Project: rubo77/MeshBSD
/*
 * Ctor.
 */
static int
vether_clone_create(struct if_clone *ifc, int unit, caddr_t data)
{
	struct vether_softc *sc;
	struct ifnet *ifp;	
	uint32_t randval;
	uint8_t	lla[ETHER_ADDR_LEN];
/*
 * Allocate software context.
 */ 
	sc = malloc(sizeof(struct vether_softc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, vether_name, unit);
/*
 * Bind software context.
 */ 	
	VETHER_LOCK_INIT(sc);
	ifp->if_softc = sc;
/*
 * Initialize specific attributes.
 */ 
	ifp->if_init = vether_init;
	ifp->if_ioctl = vether_ioctl;
	ifp->if_start = vether_start;
 
 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
 	ifp->if_flags = (IFF_SIMPLEX|IFF_BROADCAST|IFF_MULTICAST|IFF_VETHER);
 
	ifp->if_capabilities = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;
	
	ifmedia_init(&sc->sc_ifm, 0, vether_media_change, vether_media_status);
	ifmedia_add(&sc->sc_ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifm, IFM_ETHER|IFM_AUTO);
 	
 	sc->sc_status = IFM_AVALID;
/*
 * Generate randomized lla.  
 */
	lla[0] = 0x0;
	randval = arc4random();
	memcpy(&lla[1], &randval, sizeof(uint32_t));
	lla[5] = (uint8_t)unit; /* Interface major number */
/*
 * Initialize Ethernet-specific attributes and attach the interface to
 * the link layer and the netgraph(4) domain, including the bpf(4)
 * inspection access point on the generic interface defined by ifnet(9).
 */ 	
 	ether_ifattach(ifp, lla);
 	ifp->if_baudrate = 0;
 
	mtx_lock(&vether_list_mtx);
	LIST_INSERT_HEAD(&vether_list, sc, vether_list);
	mtx_unlock(&vether_list_mtx);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 
	return (0);
}
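A side note on the generated LLA: pinning lla[0] to 0x0 places the address in globally administered OUI space. The conventional randomized-MAC recipe, shown below as a sketch (not MeshBSD code), instead sets the locally-administered bit and keeps the address unicast:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define ETHER_ADDR_LEN 6

static void
random_lla(uint8_t lla[ETHER_ADDR_LEN], uint8_t unit)
{
	uint32_t randval = arc4random();	/* BSD/macOS/newer glibc; substitute another CSPRNG elsewhere */

	memcpy(&lla[1], &randval, sizeof(uint32_t));
	lla[0] = 0x02;		/* locally administered (0x02 set), unicast (0x01 clear) */
	lla[5] = unit;		/* interface unit number, as above */
}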