Example No. 1
/*
 * Load a dvmamap from an array of segs or an mlist (if the first
 * "segs" entry's mlist is non-null).  It calls iommu_dvmamap_load_segs()
 * or iommu_dvmamap_load_mlist() for part of the 2nd pass through the
 * mapping.  This is ugly.  A better solution would probably be to have
 * function pointers for implementing the traversal.  That way, there
 * could be one core load routine for each of the three required algorithms
 * (buffer, seg, and mlist).  That would also mean that the traversal
 * algorithm would then only need one implementation for each algorithm
 * instead of two (one for populating the iomap and one for populating
 * the dvma map).
 */
int
viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	int i;
	int left;
	int err = 0;
	bus_size_t sgsize;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load_raw: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load_raw: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load_raw: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = MAX(segs[0]._ds_align, PAGE_SIZE);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	iommu_iomap_clear_pages(ims);
	if (segs[0]._ds_mlist) {
		struct pglist *mlist = segs[0]._ds_mlist;
		struct vm_page *m;
		for (m = TAILQ_FIRST(mlist); m != NULL;
		    m = TAILQ_NEXT(m,pageq)) {
			err = iommu_iomap_insert_page(ims, VM_PAGE_TO_PHYS(m));

			if (err) {
				printf("iomap insert error: %d for "
				    "pa 0x%lx\n", err, VM_PAGE_TO_PHYS(m));
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	} else {
		/* Count up the total number of pages we need */
		for (i = 0, left = size; left > 0 && i < nsegs; i++) {
			bus_addr_t a, aend;
			bus_size_t len = segs[i].ds_len;
			bus_addr_t addr = segs[i].ds_addr;
			int seg_len = MIN(left, len);

			if (len < 1)
				continue;

			aend = round_page(addr + seg_len);
			for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {

				err = iommu_iomap_insert_page(ims, a);
				if (err) {
					printf("iomap insert error: %d for "
					    "pa 0x%llx\n", err, a);
					iommu_iomap_clear_pages(ims);
					return (EFBIG);
				}
			}

			left -= seg_len;
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1)	{ 
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) "
		    "failed!\n", (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#else
		panic("");
#endif
	}		
#endif	
	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = size;

	if (viommu_iomap_load_map(is, ims, dvmaddr, flags))
		return (EFBIG);

	if (segs[0]._ds_mlist)
		err = viommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist,
		    flags, size, boundary);
	else
		err = viommu_dvmamap_load_seg(t, is, map, segs, nsegs,
		    flags, size, boundary);

	if (err)
		viommu_iomap_unload_map(is, ims);

	return (err);
}
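
The header comment above sketches a cleaner design: a single core load routine
driven by a traversal callback, instead of separate copies for buffers, segs
and mlists.  The following is a minimal user-space sketch of that idea, not
the iommu code itself; page_visitor_t, walk_array() and count_page() are
invented for the illustration, and a real implementation would also carve out
DVMA space and program the IOMMU for every page it visits.

#include <stdio.h>
#include <stddef.h>

/* Callback invoked once per physical page discovered by a traversal. */
typedef int (*page_visitor_t)(unsigned long pa, void *arg);

/* One traversal per data source; here, a plain array of page addresses. */
static int
walk_array(const unsigned long *pages, size_t npages,
    page_visitor_t visit, void *arg)
{
	size_t i;
	int err;

	for (i = 0; i < npages; i++)
		if ((err = visit(pages[i], arg)) != 0)
			return (err);
	return (0);
}

/* The "core load routine" only ever sees pages, whatever their origin. */
static int
count_page(unsigned long pa, void *arg)
{
	size_t *cnt = arg;

	printf("would map pa 0x%lx\n", pa);
	(*cnt)++;
	return (0);
}

int
main(void)
{
	unsigned long pages[] = { 0x1000, 0x2000, 0x5000 };
	size_t cnt = 0;

	if (walk_array(pages, 3, count_page, &cnt) != 0)
		return (1);
	printf("%zu pages would be entered into the iomap\n", cnt);
	return (0);
}
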
Example No. 2
static void
mta_flush_task(struct mta_session *s, int delivery, const char *error, size_t count,
	int cache)
{
	struct mta_envelope	*e;
	char			 relay[LINE_MAX];
	size_t			 n;
	struct sockaddr_storage	 ss;
	struct sockaddr		*sa;
	socklen_t		 sa_len;
	const char		*domain;

	(void)snprintf(relay, sizeof relay, "%s", mta_host_to_text(s->route->dst));
	n = 0;
	while ((e = TAILQ_FIRST(&s->task->envelopes))) {

		if (count && n == count) {
			stat_decrement("mta.envelope", n);
			return;
		}

		TAILQ_REMOVE(&s->task->envelopes, e, entry);

		/* we're about to log, associate session to envelope */
		e->session = s->id;
		e->ext = s->ext;

		/* XXX */
		/*
		 * getsockname() can only fail with ENOBUFS here;
		 * best effort, don't log the source ...
		 */
		sa = (struct sockaddr *)&ss;
		sa_len = sizeof(ss);
		if (getsockname(s->io.sock, sa, &sa_len) < 0)
			mta_delivery_log(e, NULL, relay, delivery, error);
		else
			mta_delivery_log(e, sa_to_text(sa),
			    relay, delivery, error);

		mta_delivery_notify(e);

		domain = strchr(e->dest, '@');
		if (domain) {
			domain++;
			mta_hoststat_update(domain, error);
			if (cache)
				mta_hoststat_cache(domain, e->id);
		}

		n++;
	}

	free(s->task->sender);
	free(s->task);
	s->task = NULL;

	if (s->datafp) {
		fclose(s->datafp);
		s->datafp = NULL;
	}

	stat_decrement("mta.envelope", n);
	stat_decrement("mta.task.running", 1);
	stat_decrement("mta.task", 1);
}
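
mta_flush_task() drains the task's envelope queue head-first: take the first
entry, unlink it, log and account for it, then move on.  Below is a minimal
sketch of that TAILQ drain idiom using only <sys/queue.h>; the envelope
structure and the counter are stand-ins invented for the example, not the
mail daemon's types.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct envelope {
	int id;
	TAILQ_ENTRY(envelope) entry;
};

TAILQ_HEAD(envq, envelope);

int
main(void)
{
	struct envq q = TAILQ_HEAD_INITIALIZER(q);
	struct envelope *e;
	size_t n = 0;
	int i;

	for (i = 0; i < 3; i++) {
		if ((e = malloc(sizeof(*e))) == NULL)
			return (1);
		e->id = i;
		TAILQ_INSERT_TAIL(&q, e, entry);
	}

	/*
	 * Same drain idiom as above: always take the head, unlink it,
	 * account for it, then release it.
	 */
	while ((e = TAILQ_FIRST(&q)) != NULL) {
		TAILQ_REMOVE(&q, e, entry);
		printf("flushing envelope %d\n", e->id);
		free(e);
		n++;
	}
	printf("%zu envelopes flushed\n", n);
	return (0);
}
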
Example No. 3
/*
 * ex_txt --
 *	Get lines from the terminal for ex.
 *
 * PUBLIC: int ex_txt __P((SCR *, TEXTH *, ARG_CHAR_T, u_int32_t));
 */
int
ex_txt(SCR *sp, TEXTH *tiqh, ARG_CHAR_T prompt, u_int32_t flags)
{
	EVENT ev;
	GS *gp;
	TEXT ait, *ntp, *tp;
	carat_t carat_st;
	size_t cnt;
	int rval;
	int nochange;

	rval = 0;

	/*
	 * Get a TEXT structure with some initial buffer space, reusing the
	 * last one if it's big enough.  (All TEXT bookkeeping fields default
	 * to 0 -- text_init() handles this.)
	 */
	if (!TAILQ_EMPTY(tiqh)) {
		tp = TAILQ_FIRST(tiqh);
		if (TAILQ_NEXT(tp, q) != NULL || tp->lb_len < 32) {
			text_lfree(tiqh);
			goto newtp;
		}
		tp->len = 0;
	} else {
newtp:		if ((tp = text_init(sp, NULL, 0, 32)) == NULL)
			goto err;
		TAILQ_INSERT_HEAD(tiqh, tp, q);
	}

	/* Set the starting line number. */
	tp->lno = sp->lno + 1;

	/*
	 * If it's a terminal, set up autoindent, put out the prompt, and
	 * set it up so we know we were suspended.  Otherwise, turn off
	 * the autoindent flag, as that requires less special casing below.
	 *
	 * XXX
	 * Historic practice is that ^Z suspended command mode (but, because
	 * it ran in cooked mode, it was unaffected by the autowrite option.)
	 * On restart, any "current" input was discarded, whether in insert
	 * mode or not, and ex was in command mode.  This code matches historic
	 * practice, but not 'cause it's easier.
	 */
	gp = sp->gp;
	if (F_ISSET(gp, G_SCRIPTED))
		LF_CLR(TXT_AUTOINDENT);
	else {
		if (LF_ISSET(TXT_AUTOINDENT)) {
			LF_SET(TXT_EOFCHAR);
			if (v_txt_auto(sp, sp->lno, NULL, 0, tp))
				goto err;
		}
		txt_prompt(sp, tp, prompt, flags);
	}

	for (carat_st = C_NOTSET, nochange = 0;;) {
		if (v_event_get(sp, &ev, 0, 0))
			goto err;

		/* Deal with all non-character events. */
		switch (ev.e_event) {
		case E_CHARACTER:
			break;
		case E_ERR:
			goto err;
		case E_REPAINT:
		case E_WRESIZE:
			continue;
		case E_EOF:
			rval = 1;
			/* FALLTHROUGH */
		case E_INTERRUPT:
			/*
			 * Handle EOF/SIGINT events by discarding partially
			 * entered text and returning.  EOF returns failure,
			 * E_INTERRUPT returns success.
			 */
			goto notlast;
		default:
			v_event_err(sp, &ev);
			goto notlast;
		}

		/*
		 * Deal with character events.
		 *
		 * Check to see if the character fits into the input buffer.
		 * (Use tp->len, ignore overwrite and non-printable chars.)
		 */
		BINC_GOTOW(sp, tp->lb, tp->lb_len, tp->len + 1);

		switch (ev.e_value) {
		case K_CR:
			/*
			 * !!!
			 * Historically, <carriage-return>'s in the command
			 * weren't special, so the ex parser would return an
			 * unknown command error message.  However, they
			 * terminated the command if they were in a map.  I'm
			 * pretty sure this still isn't right, but it handles
			 * what I've seen so far.
			 */
			if (!F_ISSET(&ev.e_ch, CH_MAPPED))
				goto ins_ch;
			/* FALLTHROUGH */
		case K_NL:
			/*
			 * '\' can escape <carriage-return>/<newline>.  We
			 * don't discard the backslash because we need it
			 * to get the <newline> through the ex parser.
			 */
			if (LF_ISSET(TXT_BACKSLASH) &&
			    tp->len != 0 && tp->lb[tp->len - 1] == '\\')
				goto ins_ch;

			/*
			 * CR returns from the ex command line.
			 *
			 * XXX
			 * Terminate with a nul, needed by filter.
			 */
			if (LF_ISSET(TXT_CR)) {
				tp->lb[tp->len] = '\0';
				goto done;
			}

			/*
			 * '.' may terminate text input mode; free the current
			 * TEXT.
			 */
			if (LF_ISSET(TXT_DOTTERM) && tp->len == tp->ai + 1 &&
			    tp->lb[tp->len - 1] == '.') {
notlast:			TAILQ_REMOVE(tiqh, tp, q);
				text_free(tp);
				goto done;
			}

			/* Set up bookkeeping for the new line. */
			if ((ntp = text_init(sp, NULL, 0, 32)) == NULL)
				goto err;
			ntp->lno = tp->lno + 1;

			/*
			 * Reset the autoindent line value.  0^D keeps the ai
			 * line from changing, ^D changes the level, even if
			 * there were no characters in the old line.  Note, if
			 * using the current tp structure, use the cursor as
			 * the length, the autoindent characters may have been
			 * erased.
			 */
			if (LF_ISSET(TXT_AUTOINDENT)) {
				if (nochange) {
					nochange = 0;
					if (v_txt_auto(sp,
					    OOBLNO, &ait, ait.ai, ntp))
						goto err;
					free(ait.lb);
				} else
					if (v_txt_auto(sp,
					    OOBLNO, tp, tp->len, ntp))
						goto err;
				carat_st = C_NOTSET;
			}
			txt_prompt(sp, ntp, prompt, flags);

			/*
			 * Swap old and new TEXT's, and insert the new TEXT
			 * into the queue.
			 */
			tp = ntp;
			TAILQ_INSERT_TAIL(tiqh, tp, q);
			break;
		case K_CARAT:			/* Delete autoindent chars. */
			if (tp->len <= tp->ai && LF_ISSET(TXT_AUTOINDENT))
				carat_st = C_CARATSET;
			goto ins_ch;
		case K_ZERO:			/* Delete autoindent chars. */
			if (tp->len <= tp->ai && LF_ISSET(TXT_AUTOINDENT))
				carat_st = C_ZEROSET;
			goto ins_ch;
		case K_CNTRLD:			/* Delete autoindent char. */
			/*
			 * !!!
			 * Historically, the ^D command took (but then ignored)
			 * a count.  For simplicity, we don't return it unless
			 * it's the first character entered.  The check for len
			 * equal to 0 is okay, TXT_AUTOINDENT won't be set.
			 */
			if (LF_ISSET(TXT_CNTRLD)) {
				for (cnt = 0; cnt < tp->len; ++cnt)
					if (!isblank(tp->lb[cnt]))
						break;
				if (cnt == tp->len) {
					tp->len = 1;
					tp->lb[0] = ev.e_c;
					tp->lb[1] = '\0';

					/*
					 * Put out a line separator, in case
					 * the command fails.
					 */
					(void)putchar('\n');
					goto done;
				}
			}

			/*
			 * POSIX 1003.1b-1993, paragraph 7.1.1.9, states that
			 * the EOF characters are discarded if there are other
			 * characters to process in the line, i.e. if the EOF
			 * is not the first character in the line.  For this
			 * reason, historic ex discarded the EOF characters,
			 * even if occurring in the middle of the input line.
			 * We match that historic practice.
			 *
			 * !!!
			 * The test for discarding in the middle of the line is
			 * done in the switch, because the CARAT forms are N+1,
			 * not N.
			 *
			 * !!!
			 * There's considerable magic to make the terminal code
			 * return the EOF character at all.  See that code for
			 * details.
			 */
			if (!LF_ISSET(TXT_AUTOINDENT) || tp->len == 0)
				continue;
			switch (carat_st) {
			case C_CARATSET:		/* ^^D */
				if (tp->len > tp->ai + 1)
					continue;

				/* Save the ai string for later. */
				ait.lb = NULL;
				ait.lb_len = 0;
				BINC_GOTOW(sp, ait.lb, ait.lb_len, tp->ai);
				MEMCPY(ait.lb, tp->lb, tp->ai);
				ait.ai = ait.len = tp->ai;

				carat_st = C_NOTSET;
				nochange = 1;
				goto leftmargin;
			case C_ZEROSET:			/* 0^D */
				if (tp->len > tp->ai + 1)
					continue;

				carat_st = C_NOTSET;
leftmargin:			(void)gp->scr_ex_adjust(sp, EX_TERM_CE);
				tp->ai = tp->len = 0;
				break;
			case C_NOTSET:			/* ^D */
				if (tp->len > tp->ai)
					continue;

				if (txt_dent(sp, tp))
					goto err;
				break;
			default:
				abort();
			}

			/* Clear and redisplay the line. */
			(void)gp->scr_ex_adjust(sp, EX_TERM_CE);
			txt_prompt(sp, tp, prompt, flags);
			break;
		default:
			/*
			 * See the TXT_BEAUTIFY comment in vi/v_txt_ev.c.
			 *
			 * Silently eliminate any iscntrl() character that was
			 * not already handled specially, except for <tab> and
			 * <ff>.
			 */
ins_ch:			if (LF_ISSET(TXT_BEAUTIFY) && ISCNTRL(ev.e_c) &&
			    ev.e_value != K_FORMFEED && ev.e_value != K_TAB)
				break;

			tp->lb[tp->len++] = ev.e_c;
			break;
		}
	}
	/* NOTREACHED */

done:	return (rval);

err:	
alloc_err:
	return (1);
}
Example No. 4
int
_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread	*curthread = _get_curthread();
	struct itimerval itimer;
	int		f_gc = 0;
	int             ret = 0;
	pthread_t       gc_thread;
	pthread_t       new_thread;
	pthread_attr_t	pattr;
	void           *stack;
#if !defined(__ia64__)
	u_long 		stackp;
#endif

	if (thread == NULL)
		return(EINVAL);

	/*
	 * Locking functions in libc are required when there are
	 * threads other than the initial thread.
	 */
	__isthreaded = 1;

	/* Allocate memory for the thread structure: */
	if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL) {
			/* Use the default thread attributes: */
			pattr = &_pthread_attr_default;
		} else {
			pattr = *attr;
		}
		/* Check if a stack was specified in the thread attributes: */
		if ((stack = pattr->stackaddr_attr) != NULL) {
		}
		/* Allocate a stack: */
		else {
			stack = _thread_stack_alloc(pattr->stacksize_attr,
			    pattr->guardsize_attr);
			if (stack == NULL) {
				ret = EAGAIN;
				free(new_thread);
			}
		}

		/* Check for errors: */
		if (ret != 0) {
		} else {
			/* Initialise the thread structure: */
			memset(new_thread, 0, sizeof(struct pthread));
			new_thread->slice_usec = -1;
			new_thread->stack = stack;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;

			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = PTHREAD_MAGIC;

			/* Initialise the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			new_thread->sigmask_seqno = 0;

			/* Initialize the signal frame: */
			new_thread->curframe = NULL;

			/* Initialise the jump buffer: */
			_setjmp(new_thread->ctx.jb);

			/*
			 * Set up new stack frame so that it looks like it
			 * returned from a longjmp() to the beginning of
			 * _thread_start().
			 */
			SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

#if !defined(__ia64__)
			stackp = (long)new_thread->stack + pattr->stacksize_attr - sizeof(double);
#if defined(__amd64__)
			stackp &= ~0xFUL;
#endif
			/* The stack starts high and builds down: */
			SET_STACK_JB(new_thread->ctx.jb, stackp);
#else
			SET_STACK_JB(new_thread->ctx.jb,
			    (long)new_thread->stack, pattr->stacksize_attr);
#endif

			/* Copy the thread attributes: */
			memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
				/* Copy the scheduling attributes: */
				new_thread->base_priority =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize joiner to NULL (no joiner): */
			new_thread->joiner = NULL;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->poll_data.nfds = 0;
			new_thread->poll_data.fds = NULL;
			new_thread->continuation = NULL;

			/*
			 * Defer signals to protect the scheduling queues
			 * from access by the signal handler:
			 */
			_thread_kern_sig_defer();

			/*
			 * Initialise the unique id which GDB uses to
			 * track threads.
			 */
			new_thread->uniqueid = next_uniqueid++;

			/*
			 * Check if the garbage collector thread
			 * needs to be started.
			 */
			f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

			/* Add the thread to the linked list of all threads: */
			TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);

			if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
				new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
				new_thread->state = PS_SUSPENDED;
			} else {
				new_thread->state = PS_RUNNING;
				PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
			}

			/*
			 * Undefer and handle pending signals, yielding
			 * if necessary.
			 */
			_thread_kern_sig_undefer();

			/* Return a pointer to the thread structure: */
			(*thread) = new_thread;

			if (f_gc != 0) {
				/* Install the scheduling timer: */
				itimer.it_interval.tv_sec = 0;
				itimer.it_interval.tv_usec = _clock_res_usec;
				itimer.it_value = itimer.it_interval;
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
				    NULL) != 0)
					PANIC("Cannot set interval timer");
			}

			/* Schedule the new user thread: */
			_thread_kern_sched(NULL);

			/*
			 * Start a garbage collector thread
			 * if necessary.
			 */
			if (f_gc && _pthread_create(&gc_thread, NULL,
				    _thread_gc, NULL) != 0)
				PANIC("Can't create gc thread");

		}
	}

	/* Return the status: */
	return (ret);
}
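
The jump-buffer setup above (_setjmp() plus the SET_RETURN_ADDR_JB and
SET_STACK_JB macros) makes the new thread look as if it had just returned
from longjmp() into _thread_start() on its own stack.  A portable, user-space
way to express the same idea is the POSIX ucontext API; the sketch below only
illustrates running a start routine on a private stack and is not how this
threads library actually performs the switch.

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE	(64 * 1024)

static ucontext_t main_ctx, thr_ctx;

/* Plays the role of _thread_start(): the first code run on the new stack. */
static void
thread_start(void)
{
	printf("running on the private stack\n");
	/* Returning resumes main_ctx because of uc_link below. */
}

int
main(void)
{
	void *stack = malloc(STACK_SIZE);

	if (stack == NULL)
		return (1);

	/*
	 * In spirit this is the _setjmp()/SET_STACK_JB sequence: capture a
	 * context, point it at a fresh stack, and aim its return address at
	 * the start routine.
	 */
	if (getcontext(&thr_ctx) == -1)
		return (1);
	thr_ctx.uc_stack.ss_sp = stack;
	thr_ctx.uc_stack.ss_size = STACK_SIZE;
	thr_ctx.uc_link = &main_ctx;	/* where control goes on return */
	makecontext(&thr_ctx, thread_start, 0);

	/* Switch to the new "thread" and come back when it finishes. */
	if (swapcontext(&main_ctx, &thr_ctx) == -1)
		return (1);

	printf("back on the original stack\n");
	free(stack);
	return (0);
}
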
Example No. 5
int
iwm_mvm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	struct iwm_scan_config *scan_config;
	int ret, j, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = IWM_CMD_SYNC,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	cmd_size = sizeof(*scan_config) + sc->ucode_capa.n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_mvm_get_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_mvm_get_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr,
	    vap ? vap->iv_myaddr : ic->ic_macaddr);

	scan_config->bcast_sta_id = sc->sc_aux_sta.sta_id;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	for (nchan = j = 0;
	    j < ic->ic_nchans && nchan < sc->ucode_capa.n_scan_channels; j++) {
		c = &ic->ic_channels[j];
		/* For 2GHz, only populate 11b channels */
		/* For 5GHz, only populate 11a channels */
		/*
		 * Catch other channels, in case we have 900MHz channels or
		 * something in the chanlist.
		 */
		if (iwm_mvm_scan_skip_channel(c))
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Sending UMAC scan config\n");

	ret = iwm_send_cmd(sc, &hcmd);
	if (!ret)
		IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
		    "UMAC scan config was sent successfully\n");

	free(scan_config, M_DEVBUF);
	return ret;
}

static boolean_t
iwm_mvm_scan_use_ebs(struct iwm_softc *sc)
{
	const struct iwm_ucode_capabilities *capa = &sc->ucode_capa;

	/* We can only use EBS if:
	 *	1. the feature is supported;
	 *	2. the last EBS was successful;
	 *	3. if only single scan, the single scan EBS API is supported;
	 *	4. it's not a p2p find operation.
	 */
	return ((capa->flags & IWM_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
		sc->last_ebs_successful);
}

int
iwm_mvm_umac_scan(struct iwm_softc *sc)
{
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = IWM_CMD_SYNC,
	};
	struct ieee80211_scan_state *ss = sc->sc_ic.ic_scan;
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	uint8_t i, nssid;
	int ret;

	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->ucode_capa.n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = malloc(req_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Handling ieee80211 scan request\n");

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	nssid = MIN(ss->ss_nssid, IWM_PROBE_OPTION_MAX);
	req->n_channels = iwm_mvm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data, nssid);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	tail = (void *)((char *)&req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->ucode_capa.n_scan_channels);

	/* Check if we're doing an active directed scan. */
	for (i = 0; i < nssid; i++) {
		tail->direct_scan[i].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[i].len = MIN(ss->ss_ssid[i].len,
		    IEEE80211_NWID_LEN);
		memcpy(tail->direct_scan[i].ssid, ss->ss_ssid[i].ssid,
		    tail->direct_scan[i].len);
		/* XXX debug */
	}
	if (nssid != 0) {
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (iwm_mvm_scan_use_ebs(sc))
		req->channel_flags = IWM_SCAN_CHANNEL_FLAG_EBS |
				     IWM_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				     IWM_SCAN_CHANNEL_FLAG_CACHE_ADD;

	if (iwm_mvm_rrm_scan_needed(sc))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	ret = iwm_mvm_fill_probe_req(sc, &tail->preq);
	if (ret) {
		free(req, M_DEVBUF);
		return ret;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	ret = iwm_send_cmd(sc, &hcmd);
	if (!ret)
		IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
		    "Scan request was sent successfully\n");
	free(req, M_DEVBUF);
	return ret;
}

int
iwm_mvm_lmac_scan(struct iwm_softc *sc)
{
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = IWM_CMD_SYNC,
	};
	struct ieee80211_scan_state *ss = sc->sc_ic.ic_scan;
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	uint8_t i, nssid;
	int ret;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
	    "Handling ieee80211 scan request\n");

	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->ucode_capa.n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = malloc(req_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_mvm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_MVM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (iwm_mvm_rrm_scan_needed(sc))
		req->scan_flags |= htole32(IWM_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = iwm_mvm_scan_rxon_flags(sc->sc_ic.ic_scan->ss_chans[0]);

	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_mvm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = sc->sc_aux_sta.sta_id;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_mvm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = sc->sc_aux_sta.sta_id;

	/* Check if we're doing an active directed scan. */
	nssid = MIN(ss->ss_nssid, IWM_PROBE_OPTION_MAX);
	for (i = 0; i < nssid; i++) {
		req->direct_scan[i].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[i].len = MIN(ss->ss_ssid[i].len,
		    IEEE80211_NWID_LEN);
		memcpy(req->direct_scan[i].ssid, ss->ss_ssid[i].ssid,
		    req->direct_scan[i].len);
		/* XXX debug */
	}
	if (nssid != 0) {
		req->scan_flags |=
		    htole32(IWM_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	} else
		req->scan_flags |= htole32(IWM_MVM_LMAC_SCAN_FLAG_PASSIVE);

	req->n_channels = iwm_mvm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data, nssid);

	ret = iwm_mvm_fill_probe_req(sc,
			    (struct iwm_scan_probe_req *)(req->data +
			    (sizeof(struct iwm_scan_channel_cfg_lmac) *
			    sc->ucode_capa.n_scan_channels)));
	if (ret) {
		free(req, M_DEVBUF);
		return ret;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	if (iwm_mvm_scan_use_ebs(sc)) {
		req->channel_opt[0].flags =
			htole16(IWM_SCAN_CHANNEL_FLAG_EBS |
				IWM_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				IWM_SCAN_CHANNEL_FLAG_CACHE_ADD);
		req->channel_opt[0].non_ebs_ratio =
			htole16(IWM_DENSE_EBS_SCAN_RATIO);
		req->channel_opt[1].flags =
			htole16(IWM_SCAN_CHANNEL_FLAG_EBS |
				IWM_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				IWM_SCAN_CHANNEL_FLAG_CACHE_ADD);
		req->channel_opt[1].non_ebs_ratio =
			htole16(IWM_SPARSE_EBS_SCAN_RATIO);
	}

	ret = iwm_send_cmd(sc, &hcmd);
	if (!ret) {
		IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
		    "Scan request was sent successfully\n");
	}
	free(req, M_DEVBUF);
	return ret;
}
Example No. 6
/*
 * move the pointers into the stream
 */
int stream_move(struct stream_object *so, size_t offset, int whence, int mode)
{
   size_t tmp_size = 0;
   size_t move = 0;
   
   struct so_list *so_curr = NULL;
   size_t po_off = 0;

   /* get the values into temp variable */
   switch (mode) {
      case STREAM_SIDE1:
         so_curr = so->side1.so_curr;
         po_off = so->side1.po_off;
         break;
      case STREAM_SIDE2:
         so_curr = so->side2.so_curr;
         po_off = so->side2.po_off;
         break;
   }

   /* no movement */
   if (offset == 0)
      return 0;
   
   /* 
    * the offset is calculated from the beginning,
    * so move to the first packet
    */
   if (whence == SEEK_SET) {
      so_curr = TAILQ_FIRST(&so->so_head);
      po_off = 0;
   }

   /* the other mode is SEEK_CUR */

   /* search the first packet matching the selected mode */
   while (so_curr->side != mode) {
      so_curr = TAILQ_NEXT(so_curr, next);
      /* don't go after the end of the stream */
      if (so_curr == TAILQ_END(&so->so_head))
         return 0;
   }

   while (offset) {
      /* get the length to jump to in the current po */
      tmp_size = (so_curr->po.DATA.len - po_off < offset) ? so_curr->po.DATA.len - po_off : offset;

      /* update the offset */
      po_off += tmp_size;

      /* decrement the total offset by the packet length */
      offset -= tmp_size;

      /* update the total movement */
      move += tmp_size;

      /* we have reached the end of the packet, go to the next one */
      if (po_off == so_curr->po.DATA.len) {
         /* search the next packet matching the selected mode */
         do {
            /* don't go after the end of the stream */
            if (TAILQ_NEXT(so_curr, next) != TAILQ_END(&so->so_head))
               so_curr = TAILQ_NEXT(so_curr, next);
            else
               goto move_end;
            
         } while (so_curr->side != mode);

         /* reset the offset for the packet */
         po_off = 0;
      }
   }
   
move_end:
   /* restore the value in the real stream object */
   switch (mode) {
      case STREAM_SIDE1:
         so->side1.so_curr = so_curr;
         so->side1.po_off = po_off;
         break;
      case STREAM_SIDE2:
         so->side2.so_curr = so_curr;
         so->side2.po_off = po_off;
         break;
   }
   
   return move;
}
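
stream_move() advances a (current packet, intra-packet offset) pair until the
requested number of bytes has been consumed.  The sketch below models just
that bookkeeping in user space; the pkt structure, the payload sizes and
advance() are invented for the example and ignore the per-side filtering the
real function performs.

#include <stdio.h>
#include <stddef.h>

struct pkt {
	size_t len;		/* payload length of this packet */
	struct pkt *next;
};

/*
 * Advance (*cur, *off) by want bytes, exactly like the po_off/so_curr pair
 * above.  Returns the number of bytes actually moved.
 */
static size_t
advance(struct pkt **cur, size_t *off, size_t want)
{
	size_t moved = 0, step;

	while (want > 0 && *cur != NULL) {
		step = (*cur)->len - *off;
		if (step > want)
			step = want;
		*off += step;
		want -= step;
		moved += step;
		if (*off == (*cur)->len) {	/* exhausted this packet */
			*cur = (*cur)->next;
			*off = 0;
		}
	}
	return (moved);
}

int
main(void)
{
	struct pkt c = { 300, NULL }, b = { 200, &c }, a = { 100, &b };
	struct pkt *cur = &a;
	size_t off = 0, moved;

	moved = advance(&cur, &off, 250);	/* ends 150 bytes into b */
	printf("moved %zu, now %zu bytes into a %zu-byte packet\n",
	    moved, off, cur ? cur->len : 0);
	return (0);
}
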
Example No. 7
mx_channel_t *
mx_channel_netconf (mx_sock_session_t *mssp, mx_sock_t *client, int xml_mode)
{
    LIBSSH2_CHANNEL *channel;
    mx_channel_t *mcp;

    mcp = TAILQ_FIRST(&mssp->mss_released);
    if (mcp) {
	mx_log("S%u reusing channel C%u for client S%u",
	       mssp->mss_base.ms_id, mcp->mc_id, client->ms_id);

	TAILQ_REMOVE(&mssp->mss_released, mcp, mc_link);
	TAILQ_INSERT_HEAD(&mssp->mss_channels, mcp, mc_link);

	mcp->mc_state = MSS_RPC_INITIAL;
	mcp->mc_client = client;
	if (mx_mti(client)->mti_set_channel)
	    mx_mti(client)->mti_set_channel(client, mcp->mc_session, mcp);

	return mcp;
    }

    /* Must use blocking IO for channel creation */
    libssh2_session_set_blocking(mssp->mss_session, 1);

    channel = libssh2_channel_open_session(mssp->mss_session);
    if (channel == NULL) {
	mx_log("S%u could not open netconf channel", mssp->mss_base.ms_id);
	return NULL;
    }

    if (!xml_mode) {
	if (libssh2_channel_subsystem(channel, "netconf") != 0) {
	    mx_log("S%u could not open netconf subsystem",
		   mssp->mss_base.ms_id);
	    goto try_xml_mode;
	}
	mx_log("S%u opened netconf subsystem channel to %s",
	       mssp->mss_base.ms_id, mssp->mss_target);
    } else {
	static const char command[] = "xml-mode netconf need-trailer";

    try_xml_mode:
	if (libssh2_channel_process_startup(channel,
					    "exec", sizeof("exec") - 1,
					    command, strlen(command)) != 0) {
	    mx_log("S%u could not open netconf xml-mode",
		   mssp->mss_base.ms_id);
	    libssh2_channel_free(channel);
	    channel = NULL;
	} else {
	    mx_log("S%u opened netconf xml-mode channel to %s",
		   mssp->mss_base.ms_id, mssp->mss_target);
	}
    }

    libssh2_session_set_blocking(mssp->mss_session, 0);

    if (channel == NULL) {
	mx_log("S%u could not open netconf channel", mssp->mss_base.ms_id);
	return NULL;
    }

    mcp = mx_channel_create(mssp, client, channel);
    if (mcp == NULL) {
	/* XXX fail */
	return NULL;
    }

    mx_channel_netconf_send_hello(mcp);
    mx_channel_netconf_read_hello(mcp);

    return mcp;
}
Example No. 8
int
main(int argc, char * const argv[])
{
    int ch;
    int fdr, fdw;
    off_t t, d, start, len;
    size_t i, j;
    int error, state;
    u_char *buf;
    u_int sectorsize;
    time_t t1, t2;
    struct stat sb;
    u_int n, snapshot = 60;

    while ((ch = getopt(argc, argv, "b:r:w:s:")) != -1) {
        switch (ch) {
        case 'b':
            bigsize = strtoul(optarg, NULL, 0);
            break;
        case 'r':
            rworklist = strdup(optarg);
            if (rworklist == NULL)
                err(1, "Cannot allocate enough memory");
            break;
        case 's':
            snapshot = strtoul(optarg, NULL, 0);
            break;
        case 'w':
            wworklist = strdup(optarg);
            if (wworklist == NULL)
                err(1, "Cannot allocate enough memory");
            break;
        default:
            usage();
            /* NOTREACHED */
        }
    }
    argc -= optind;
    argv += optind;

    if (argc < 1 || argc > 2)
        usage();

    fdr = open(argv[0], O_RDONLY);
    if (fdr < 0)
        err(1, "Cannot open read descriptor %s", argv[0]);

    error = fstat(fdr, &sb);
    if (error < 0)
        err(1, "fstat failed");
    if (S_ISBLK(sb.st_mode) || S_ISCHR(sb.st_mode)) {
        error = ioctl(fdr, DIOCGSECTORSIZE, &sectorsize);
        if (error < 0)
            err(1, "DIOCGSECTORSIZE failed");

        minsize = sectorsize;
        bigsize = (bigsize / sectorsize) * sectorsize;

        error = ioctl(fdr, DIOCGMEDIASIZE, &t);
        if (error < 0)
            err(1, "DIOCGMEDIASIZE failed");
    } else {
        t = sb.st_size;
    }

    if (bigsize < minsize)
        bigsize = minsize;

    for (ch = 0; (bigsize >> ch) > minsize; ch++)
        continue;
    medsize = bigsize >> (ch / 2);
    medsize = (medsize / minsize) * minsize;

    fprintf(stderr, "Bigsize = %zu, medsize = %zu, minsize = %zu\n",
            bigsize, medsize, minsize);

    buf = malloc(bigsize);
    if (buf == NULL)
        err(1, "Cannot allocate %zu bytes buffer", bigsize);

    if (argc > 1) {
        fdw = open(argv[1], O_WRONLY | O_CREAT, DEFFILEMODE);
        if (fdw < 0)
            err(1, "Cannot open write descriptor %s", argv[1]);
        if (ftruncate(fdw, t) < 0)
            err(1, "Cannot truncate output %s to %jd bytes",
                argv[1], (intmax_t)t);
    } else
        fdw = -1;

    if (rworklist != NULL) {
        d = read_worklist(t);
    } else {
        new_lump(0, t, 0);
        d = 0;
    }
    if (wworklist != NULL)
        signal(SIGINT, sighandler);

    t1 = 0;
    start = len = i = state = 0;
    PRINT_HEADER;
    n = 0;
    for (;;) {
        lp = TAILQ_FIRST(&lumps);
        if (lp == NULL)
            break;
        while (lp->len > 0 && !aborting) {
            /* These are only copied for printing stats */
            start = lp->start;
            len = lp->len;
            state = lp->state;

            i = MIN(lp->len, (off_t)bigsize);
            if (lp->state == 1)
                i = MIN(lp->len, (off_t)medsize);
            if (lp->state > 1)
                i = MIN(lp->len, (off_t)minsize);
            time(&t2);
            if (t1 != t2 || lp->len < (off_t)bigsize) {
                PRINT_STATUS(start, i, len, state, d, t);
                t1 = t2;
                if (++n == snapshot) {
                    save_worklist();
                    n = 0;
                }
            }
            if (i == 0) {
                errx(1, "BOGUS i %10jd", (intmax_t)i);
            }
            fflush(stdout);
            j = pread(fdr, buf, i, lp->start);
            if (j == i) {
                d += i;
                if (fdw >= 0)
                    j = pwrite(fdw, buf, i, lp->start);
                else
                    j = i;
                if (j != i)
                    printf("\nWrite error at %jd/%zu\n",
                           lp->start, i);
                lp->start += i;
                lp->len -= i;
                continue;
            }
            printf("\n%jd %zu failed (%s)\n",
                   lp->start, i, strerror(errno));
            if (errno == EINVAL) {
                printf("read() size too big? Try with -b 131072");
                aborting = 1;
            }
            if (errno == ENXIO)
                aborting = 1;
            new_lump(lp->start, i, lp->state + 1);
            lp->start += i;
            lp->len -= i;
        }
        if (aborting) {
            save_worklist();
            return (0);
        }
        TAILQ_REMOVE(&lumps, lp, list);
        free(lp);
    }
    PRINT_STATUS(start, i, len, state, d, t);
    save_worklist();
    printf("\nCompleted\n");
    return (0);
}
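
The medsize computed near the top of main() ends up roughly at the geometric
mean of bigsize and the sector size, rounded down to a sector multiple, so
failed regions can be retried at a finer granularity before falling back to
single sectors.  A standalone sketch of just that arithmetic; the input
values are examples, not the program's defaults.

#include <stdio.h>

int
main(void)
{
	size_t bigsize = 131072;	/* example -b value */
	size_t minsize = 512;		/* example sector size */
	size_t medsize;
	int ch;

	/* Count how many halvings it takes to get from big down to min... */
	for (ch = 0; (bigsize >> ch) > minsize; ch++)
		continue;
	/* ...then apply half that many halvings and re-align to a sector. */
	medsize = bigsize >> (ch / 2);
	medsize = (medsize / minsize) * minsize;

	/* Prints: Bigsize = 131072, medsize = 8192, minsize = 512 */
	printf("Bigsize = %zu, medsize = %zu, minsize = %zu\n",
	    bigsize, medsize, minsize);
	return (0);
}
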
Example No. 9
/*
 * This is a slightly strangely structured routine.  It always puts
 * all the pages for a vnode.  It starts by releasing pages which
 * are clean and simultaneously looks up the smallest offset for a
 * dirty page belonging to the object.  If there is no smallest offset,
 * all pages have been cleaned.  Otherwise, it finds a contiguous range
 * of dirty pages starting from the smallest offset and writes them out.
 * After this the scan is restarted.
 */
int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
	struct vm_page **busypg)
{
	char databuf[MAXPHYS];
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, *pg_next;
	voff_t smallest;
	voff_t curoff, bufoff;
	off_t eof;
	size_t xfersize;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
#if 0
	int async = (flags & PGO_SYNCIO) == 0;
#else
	int async = 0;
#endif

 restart:
	/* check if all pages are clean */
	smallest = -1;
	for (pg = TAILQ_FIRST(&uobj->memq); pg; pg = pg_next) {
		pg_next = TAILQ_NEXT(pg, listq.queue);

		/*
		 * XXX: this is not correct at all.  But it's based on
		 * assumptions we can make when accessing the pages
		 * only through the file system and not through the
		 * virtual memory subsystem.  Well, at least I hope
		 * so ;)
		 */
		KASSERT((pg->flags & PG_BUSY) == 0);

		/* If we can just dump the page, do so */
		if (pg->flags & PG_CLEAN || flags & PGO_FREE) {
			uvm_pagefree(pg);
			continue;
		}

		if (pg->offset < smallest || smallest == -1)
			smallest = pg->offset;
	}

	/* all done? */
	if (TAILQ_EMPTY(&uobj->memq)) {
		vp->v_iflag &= ~VI_ONWORKLST;
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/* we need to flush */
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	for (curoff = smallest; curoff < eof; curoff += PAGE_SIZE) {
		void *curva;

		if (curoff - smallest >= MAXPHYS)
			break;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg == NULL)
			break;

		/* XXX: see comment about above KASSERT */
		KASSERT((pg->flags & PG_BUSY) == 0);

		curva = databuf + (curoff-smallest);
		memcpy(curva, (void *)pg->uanon, PAGE_SIZE);
		rumpvm_enterva((vaddr_t)curva, pg);

		pg->flags |= PG_CLEAN;
	}
	KASSERT(curoff > smallest);

	mutex_exit(&uobj->vmobjlock);

	/* then we write */
	for (bufoff = 0; bufoff < MIN(curoff-smallest,eof); bufoff+=xfersize) {
		struct buf *bp;
		struct vnode *devvp;
		daddr_t bn, lbn;
		int run, error;

		lbn = (smallest + bufoff) >> bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &bn, &run);
		if (error)
			panic("%s: VOP_BMAP failed: %d", __func__, error);

		xfersize = MIN(((lbn+1+run) << bshift) - (smallest+bufoff),
		     curoff - (smallest+bufoff));

		/*
		 * We might run across blocks which aren't allocated yet.
		 * A reason might be e.g. the write operation being still
		 * in the kernel page cache while truncate has already
		 * enlarged the file.  So just ignore those ranges.
		 */
		if (bn == -1)
			continue;

		bp = getiobuf(vp, true);

		/* only write max what we are allowed to write */
		bp->b_bcount = xfersize;
		if (smallest + bufoff + xfersize > eof)
			bp->b_bcount -= (smallest+bufoff+xfersize) - eof;
		bp->b_bcount = (bp->b_bcount + DEV_BSIZE-1) & ~(DEV_BSIZE-1);

		KASSERT(bp->b_bcount > 0);
		KASSERT(smallest >= 0);

		DPRINTF(("putpages writing from %x to %x (vp size %x)\n",
		    (int)(smallest + bufoff),
		    (int)(smallest + bufoff + bp->b_bcount),
		    (int)eof));

		bp->b_bufsize = round_page(bp->b_bcount);
		bp->b_lblkno = 0;
		bp->b_blkno = bn + (((smallest+bufoff)&(bsize-1))>>DEV_BSHIFT);
		bp->b_data = databuf + bufoff;
		bp->b_flags = B_WRITE;
		bp->b_cflags |= BC_BUSY;

		if (async) {
			bp->b_flags |= B_ASYNC;
			bp->b_iodone = uvm_aio_biodone;
		}

		vp->v_numoutput++;
		VOP_STRATEGY(devvp, bp);
		if (bp->b_error)
			panic("%s: VOP_STRATEGY lazy bum %d",
			    __func__, bp->b_error);
		if (!async)
			putiobuf(bp);
	}
	rumpvm_flushva();

	mutex_enter(&uobj->vmobjlock);
	goto restart;
}
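
The header comment describes the strategy: release clean pages, find the
smallest offset with a dirty page, flush a contiguous dirty run starting
there, then restart the scan until nothing dirty remains.  The sketch below
models that loop in user space with a toy page array; flush() and the page
states are invented stand-ins for the real page queue and buffer I/O.

#include <stdio.h>

#define NPAGES	16

/* 0 = not present, 1 = clean, 2 = dirty -- a toy stand-in for uobj->memq. */
static int page[NPAGES] = { 0, 1, 2, 2, 0, 1, 2, 0, 2, 2, 2, 0, 1, 0, 0, 2 };

static void
flush(int start, int end)		/* writes back pages [start, end) */
{
	int i;

	printf("writing pages %d..%d\n", start, end - 1);
	for (i = start; i < end; i++)
		page[i] = 0;		/* written back and released */
}

int
main(void)
{
	int i, smallest, end;

	for (;;) {
		/* Pass 1: drop clean pages, remember the smallest dirty one. */
		smallest = -1;
		for (i = 0; i < NPAGES; i++) {
			if (page[i] == 1)
				page[i] = 0;
			else if (page[i] == 2 && smallest == -1)
				smallest = i;
		}
		if (smallest == -1)	/* everything is clean: done */
			break;

		/* Pass 2: extend a contiguous dirty run and write it out. */
		for (end = smallest; end < NPAGES && page[end] == 2; end++)
			continue;
		flush(smallest, end);
		/* Restart the scan, as the routine above does. */
	}
	printf("object clean\n");
	return (0);
}
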
Example No. 10
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	struct pf_pool		*rpool = &r->rpool;
	struct pf_addr		*raddr = NULL, *rmask = NULL;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		*sn = pf_find_src_node(saddr, r, af, 0);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
			PF_ACPY(naddr, &(*sn)->raddr, af);
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking maps ");
				pf_print_host(saddr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, af);
				printf("\n");
			}
			return (0);
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1); /* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				rpool->counter.addr32[0] = htonl(arc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(arc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);

		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
	    {
		unsigned char hash[16];

		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	    }
	case PF_POOL_ROUNDROBIN:
	    {
		struct pf_pooladdr *acur = rpool->cur;

		/*
		 * XXXGL: in the round-robin case we need to store
		 * the round-robin machine state in the rule, thus
		 * forwarding thread needs to modify rule.
		 *
		 * This is done w/o locking, because performance is assumed
		 * more important than round-robin precision.
		 *
		 * In the simplest case we just update the "rpool->cur"
		 * pointer. However, if the pool contains tables or dynamic
		 * addresses, then "tblidx" is also used to store machine
		 * state. Since "tblidx" is an int, concurrent access to it
		 * can't lead to inconsistency, only to loss of precision.
		 *
		 * Things get worse if the table contains prefixes rather
		 * than hosts. In that case the counter also stores machine
		 * state, and for an IPv6 address the counter can't be
		 * updated atomically.
		 * Probably, using round-robin on a table containing IPv6
		 * prefixes (or even IPv4) would cause a panic.
		 */

		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter, af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			goto get_addr;

	try_next:
		if (TAILQ_NEXT(rpool->cur, entries) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		else
			rpool->cur = TAILQ_NEXT(rpool->cur, entries);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	    }
	}
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, af);

	if (V_pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		printf("\n");
	}

	return (0);
}
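
The PF_POOL_ROUNDROBIN case keeps its cursor (rpool->cur, plus tblidx and
counter for tables) in the rule and advances it on every call, wrapping back
to the head of the list, skipping members that cannot supply an address of
the requested family, and giving up after one full lap.  Below is a
stripped-down model of that selection loop; the member array and its usable
flag are invented stand-ins for the table and dynamic-interface lookups.

#include <stdio.h>

struct member {
	const char *addr;
	int usable;		/* stand-in for pfr_pool_get() succeeding */
};

static struct member pool[] = {
	{ "192.0.2.10", 1 },
	{ "192.0.2.11", 0 },	/* e.g. no address of this family */
	{ "192.0.2.12", 1 },
};
#define NPOOL	((int)(sizeof(pool) / sizeof(pool[0])))

static int cur;			/* round-robin state kept in the "rule" */

/* Return the index of the next usable member, or -1 after a full lap. */
static int
map_addr_rr(void)
{
	int start = cur, next = cur;

	do {
		next = (next + 1) % NPOOL;	/* wrap to the list head */
		if (pool[next].usable) {
			cur = next;
			return (next);
		}
	} while (next != start);
	return (-1);			/* nothing usable in the pool */
}

int
main(void)
{
	int i, idx;

	for (i = 0; i < 4; i++) {
		if ((idx = map_addr_rr()) < 0) {
			printf("no usable address\n");
			break;
		}
		printf("selected %s\n", pool[idx].addr);
	}
	return (0);
}
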
Example No. 11
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	struct kinfo_proc kinfo_proc, *kp;
	struct pgrp pgrp;
	struct session sess;
	struct cdev t_cdev;
	struct tty tty;
	struct vmspace vmspace;
	struct sigacts sigacts;
#if 0
	struct pstats pstats;
#endif
	struct ucred ucred;
	struct prison pr;
	struct thread mtd;
	struct proc proc;
	struct proc pproc;
	struct sysentvec sysent;
	char svname[KI_EMULNAMELEN];

	kp = &kinfo_proc;
	kp->ki_structsize = sizeof(kinfo_proc);
	/*
	 * Loop on the processes.  This is completely broken because we need
	 * to be able to loop on the threads and merge the ones that belong
	 * to the same process somehow.
	 */
	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
		memset(kp, 0, sizeof *kp);
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (proc.p_state != PRS_ZOMBIE) {
			if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads),
			    &mtd)) {
				_kvm_err(kd, kd->program,
				    "can't read thread at %p",
				    TAILQ_FIRST(&proc.p_threads));
				return (-1);
			}
		}
		if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
			kp->ki_ruid = ucred.cr_ruid;
			kp->ki_svuid = ucred.cr_svuid;
			kp->ki_rgid = ucred.cr_rgid;
			kp->ki_svgid = ucred.cr_svgid;
			kp->ki_cr_flags = ucred.cr_flags;
			if (ucred.cr_ngroups > KI_NGROUPS) {
				kp->ki_ngroups = KI_NGROUPS;
				kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
			} else
				kp->ki_ngroups = ucred.cr_ngroups;
			kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups,
			    kp->ki_ngroups * sizeof(gid_t));
			kp->ki_uid = ucred.cr_uid;
			if (ucred.cr_prison != NULL) {
				if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
					_kvm_err(kd, kd->program,
					    "can't read prison at %p",
					    ucred.cr_prison);
					return (-1);
				}
				kp->ki_jid = pr.pr_id;
			}
		}

		switch(what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_GID:
			if (kp->ki_groups[0] != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_RGID:
			if (kp->ki_rgid != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (kp->ki_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (kp->ki_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather kinfo_proc
		 */
		kp->ki_paddr = p;
		kp->ki_addr = 0;	/* XXX uarea */
		/* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */
		kp->ki_args = proc.p_args;
		kp->ki_tracep = proc.p_tracevp;
		kp->ki_textvp = proc.p_textvp;
		kp->ki_fd = proc.p_fd;
		kp->ki_vmspace = proc.p_vmspace;
		if (proc.p_sigacts != NULL) {
			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
				_kvm_err(kd, kd->program,
				    "can't read sigacts at %p", proc.p_sigacts);
				return (-1);
			}
			kp->ki_sigignore = sigacts.ps_sigignore;
			kp->ki_sigcatch = sigacts.ps_sigcatch;
		}
#if 0
		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
				_kvm_err(kd, kd->program,
				    "can't read stats at %x", proc.p_stats);
				return (-1);
			}
			kp->ki_start = pstats.p_start;

			/*
			 * XXX: The times here are probably zero and need
			 * to be calculated from the raw data in p_rux and
			 * p_crux.
			 */
			kp->ki_rusage = pstats.p_ru;
			kp->ki_childstime = pstats.p_cru.ru_stime;
			kp->ki_childutime = pstats.p_cru.ru_utime;
			/* Some callers want child-times in a single value */
			timeradd(&kp->ki_childstime, &kp->ki_childutime,
			    &kp->ki_childtime);
		}
#endif
		if (proc.p_oppid)
			kp->ki_ppid = proc.p_oppid;
		else if (proc.p_pptr) {
			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
				_kvm_err(kd, kd->program,
				    "can't read pproc at %p", proc.p_pptr);
				return (-1);
			}
			kp->ki_ppid = pproc.p_pid;
		} else
			kp->ki_ppid = 0;
		if (proc.p_pgrp == NULL)
			goto nopgrp;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
				 proc.p_pgrp);
			return (-1);
		}
		kp->ki_pgid = pgrp.pg_id;
		kp->ki_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
				pgrp.pg_session);
			return (-1);
		}
		kp->ki_sid = sess.s_sid;
		(void)memcpy(kp->ki_login, sess.s_login,
						sizeof(kp->ki_login));
		kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0;
		if (sess.s_leader == p)
			kp->ki_kiflag |= KI_SLEADER;
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			if (tty.t_dev != NULL) {
				if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) {
					_kvm_err(kd, kd->program,
						 "can't read cdev at %p",
						tty.t_dev);
					return (-1);
				}
#if 0
				kp->ki_tdev = t_cdev.si_udev;
#else
				kp->ki_tdev = NODEV;
#endif
			}
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %p",
						tty.t_pgrp);
					return (-1);
				}
				kp->ki_tpgid = pgrp.pg_id;
			} else
				kp->ki_tpgid = -1;
			if (tty.t_session != NULL) {
				if (KREAD(kd, (u_long)tty.t_session, &sess)) {
					_kvm_err(kd, kd->program,
					    "can't read session at %p",
					    tty.t_session);
					return (-1);
				}
				kp->ki_tsid = sess.s_sid;
			}
		} else {
nopgrp:
			kp->ki_tdev = NODEV;
		}
		if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
			(void)kvm_read(kd, (u_long)mtd.td_wmesg,
			    kp->ki_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&vmspace, sizeof(vmspace));
		kp->ki_size = vmspace.vm_map.size;
		/*
		 * Approximate the kernel's method of calculating
		 * this field.
		 */
#define		pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
		kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap);
		kp->ki_swrss = vmspace.vm_swrss;
		kp->ki_tsize = vmspace.vm_tsize;
		kp->ki_dsize = vmspace.vm_dsize;
		kp->ki_ssize = vmspace.vm_ssize;

		switch (what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_PGRP:
			if (kp->ki_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_SESSION:
			if (kp->ki_sid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     kp->ki_tdev != (dev_t)arg)
				continue;
			break;
		}
		if (proc.p_comm[0] != 0)
			strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN);
		(void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent,
		    sizeof(sysent));
		(void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname,
		    sizeof(svname));
		if (svname[0] != 0)
			strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN);
		if ((proc.p_state != PRS_ZOMBIE) &&
		    (mtd.td_blocked != 0)) {
			kp->ki_kiflag |= KI_LOCKBLOCK;
			if (mtd.td_lockname)
				(void)kvm_read(kd,
				    (u_long)mtd.td_lockname,
				    kp->ki_lockname, LOCKNAMELEN);
			kp->ki_lockname[LOCKNAMELEN] = 0;
		}
		kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime);
		kp->ki_pid = proc.p_pid;
		kp->ki_siglist = proc.p_siglist;
		SIGSETOR(kp->ki_siglist, mtd.td_siglist);
		kp->ki_sigmask = mtd.td_sigmask;
		kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig);
		kp->ki_acflag = proc.p_acflag;
		kp->ki_lock = proc.p_lock;
		if (proc.p_state != PRS_ZOMBIE) {
			kp->ki_swtime = (ticks - proc.p_swtick) / hz;
			kp->ki_flag = proc.p_flag;
			kp->ki_sflag = 0;
			kp->ki_nice = proc.p_nice;
			kp->ki_traceflag = proc.p_traceflag;
			if (proc.p_state == PRS_NORMAL) {
				if (TD_ON_RUNQ(&mtd) ||
				    TD_CAN_RUN(&mtd) ||
				    TD_IS_RUNNING(&mtd)) {
					kp->ki_stat = SRUN;
				} else if (mtd.td_state ==
				    TDS_INHIBITED) {
					if (P_SHOULDSTOP(&proc)) {
						kp->ki_stat = SSTOP;
					} else if (
					    TD_IS_SLEEPING(&mtd)) {
						kp->ki_stat = SSLEEP;
					} else if (TD_ON_LOCK(&mtd)) {
						kp->ki_stat = SLOCK;
					} else {
						kp->ki_stat = SWAIT;
					}
				}
			} else {
				kp->ki_stat = SIDL;
			}
			/* Stuff from the thread */
			kp->ki_pri.pri_level = mtd.td_priority;
			kp->ki_pri.pri_native = mtd.td_base_pri;
			kp->ki_lastcpu = mtd.td_lastcpu;
			kp->ki_wchan = mtd.td_wchan;
			kp->ki_oncpu = mtd.td_oncpu;
			if (mtd.td_name[0] != '\0')
				strlcpy(kp->ki_tdname, mtd.td_name, sizeof(kp->ki_tdname));
			kp->ki_pctcpu = 0;
			kp->ki_rqindex = 0;

			/*
			 * Note: legacy fields; wraps at NO_CPU_OLD or the
			 * old max CPU value as appropriate
			 */
			if (mtd.td_lastcpu == NOCPU)
				kp->ki_lastcpu_old = NOCPU_OLD;
			else if (mtd.td_lastcpu > MAXCPU_OLD)
				kp->ki_lastcpu_old = MAXCPU_OLD;
			else
				kp->ki_lastcpu_old = mtd.td_lastcpu;

			if (mtd.td_oncpu == NOCPU)
				kp->ki_oncpu_old = NOCPU_OLD;
			else if (mtd.td_oncpu > MAXCPU_OLD)
				kp->ki_oncpu_old = MAXCPU_OLD;
			else
				kp->ki_oncpu_old = mtd.td_oncpu;
		} else {
			kp->ki_stat = SZOMB;
		}
		kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */
		bcopy(&kinfo_proc, bp, sizeof(kinfo_proc));
		++bp;
		++cnt;
	}
	return (cnt);
}
Example No. 12
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
    struct pf_addr *daddr, uint16_t dport, int rs_num,
    struct pf_anchor_stackframe *anchor_stack)
{
	struct pf_rule		*r, *rm = NULL;
	struct pf_ruleset	*ruleset = NULL;
	int			 tag = -1;
	int			 rtableid = -1;
	int			 asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr	*src = NULL, *dst = NULL;
		struct pf_addr_wrap	*xdst = NULL;

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL)
				xdst = &r->rpool.cur->addr;
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif, M_GETFIB(m)))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
			    PF_SKIP_DST_ADDR].ptr;
		else if (src->port_op && !pf_match_port(src->port_op,
		    src->port[0], src->port[1], sport))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL,
		    M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL, M_GETFIB(m)))
			r = TAILQ_NEXT(r, entries);
		else if (dst != NULL && dst->port_op &&
		    !pf_match_port(dst->port_op, dst->port[0],
		    dst->port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
		    off, pd->hdr.tcp), r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (r->rtableid >= 0)
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				rm = r;
			} else
				pf_step_into_anchor(anchor_stack, &asd,
				    &ruleset, rs_num, &r, NULL, NULL);
		}
		if (r == NULL)
			pf_step_out_of_anchor(anchor_stack, &asd, &ruleset,
			    rs_num, &r, NULL, NULL);
	}

	if (tag > 0 && pf_tag_packet(m, pd, tag))
		return (NULL);
	if (rtableid >= 0)
		M_SETFIB(m, rtableid);

	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT))
		return (NULL);
	return (rm);
}
Example #13
bool
rc_service_daemons_crashed(const char *service)
{
	char dirpath[PATH_MAX];
	DIR *dp;
	struct dirent *d;
	char *path = dirpath;
	FILE *fp;
	char *line = NULL;
	size_t len = 0;
	char **argv = NULL;
	char *exec = NULL;
	char *name = NULL;
	char *pidfile = NULL;
	pid_t pid = 0;
	RC_PIDLIST *pids;
	RC_PID *p1;
	RC_PID *p2;
	char *p;
	char *token;
	bool retval = false;
	RC_STRINGLIST *list = NULL;
	RC_STRING *s;
	size_t i;
	char *ch_root;
	char *spidfile;

	path += snprintf(dirpath, sizeof(dirpath), RC_SVCDIR "/daemons/%s",
	    basename_c(service));

	if (!(dp = opendir(dirpath)))
		return false;

	while ((d = readdir(dp))) {
		if (d->d_name[0] == '.')
			continue;

		snprintf(path, sizeof(dirpath) - (path - dirpath), "/%s",
		    d->d_name);
		fp = fopen(dirpath, "r");
		if (!fp)
			break;

		while ((rc_getline(&line, &len, fp))) {
			p = line;
			if ((token = strsep(&p, "=")) == NULL || !p)
				continue;

			if (!*p)
				continue;

			if (strcmp(token, "exec") == 0) {
				if (exec)
					free(exec);
				exec = xstrdup(p);
			} else if (strncmp(token, "argv_", 5) == 0) {
				if (!list)
					list = rc_stringlist_new();
				rc_stringlist_add(list, p);
			} else if (strcmp(token, "name") == 0) {
				if (name)
					free(name);
				name = xstrdup(p);
			} else if (strcmp(token, "pidfile") == 0) {
				pidfile = xstrdup(p);
				break;
			}
		}
		fclose(fp);

		ch_root = rc_service_value_get(basename_c(service), "chroot");
		spidfile = pidfile;
		if (ch_root && pidfile) {
			spidfile = xmalloc(strlen(ch_root) + strlen(pidfile) + 1);
			strcpy(spidfile, ch_root);
			strcat(spidfile, pidfile);
			free(pidfile);
			pidfile = spidfile;
		}

		pid = 0;
		if (pidfile) {
			retval = true;
			if ((fp = fopen(pidfile, "r"))) {
				if (fscanf(fp, "%d", &pid) == 1)
					retval = false;
				fclose(fp);
			}
			free(pidfile);
			pidfile = NULL;

			/* We have the pid, so no need to match
			   on exec or name */
			free(exec);
			exec = NULL;
			free(name);
			name = NULL;
		} else {
			if (exec) {
				if (!list)
					list = rc_stringlist_new();
				if (!TAILQ_FIRST(list))
					rc_stringlist_add(list, exec);

				free(exec);
				exec = NULL;
			}

			if (list) {
				/* We need to flatten our linked list
				   into an array */
				i = 0;
				TAILQ_FOREACH(s, list, entries)
				    i++;
				argv = xmalloc(sizeof(char *) * (i + 1));
				i = 0;
				TAILQ_FOREACH(s, list, entries)
				    argv[i++] = s->value;
				argv[i] = '\0';
			}
		}

		if (!retval) {
			if (pid != 0) {
				if (kill(pid, 0) == -1 && errno == ESRCH)
					retval = true;
			} else if ((pids = rc_find_pids(exec,
				    (const char *const *)argv,
				    0, pid)))
			{
				p1 = LIST_FIRST(pids);
				while (p1) {
					p2 = LIST_NEXT(p1, entries);
					free(p1);
					p1 = p2;
				}
				free(pids);
			} else
				retval = true;
		}
		rc_stringlist_free(list);
		list = NULL;
		free(argv);
		argv = NULL;
		free(exec);
		exec = NULL;
		free(name);
		name = NULL;
		if (retval)
			break;
	}
	closedir(dp);
	free(line);

	return retval;
}
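
A hedged usage sketch for the function above (not from the original source): the caller only supplies the service name, and the boolean result says whether any supervised daemon of that service appears to have died. The service name is made up, and the program would be linked against OpenRC's librc.

#include <stdio.h>
#include <rc.h>		/* OpenRC librc; declares rc_service_daemons_crashed() */

int
main(void)
{
	const char *svc = "sshd";	/* example service name only */

	if (rc_service_daemons_crashed(svc))
		printf("%s: at least one daemon appears to have crashed\n", svc);
	else
		printf("%s: daemons look healthy\n", svc);
	return 0;
}
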
Example #14
/*
 * Execute ready systimers.  Called directly from the platform-specific
 * one-shot timer clock interrupt (e.g. clkintr()) or via an IPI.  May
 * be called simultaneously on multiple cpus and always operates on
 * the current cpu's queue.  Systimer functions are responsible for calling
 * hardclock, statclock, and other finely-timed routines.
 */
void
systimer_intr(sysclock_t *timep, int in_ipi, struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    sysclock_t time = *timep;
    systimer_t info;

    if (gd->gd_syst_nest)
	return;

    crit_enter();
    ++gd->gd_syst_nest;
    while ((info = TAILQ_FIRST(&gd->gd_systimerq)) != NULL) {
	/*
	 * If we haven't reached the requested time, tell the cputimer
	 * how much is left and break out.
	 */
	if ((int)(info->time - time) > 0) {
	    cputimer_intr_reload(info->time - time);
	    break;
	}

	/*
	 * Dequeue and execute, detect a loss of the systimer.  Note
	 * that the in-progress systimer pointer can only be used to
	 * detect a loss of the systimer, it is only useful within
	 * this code sequence and becomes stale otherwise.
	 */
	info->flags &= ~SYSTF_ONQUEUE;
	TAILQ_REMOVE(info->queue, info, node);
	gd->gd_systimer_inprog = info;
	crit_exit();
	info->func(info, in_ipi, frame);
	crit_enter();

	/*
	 * The caller may have deleted or even re-queued the systimer itself
	 * with a delete/add sequence.  If the caller does not mess with
	 * the systimer we will requeue the periodic interval automatically.
	 *
	 * If this is a non-queued periodic interrupt, do not allow multiple
	 * events to build up (used for things like the callout timer to
	 * prevent premature timeouts due to long interrupt disablements,
	 * BIOS 8254 glitching, and so forth).  However, we still want to
	 * keep things synchronized between cpus for efficient handling of
	 * the timer interrupt so jump in multiples of the periodic rate.
	 */
	if (gd->gd_systimer_inprog == info && info->periodic) {
	    if (info->which != sys_cputimer) {
		info->periodic = sys_cputimer->fromhz(info->freq);
		info->which = sys_cputimer;
	    }
	    info->time += info->periodic;
	    if ((info->flags & SYSTF_NONQUEUED) &&
		(int)(info->time - time) <= 0
	    ) {
		info->time += ((time - info->time + info->periodic - 1) / 
				info->periodic) * info->periodic;
	    }
	    systimer_add(info);
	}
	gd->gd_systimer_inprog = NULL;
    }
    --gd->gd_syst_nest;
    crit_exit();
}
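
The SYSTF_NONQUEUED catch-up computation above is easier to see with concrete numbers. A standalone sketch (not kernel code; the values are invented) showing how a late periodic timer is advanced by whole periods so its phase is preserved:

#include <stdio.h>

int
main(void)
{
	unsigned int periodic = 100;	/* period, in cputimer ticks */
	unsigned int info_time = 350;	/* when the timer should have fired */
	unsigned int now = 730;		/* current cputimer count */

	/* Round the deficit up to a whole number of periods ... */
	info_time += ((now - info_time + periodic - 1) / periodic) * periodic;

	/* ... so the next expiry keeps the original phase: prints 750. */
	printf("next expiry at %u\n", info_time);
	return 0;
}
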
Example #15
int
cmd_swap_pane_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*tmp_wp, *src_wp, *dst_wp;
	struct layout_cell	*src_lc, *dst_lc;
	u_int			 sx, sy, xoff, yoff;

	dst_wl = cmd_find_pane(ctx, args_get(args, 't'), NULL, &dst_wp);
	if (dst_wl == NULL)
		return (-1);
	dst_w = dst_wl->window;

	if (!args_has(args, 's')) {
		src_w = dst_w;
		if (args_has(self->args, 'D')) {
			src_wp = TAILQ_NEXT(dst_wp, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_FIRST(&dst_w->panes);
		} else if (args_has(self->args, 'U')) {
			src_wp = TAILQ_PREV(dst_wp, window_panes, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_LAST(&dst_w->panes, window_panes);
		} else
			return (0);
	} else {
		src_wl = cmd_find_pane(ctx, args_get(args, 's'), NULL, &src_wp);
		if (src_wl == NULL)
			return (-1);
		src_w = src_wl->window;
	}

	if (src_wp == dst_wp)
		return (0);

	tmp_wp = TAILQ_PREV(dst_wp, window_panes, entry);
	TAILQ_REMOVE(&dst_w->panes, dst_wp, entry);
	TAILQ_REPLACE(&src_w->panes, src_wp, dst_wp, entry);
	if (tmp_wp == src_wp)
		tmp_wp = dst_wp;
	if (tmp_wp == NULL)
		TAILQ_INSERT_HEAD(&dst_w->panes, src_wp, entry);
	else
		TAILQ_INSERT_AFTER(&dst_w->panes, tmp_wp, src_wp, entry);

	src_lc = src_wp->layout_cell;
	dst_lc = dst_wp->layout_cell;
	src_lc->wp = dst_wp;
	dst_wp->layout_cell = src_lc;
	dst_lc->wp = src_wp;
	src_wp->layout_cell = dst_lc;

	src_wp->window = dst_w;
	dst_wp->window = src_w;

	sx = src_wp->sx; sy = src_wp->sy;
	xoff = src_wp->xoff; yoff = src_wp->yoff;
	src_wp->xoff = dst_wp->xoff; src_wp->yoff = dst_wp->yoff;
	window_pane_resize(src_wp, dst_wp->sx, dst_wp->sy);
	dst_wp->xoff = xoff; dst_wp->yoff = yoff;
	window_pane_resize(dst_wp, sx, sy);

	if (!args_has(self->args, 'd')) {
		if (src_w != dst_w) {
			window_set_active_pane(src_w, dst_wp);
			window_set_active_pane(dst_w, src_wp);
		} else {
			tmp_wp = dst_wp;
			if (!window_pane_visible(tmp_wp))
				tmp_wp = src_wp;
			window_set_active_pane(src_w, tmp_wp);
		}
	} else {
		if (src_w->active == src_wp)
			window_set_active_pane(src_w, dst_wp);
		if (dst_w->active == dst_wp)
			window_set_active_pane(dst_w, src_wp);
	}
	if (src_w != dst_w) {
		if (src_w->last == src_wp)
			src_w->last = NULL;
		if (dst_w->last == dst_wp)
			dst_w->last = NULL;
	}
	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	return (0);
}
Example #16
File: netif.c Project: Aconex/pcp
void
refresh_netif_metrics(void)
{
#if 0
    int			i;
    int			sts;
    unsigned long	kaddr;
    struct ifnethead	ifnethead;
    struct ifnet	ifnet;
    struct ifnet	*ifp;
    static int		warn = 0;	/* warn once control */

    /*
     * Not sure that the order of chained netif structs is invariant,
     * especially if interfaces are added to the configuration after
     * initial system boot ... so mark all the instances as inactive
     * and re-match based on the interface name
     */
    pmdaCacheOp(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_INACTIVE);

    kaddr = symbols[KERN_IFNET].n_value;
    if (kvmp == NULL || kaddr == 0) {
	/* no network interface metrics for us today ... */
	if ((warn & WARN_INIT) == 0) {
	    fprintf(stderr, "refresh_netif_metrics: Warning: cannot get any network interface metrics\n");
	    warn |= WARN_INIT;
	}
	return;
    }

    /*
     * Kernel data structures for the linked list of network interface
     * information.
     *
     * _ifnet -> struct ifnethead {
     *		    struct ifnet *tqh_first;
     *		    struct ifnet **tqh_last;
     *		    ...
     *		 }
     *
     * and within an ifnet struct (declared in <net/if_var.h>) we find
     * the linked list maintained in if_link, the external interface
     * name in if_xname[] and if_data which is a nested if_data struct
     * (declared in <net/if.h>) that contains many of the goodies we're
     * after, e.g. u_char ifi_type, u_long ifi_mtu, u_long ifi_baudrate,
     * u_long ifi_ipackets, u_long ifi_opackets, u_long ifi_ibytes,
     * u_long ifi_obytes, etc.
     */
    if (kvm_read(kvmp, kaddr, (char *)&ifnethead, sizeof(ifnethead)) != sizeof(ifnethead)) {
	if ((warn & WARN_READ_HEAD) == 0) {
	    fprintf(stderr, "refresh_netif_metrics: Warning: kvm_read: ifnethead: %s\n", kvm_geterr(kvmp));
	    warn |= WARN_READ_HEAD;
	}
	return;
    }

    for (i = 0; ; i++) {
	if (i == 0)
	    kaddr = (unsigned long)TAILQ_FIRST(&ifnethead);
	else
	    kaddr = (unsigned long)TAILQ_NEXT(&ifnet, if_link);

	if (kaddr == 0)
	    break;

	if (kvm_read(kvmp, kaddr, (char *)&ifnet, sizeof(ifnet)) != sizeof(ifnet)) {
	    fprintf(stderr, "refresh_netif_metrics: Error: kvm_read: ifnet[%d]: %s\n", i, kvm_geterr(kvmp));
	    return;
	}

	/* skip network interfaces that are not interesting ...  */
	if (strcmp(ifnet.if_xname, "lo0") == 0)
	    continue;

	sts = pmdaCacheLookupName(indomtab[NETIF_INDOM].it_indom, ifnet.if_xname, NULL, (void **)&ifp);
	if (sts == PMDA_CACHE_ACTIVE) {
	    fprintf(stderr, "refresh_netif_metrics: Warning: duplicate name (%s) in network interface indom\n", ifnet.if_xname);
	    continue;
	}
	else if (sts == PMDA_CACHE_INACTIVE) {
	    /* reactivate an existing entry */
	    pmdaCacheStore(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_ADD, ifnet.if_xname, (void *)ifp);
	}
	else {
	    /* new entry */
	    ifp = (struct ifnet *)malloc(sizeof(*ifp));
	    if (ifp == NULL) {
		fprintf(stderr, "Error: struct ifnet alloc failed for network interface \"%s\"\n", ifnet.if_xname);
		__pmNoMem("refresh_netif_metrics", sizeof(*ifp), PM_FATAL_ERR);
		/*NOTREACHED*/
	    }
	    pmdaCacheStore(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_ADD, ifnet.if_xname, (void *)ifp);
	}
	memcpy((void *)ifp, (void *)&ifnet, sizeof(*ifp));
    }
#endif
}
Example #17
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
	struct lwp *l = curlwp;

#ifndef T2EX
	if (__predict_false(sobj != &sleep_syncobj || strcmp(wmesg, "callout"))) {
#else
	if (__predict_false(sobj != &sleep_syncobj || (strcmp(wmesg, "callout") != 0 && strcmp(wmesg, "select") != 0 && strcmp(wmesg, "pollsock") != 0))) {
#endif
		panic("sleepq: unsupported enqueue");
	}

	/*
	 * Remove an LWP from a sleep queue if the LWP was deleted while in
	 * the waiting state.
	 */
	if ( l->l_sleepq != NULL && (l->l_stat & LSSLEEP) != 0 ) {
		sleepq_remove(l->l_sleepq, l);
	}

#ifndef T2EX
	l->l_syncobj = sobj;
#endif
	l->l_wchan = wchan;
	l->l_sleepq = sq;
#ifndef T2EX
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
#endif
	l->l_stat = LSSLEEP;
#ifndef T2EX
	l->l_sleeperr = 0;
#endif

	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

int
sleepq_block(int timo, bool hatch)
{
	struct lwp *l = curlwp;
	int error = 0;

	//KASSERT(timo == 0 && !hatch);

	if (timo != 0) {
		callout_schedule(&l->l_timeout_ch, timo);
	}

#ifdef T2EX
	if ( l->l_mutex != NULL ) {
		mutex_exit(l->l_mutex);
	}
#endif

	mutex_enter(&sq_mtx);
	while (l->l_wchan) {
		if ( hatch ) {
			error = cv_timedwait_sig( &sq_cv, &sq_mtx, timo );
		}
		else {
			error = cv_timedwait( &sq_cv, &sq_mtx, timo );
		}

		if (error == EINTR) {
			if (l->l_wchan) {
				TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_sleepq = NULL;
			}
		}
	}
	mutex_exit(&sq_mtx);

#ifdef T2EX
	l->l_mutex = &spc_lock;
#endif

	if (timo != 0) {
		/*
		 * Even if the callout appears to have fired, we need to
		 * stop it in order to synchronise with other CPUs.
		 */
		if (callout_halt(&l->l_timeout_ch, NULL)) {
			error = EWOULDBLOCK;
		}
	}

	return error;
}

#ifdef T2EX
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l;
	bool found = false;

	TAILQ_FOREACH(l, sq, l_sleepchain) {
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}
#else
/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;
	int swapin = 0;

	KASSERT(mutex_owned(mp));

	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);

#if 0
	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();
#endif

	return l;
}
Example #18
/*
 * Look for the request in the cache
 * If found then
 *    return action and optionally reply
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if in progress, return DROP
 * - if completed within DELAY of the current time, return DROP
 * - if completed a longer time ago return REPLY if the reply was cached or
 *   return DOIT
 * Update/add new request at end of lru list
 */
int
nfsrv_getcache(struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	       struct mbuf **repp)
{
	struct nfsrvcache *rp;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (Maybe we should for the case of a reconnect, but..)
	 */
	if (!nd->nd_nam2)
		return (RC_DOIT);

	lwkt_gettoken(&srvcache_token);
loop:
	for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != NULL;
	    rp = rp->rc_hash.le_next) {
	    if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
		netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
		        NFS_DPF(RC, ("H%03x", rp->rc_xid & 0xfff));
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				tsleep((caddr_t)rp, 0, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			/* If not at end of LRU chain, move it there */
			if (TAILQ_NEXT(rp, rc_lru) != NULL) {
				TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
				TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			}
			if (rp->rc_state == RC_UNUSED)
				panic("nfsrv cache");
			if (rp->rc_state == RC_INPROG) {
				nfsstats.srvcache_inproghits++;
				ret = RC_DROPIT;
			} else if (rp->rc_flag & RC_REPSTATUS) {
				nfsstats.srvcache_nonidemdonehits++;
				nfs_rephead(0, nd, slp, rp->rc_status,
					    repp, &mb, &bpos);
				ret = RC_REPLY;
			} else if (rp->rc_flag & RC_REPMBUF) {
				nfsstats.srvcache_nonidemdonehits++;
				*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
						MB_WAIT);
				ret = RC_REPLY;
			} else {
				nfsstats.srvcache_idemdonehits++;
				rp->rc_state = RC_INPROG;
				ret = RC_DOIT;
			}
			rp->rc_flag &= ~RC_LOCKED;
			if (rp->rc_flag & RC_WANTED) {
				rp->rc_flag &= ~RC_WANTED;
				wakeup((caddr_t)rp);
			}
			lwkt_reltoken(&srvcache_token);
			return (ret);
		}
	}

	nfsstats.srvcache_misses++;
	NFS_DPF(RC, ("M%03x", nd->nd_retxid & 0xfff));
	if (numnfsrvcache < desirednfsrvcache) {
		rp = kmalloc((u_long)sizeof *rp, M_NFSD, M_WAITOK | M_ZERO);
		numnfsrvcache++;
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			tsleep((caddr_t)rp, 0, "nfsrc", 0);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		if (rp->rc_flag & RC_REPMBUF) {
			m_freem(rp->rc_reply);
			rp->rc_reply = NULL;
			rp->rc_flag &= ~RC_REPMBUF;
		}
		if (rp->rc_flag & RC_NAM) {
			kfree(rp->rc_nam, M_SONAME);
			rp->rc_nam = NULL;
			rp->rc_flag &= ~RC_NAM;
		}
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);

	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = (struct sockaddr_in *)nd->nd_nam;
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = dup_sockaddr(nd->nd_nam);
		break;
	};
	rp->rc_proc = nd->nd_procnum;
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup((caddr_t)rp);
	}
	lwkt_reltoken(&srvcache_token);

	return (RC_DOIT);
}
Example #19
ssize_t mem_pos_seek(struct mem_buffer_pos * pos, ssize_t n) {
    ssize_t done = 0;

    assert(pos);
    assert(pos->m_buffer);

    if (n > 0) {
        while(pos->m_trunk && n > 0) {
            int left = pos->m_trunk->m_size - pos->m_pos_in_trunk - 1;
            if (left >= n) {
                pos->m_pos_in_trunk += n;
                done += n;
                n = 0;
            }
            else {
                done += (left + 1);
                n -= (left + 1);

                do {
                    pos->m_trunk = TAILQ_NEXT(pos->m_trunk, m_next);
                } while(pos->m_trunk && pos->m_trunk->m_size == 0);

                pos->m_pos_in_trunk = 0;
            }
        }
    }
    else if (n < 0) {
        n = -n;
        while(n > 0) {
            int d;

            if (pos->m_trunk == NULL || pos->m_pos_in_trunk == 0) {
                struct mem_buffer_trunk * pre;
                pre =
                    pos->m_trunk == NULL
                    ? TAILQ_LAST(&pos->m_buffer->m_trunks, mem_buffer_trunk_list)
                    : TAILQ_PREV(pos->m_trunk, mem_buffer_trunk_list, m_next);

                while(pre && pre != TAILQ_FIRST(&pos->m_buffer->m_trunks) && pre->m_size <= 0) {
                    pre = TAILQ_PREV(pre, mem_buffer_trunk_list, m_next);
                }

                if (pre && pre->m_size > 0) {
                    pos->m_trunk = pre;
                    pos->m_pos_in_trunk = pre->m_size - 1;
                    done -= 1;
                    n -= 1;
                }
                else {
                    break;
                }
            }

            assert(pos->m_trunk);
            assert(pos->m_trunk->m_size > 0);

            d = pos->m_pos_in_trunk;
            if (d == 0) continue;
            if (d >= n) d = n;

            assert(pos->m_pos_in_trunk >= (size_t)d);
            pos->m_pos_in_trunk -= d;
            done -= d;
            n -= d;
        }
    }

    return done;
}
Example #20
static int
run_stop_schedule(const char *exec, const char *const *argv,
    const char *pidfile, uid_t uid,
    bool test, bool progress)
{
	SCHEDULEITEM *item = TAILQ_FIRST(&schedule);
	int nkilled = 0;
	int tkilled = 0;
	int nrunning = 0;
	long nloops, nsecs;
	struct timespec ts;
	pid_t pid = 0;
	const char *const *p;
	bool progressed = false;

	if (exec)
		einfov("Will stop %s", exec);
	if (pidfile)
		einfov("Will stop PID in pidfile `%s'", pidfile);
	if (uid)
		einfov("Will stop processes owned by UID %d", uid);
	if (argv && *argv) {
		einfovn("Will stop processes of `");
		if (rc_yesno(getenv("EINFO_VERBOSE"))) {
			for (p = argv; p && *p; p++) {
				if (p != argv)
					printf(" ");
				printf("%s", *p);
			}
			printf("'\n");
		}
	}

	if (pidfile) {
		pid = get_pid(pidfile);
		if (pid == -1)
			return 0;
	}

	while (item) {
		switch (item->type) {
		case SC_GOTO:
			item = item->gotoitem;
			continue;

		case SC_SIGNAL:
			nrunning = 0;
			nkilled = do_stop(exec, argv, pid, uid, item->value, test);
			if (nkilled == 0) {
				if (tkilled == 0) {
					if (progressed)
						printf("\n");
						eerror("%s: no matching processes found", applet);
				}
				return tkilled;
			}
			else if (nkilled == -1)
				return 0;

			tkilled += nkilled;
			break;
		case SC_TIMEOUT:
			if (item->value < 1) {
				item = NULL;
				break;
			}

			ts.tv_sec = 0;
			ts.tv_nsec = POLL_INTERVAL;

			for (nsecs = 0; nsecs < item->value; nsecs++) {
				for (nloops = 0;
				     nloops < ONE_SECOND / POLL_INTERVAL;
				     nloops++)
				{
					if ((nrunning = do_stop(exec, argv,
						    pid, uid, 0, test)) == 0)
						return 0;


					if (nanosleep(&ts, NULL) == -1) {
						if (progressed) {
							printf("\n");
							progressed = false;
						}
						if (errno == EINTR)
							eerror("%s: caught an"
							    " interrupt", applet);
						else {
							eerror("%s: nanosleep: %s",
							    applet, strerror(errno));
							return 0;
						}
					}
				}
				if (progress) {
					printf(".");
					fflush(stdout);
					progressed = true;
				}
			}
			break;
		default:
			if (progressed) {
				printf("\n");
				progressed = false;
			}
			eerror("%s: invalid schedule item `%d'",
			    applet, item->type);
			return 0;
		}

		if (item)
			item = TAILQ_NEXT(item, entries);
	}

	if (test || (tkilled > 0 && nrunning == 0))
		return nkilled;

	if (progressed)
		printf("\n");
	if (nrunning == 1)
		eerror("%s: %d process refused to stop", applet, nrunning);
	else
		eerror("%s: %d process(es) refused to stop", applet, nrunning);

	return -nrunning;
}
Example #21
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	void (*c_func)(void *);
	void *c_arg;
#ifdef SMP
	int mpsafe = 1;
#endif

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & callwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}
#ifdef SMP
			if (c->c_flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we 
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}
#endif
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			sc->running = c;
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_func = NULL;
			KKASSERT(c->c_flags & CALLOUT_DID_INIT);
			c->c_flags &= ~CALLOUT_PENDING;
			crit_exit();
			c_func(c_arg);
			crit_enter();
			sc->running = NULL;
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
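
The bucket lookup sc->callwheel[sc->softticks & callwheelmask] above relies on the callwheel size being a power of two, so masking is equivalent to a modulo. A standalone sketch (not kernel code; the wheel size is made up):

#include <stdio.h>

#define CALLWHEEL_SIZE	256			/* must be a power of two */
#define CALLWHEEL_MASK	(CALLWHEEL_SIZE - 1)

int
main(void)
{
	int ticks[] = { 5, 261, 517 };		/* 261 and 517 wrap back to bucket 5 */
	size_t i;

	for (i = 0; i < sizeof(ticks) / sizeof(ticks[0]); i++)
		printf("tick %d -> bucket %d\n", ticks[i], ticks[i] & CALLWHEEL_MASK);
	return 0;
}
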
Example #22
static struct menu *
menu_handle_key(XEvent *e, struct menu_ctx *mc, struct menu_q *menuq,
    struct menu_q *resultq)
{
	struct menu	*mi;
	enum ctltype	 ctl;
	char		 chr[32];
	size_t		 len;
	int 		 clen, i;
	wchar_t 	 wc;

	if (menu_keycode(&e->xkey, &ctl, chr) < 0)
		return (NULL);

	switch (ctl) {
	case CTL_ERASEONE:
		if ((len = strlen(mc->searchstr)) > 0) {
			clen = 1;
			while (mbtowc(&wc, &mc->searchstr[len-clen], MB_CUR_MAX) == -1)
				clen++;
			for (i = 1; i <= clen; i++)
				mc->searchstr[len - i] = '\0';
			mc->changed = 1;
		}
		break;
	case CTL_UP:
		mi = TAILQ_LAST(resultq, menu_q);
		if (mi == NULL)
			break;

		TAILQ_REMOVE(resultq, mi, resultentry);
		TAILQ_INSERT_HEAD(resultq, mi, resultentry);
		break;
	case CTL_DOWN:
		mi = TAILQ_FIRST(resultq);
		if (mi == NULL)
			break;

		TAILQ_REMOVE(resultq, mi, resultentry);
		TAILQ_INSERT_TAIL(resultq, mi, resultentry);
		break;
	case CTL_RETURN:
		/*
		 * Return whatever the cursor is currently on. Else
		 * even if dummy is zero, we need to return something.
		 */
		if ((mi = TAILQ_FIRST(resultq)) == NULL) {
			mi = xmalloc(sizeof *mi);
			(void)strlcpy(mi->text,
			    mc->searchstr, sizeof(mi->text));
			mi->dummy = 1;
		}
		mi->abort = 0;
		return (mi);
	case CTL_WIPE:
		mc->searchstr[0] = '\0';
		mc->changed = 1;
		break;
	case CTL_TAB:
		if ((mi = TAILQ_FIRST(resultq)) != NULL) {
			/*
			 * - We are in exec_path menu mode
			 * - It is equal to the input
			 * We got a command, launch the file menu
			 */
			if ((mc->flags & CWM_MENU_FILE) &&
			    (strncmp(mc->searchstr, mi->text,
					strlen(mi->text))) == 0)
				return (menu_complete_path(mc));

			/*
			 * Put common prefix of the results into searchstr
			 */
			(void)strlcpy(mc->searchstr,
					mi->text, sizeof(mc->searchstr));
			while ((mi = TAILQ_NEXT(mi, resultentry)) != NULL) {
				i = 0;
				while (tolower(mc->searchstr[i]) ==
					       tolower(mi->text[i]))
					i++;
				mc->searchstr[i] = '\0';
			}
			mc->changed = 1;
		}
		break;
	case CTL_ALL:
		mc->list = !mc->list;
		break;
	case CTL_ABORT:
		mi = xmalloc(sizeof *mi);
		mi->text[0] = '\0';
		mi->dummy = 1;
		mi->abort = 1;
		return (mi);
	default:
		break;
	}

	if (chr[0] != '\0') {
		mc->changed = 1;
		(void)strlcat(mc->searchstr, chr, sizeof(mc->searchstr));
	}

	mc->noresult = 0;
	if (mc->changed && mc->searchstr[0] != '\0') {
		(*mc->match)(menuq, resultq, mc->searchstr);
		/* If menuq is empty, never show we've failed */
		mc->noresult = TAILQ_EMPTY(resultq) && !TAILQ_EMPTY(menuq);
	} else if (mc->changed)
		TAILQ_INIT(resultq);

	if (!mc->list && mc->listing && !mc->changed) {
		TAILQ_INIT(resultq);
		mc->listing = 0;
	}

	return (NULL);
}
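
The CTL_UP/CTL_DOWN cases above rotate the result queue by removing one end and reinserting it at the other. A standalone <sys/queue.h> sketch of that idiom on a toy list (not cwm code; the item type is invented):

#include <sys/queue.h>
#include <stdio.h>

struct item {
	int			val;
	TAILQ_ENTRY(item)	entry;
};
TAILQ_HEAD(itemq, item);

int
main(void)
{
	struct itemq q = TAILQ_HEAD_INITIALIZER(q);
	struct item a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 }, *it;

	TAILQ_INSERT_TAIL(&q, &a, entry);
	TAILQ_INSERT_TAIL(&q, &b, entry);
	TAILQ_INSERT_TAIL(&q, &c, entry);

	/* "CTL_UP": move the last element to the head. */
	it = TAILQ_LAST(&q, itemq);
	TAILQ_REMOVE(&q, it, entry);
	TAILQ_INSERT_HEAD(&q, it, entry);

	TAILQ_FOREACH(it, &q, entry)
		printf("%d ", it->val);		/* prints: 3 1 2 */
	printf("\n");
	return 0;
}
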
Example #23
static int
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Ensure enough space for header and SSID IE. */
	if (remain < sizeof(*wh) + 2)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates) {
		return ENOBUFS;
	}
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (iwm_mvm_rrm_scan_needed(sc)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->nvm_data->sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates) {
			return ENOBUFS;
		}
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
#if 0
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
Example #24
struct msg *
rsp_send_next(struct context *ctx, struct conn *conn)
{
    rstatus_t status;
    struct msg *rsp, *req; /* response and its peer request */

    ASSERT_LOG((conn->type == CONN_DNODE_PEER_CLIENT) ||
               (conn->type == CONN_CLIENT), "conn %s", conn_get_type_string(conn));

    req = TAILQ_FIRST(&conn->omsg_q);
    if (req == NULL || !req->selected_rsp) {
        /* nothing is outstanding, initiate close? */
        if (req == NULL && conn->eof) {
            conn->done = 1;
            log_debug(LOG_INFO, "c %d is done", conn->sd);
        }

        status = event_del_out(ctx->evb, conn);
        if (status != DN_OK) {
            conn->err = errno;
        }

        return NULL;
    }

    rsp = conn->smsg;
    if (rsp != NULL) {
        ASSERT(!rsp->request);
        ASSERT(rsp->peer != NULL);
        req = TAILQ_NEXT(rsp->peer, c_tqe);
    }

    if (req == NULL || !req_done(conn, req)) {
        conn->smsg = NULL;
        return NULL;
    }
    ASSERT(req->request && !req->swallow);

    if (req_error(conn, req)) {
        rsp = rsp_make_error(ctx, conn, req);
        if (rsp == NULL) {
            conn->err = errno;
            return NULL;
        }
        rsp->peer = req;
        req->selected_rsp = rsp;
        log_error("creating new error rsp %p", rsp);
        if (conn->dyn_mode) {
      	  stats_pool_incr(ctx, conn->owner, peer_forward_error);
        } else {
      	  stats_pool_incr(ctx, conn->owner, forward_error);
        }
    } else {
        rsp = req->selected_rsp;
    }
    ASSERT(!rsp->request);

    conn->smsg = rsp;

    if (log_loggable(LOG_VVERB)) {
       log_debug(LOG_VVERB, "send next rsp %"PRIu64" on c %d", rsp->id, conn->sd);
    }

    return rsp;
}
Example #25
File: net.c Project: zhengxle/cyon
static int
net_recv(struct connection *c)
{
	int			r;
	struct netbuf		*nb;
	struct netcontext	*nctx = (struct netcontext *)c->nctx;

	while (!TAILQ_EMPTY(&(c->recv_queue))) {
		nb = TAILQ_FIRST(&(c->recv_queue));
		if (nb->cb == NULL) {
			cyon_debug("cyon_read_client(): nb->cb == NULL");
			return (CYON_RESULT_ERROR);
		}

again:
		switch (c->l->type) {
		case EVENT_TYPE_INET_SOCKET:
			r = SSL_read(c->ssl,
			    (nb->buf + nb->s_off), (nb->b_len - nb->s_off));
			break;
		case EVENT_TYPE_UNIX_SOCKET:
			r = read(c->fd,
			    (nb->buf + nb->s_off), (nb->b_len - nb->s_off));
			break;
		}

		cyon_debug("net_recv(%ld/%ld bytes), progress with %d",
		    nb->s_off, nb->b_len, r);

		if (r <= 0 && c->l->type == EVENT_TYPE_INET_SOCKET) {
			if (!net_ssl_check(c, r, CONN_READ_POSSIBLE))
				return (CYON_RESULT_ERROR);
			if (!(c->flags & CONN_READ_POSSIBLE))
				return (CYON_RESULT_OK);
		}

		if ((r == -1 || r == 0) &&
		    c->l->type == EVENT_TYPE_UNIX_SOCKET) {
			if (r == 0) {
				c->flags &= ~CONN_READ_POSSIBLE;
				return (CYON_RESULT_ERROR);
			}

			if (!net_check(c, CONN_READ_POSSIBLE))
				return (CYON_RESULT_ERROR);
			if (!(c->flags & CONN_READ_POSSIBLE))
				return (CYON_RESULT_OK);
		}

		if (r != -1)
			nb->s_off += (size_t)r;

		if (nb->s_off == nb->b_len) {
			r = nb->cb(nb);
			if (nb->s_off == nb->b_len) {
				TAILQ_REMOVE(&(c->recv_queue), nb, list);

				if (nb->flags & NETBUF_USE_OPPOOL)
					pool_put(&(nctx->op_pool), nb->buf);
				else
					cyon_mem_free(nb->buf);
				pool_put(&(nctx->nb_pool), nb);
			}

			if (r != CYON_RESULT_OK)
				return (r);

			if (nb->s_off != nb->b_len)
				goto again;
		}
	}

	return (CYON_RESULT_OK);
}
Example #26
/*---------------------------------------------------------------------------
 * Add an entry to the access lists. The clientspec either is
 * the name of the local socket or a host- or networkname or
 * numeric ip/host-bit-len spec.
 *---------------------------------------------------------------------------*/
int
monitor_start_rights(const char *clientspec)
{
	struct monitor_rights r;

	/* initialize the new rights entry */

	memset(&r, 0, sizeof r);

	/* check clientspec */

	if (*clientspec == '/')
	{
		struct sockaddr_un sa;

		/* this is a local socket spec, check if we already have one */

		if (local_rights != NULL)
			return I4BMAR_DUP;

		/* does it fit in a local socket address? */

		if (strlen(clientspec) > sizeof sa.sun_path)
			return I4BMAR_LENGTH;

		r.local = 1;
		strcpy(r.name, clientspec);

#ifndef I4B_NOTCPIP_MONITOR

	}
	else
	{
		/* remote entry, parse host/net and cidr */

		struct monitor_rights * rp;
		char hostname[FILENAME_MAX];
		char *p;

		p = strchr(clientspec, '/');

		if (!p)
		{
			struct hostent *host;
			u_int32_t hn;

			/* must be a host spec */

			r.mask = ~0;
			host = gethostbyname(clientspec);

			if (!host)
				return I4BMAR_NOIP;

			memcpy(&hn, host->h_addr_list[0], sizeof hn);
			r.net = (u_int32_t)ntohl(hn);
		}
		else if(p[1])
		{
			/* must be net/cidr spec */

			int l;
			struct netent *net;
			u_int32_t s = ~0U;
			int num = strtol(p+1, NULL, 10);

			if (num < 0 || num > 32)
				return I4BMAR_CIDR;

			s >>= num;
			s ^= ~0U;
			l = p - clientspec;

			if (l >= sizeof hostname)
				return I4BMAR_LENGTH;

			strncpy(hostname, clientspec, l);

			hostname[l] = '\0';

			net = getnetbyname(hostname);

			if (net == NULL)
				r.net = (u_int32_t)inet_network(hostname);
			else
				r.net = (u_int32_t)net->n_net;

			r.mask = s;
			r.net &= s;
		}
		else
		{
			return I4BMAR_CIDR;
		}

		/* check for duplicate entry */

		for (rp = TAILQ_FIRST(&rights); rp != NULL; rp = TAILQ_NEXT(rp, list))
		{
			if (rp->mask == r.mask &&
			    rp->net == r.net &&
			    rp->local == r.local)
			{
				return I4BMAR_DUP;
			}
		}
#endif
	}
Example #27
static void
mta_enter_state(struct mta_session *s, int newstate)
{
	struct mta_envelope	 *e;
	size_t			 envid_sz;
	int			 oldstate;
	ssize_t			 q;
	char			 ibuf[LINE_MAX];
	char			 obuf[LINE_MAX];
	int			 offset;

    again:
	oldstate = s->state;

	log_trace(TRACE_MTA, "mta: %p: %s -> %s", s,
	    mta_strstate(oldstate),
	    mta_strstate(newstate));

	s->state = newstate;

	memset(s->replybuf, 0, sizeof s->replybuf);

	/* don't try this at home! */
#define mta_enter_state(_s, _st) do { newstate = _st; goto again; } while (0)

	switch (s->state) {
	case MTA_INIT:
	case MTA_BANNER:
		break;

	case MTA_EHLO:
		s->ext = 0;
		mta_send(s, "EHLO %s", s->helo);
		break;

	case MTA_HELO:
		s->ext = 0;
		mta_send(s, "HELO %s", s->helo);
		break;

	case MTA_LHLO:
		s->ext = 0;
		mta_send(s, "LHLO %s", s->helo);
		break;

	case MTA_STARTTLS:
		if (s->flags & MTA_DOWNGRADE_PLAIN)
			mta_enter_state(s, MTA_AUTH);
		if (s->flags & MTA_TLS) /* already started */
			mta_enter_state(s, MTA_AUTH);
		else if ((s->ext & MTA_EXT_STARTTLS) == 0) {
			if (s->flags & MTA_FORCE_TLS || s->flags & MTA_WANT_SECURE) {
				mta_error(s, "TLS required but not supported by remote host");
				s->flags |= MTA_RECONN;
			}
			else
				/* server doesn't support starttls, do not use it */
				mta_enter_state(s, MTA_AUTH);
		}
		else
			mta_send(s, "STARTTLS");
		break;

	case MTA_AUTH:
		if (s->relay->secret && s->flags & MTA_TLS) {
			if (s->ext & MTA_EXT_AUTH) {
				if (s->ext & MTA_EXT_AUTH_PLAIN) {
					mta_enter_state(s, MTA_AUTH_PLAIN);
					break;
				}
				if (s->ext & MTA_EXT_AUTH_LOGIN) {
					mta_enter_state(s, MTA_AUTH_LOGIN);
					break;
				}
				log_debug("debug: mta: %p: no supported AUTH method on session", s);
				mta_error(s, "no supported AUTH method");
			}
			else {
				log_debug("debug: mta: %p: AUTH not advertised on session", s);
				mta_error(s, "AUTH not advertised");
			}
		}
		else if (s->relay->secret) {
			log_debug("debug: mta: %p: not using AUTH on non-TLS "
			    "session", s);
			mta_error(s, "Refuse to AUTH over unsecure channel");
			mta_connect(s);
		} else {
			mta_enter_state(s, MTA_READY);
		}
		break;

	case MTA_AUTH_PLAIN:
		mta_send(s, "AUTH PLAIN %s", s->relay->secret);
		break;

	case MTA_AUTH_LOGIN:
		mta_send(s, "AUTH LOGIN");
		break;

	case MTA_AUTH_LOGIN_USER:
		memset(ibuf, 0, sizeof ibuf);
		if (base64_decode(s->relay->secret, (unsigned char *)ibuf,
				  sizeof(ibuf)-1) == -1) {
			log_debug("debug: mta: %p: credentials too large on session", s);
			mta_error(s, "Credentials too large");
			break;
		}

		memset(obuf, 0, sizeof obuf);
		base64_encode((unsigned char *)ibuf + 1, strlen(ibuf + 1), obuf, sizeof obuf);
		mta_send(s, "%s", obuf);

		memset(ibuf, 0, sizeof ibuf);
		memset(obuf, 0, sizeof obuf);
		break;

	case MTA_AUTH_LOGIN_PASS:
		memset(ibuf, 0, sizeof ibuf);
		if (base64_decode(s->relay->secret, (unsigned char *)ibuf,
				  sizeof(ibuf)-1) == -1) {
			log_debug("debug: mta: %p: credentials too large on session", s);
			mta_error(s, "Credentials too large");
			break;
		}

		offset = strlen(ibuf+1)+2;
		memset(obuf, 0, sizeof obuf);
		base64_encode((unsigned char *)ibuf + offset, strlen(ibuf + offset), obuf, sizeof obuf);
		mta_send(s, "%s", obuf);

		memset(ibuf, 0, sizeof ibuf);
		memset(obuf, 0, sizeof obuf);
		break;

	case MTA_READY:
		/* Ready to send a new mail */
		if (s->ready == 0) {
			s->ready = 1;
			s->relay->nconn_ready += 1;
			mta_route_ok(s->relay, s->route);
		}

		if (s->msgtried >= MAX_TRYBEFOREDISABLE) {
			log_info("smtp-out: Remote host seems to reject all mails on session %016"PRIx64,
			    s->id);
			mta_route_down(s->relay, s->route);
			mta_enter_state(s, MTA_QUIT);
			break;
		}

		if (s->msgcount >= s->relay->limits->max_mail_per_session) {
			log_debug("debug: mta: "
			    "%p: cannot send more message to relay %s", s,
			    mta_relay_to_text(s->relay));
			mta_enter_state(s, MTA_QUIT);
			break;
		}

		/*
		 * When downgrading from opportunistic TLS, clear flag and
		 * possibly reuse the same task (forbidden in other cases).
		 */
		if (s->flags & MTA_DOWNGRADE_PLAIN)
			s->flags &= ~MTA_DOWNGRADE_PLAIN;
		else if (s->task)
			fatalx("task should be NULL at this point");

		if (s->task == NULL)
			s->task = mta_route_next_task(s->relay, s->route);
		if (s->task == NULL) {
			log_debug("debug: mta: %p: no task for relay %s",
			    s, mta_relay_to_text(s->relay));

			if (s->relay->nconn > 1 ||
			    s->hangon >= s->relay->limits->sessdelay_keepalive) {
				mta_enter_state(s, MTA_QUIT);
				break;
			}

			log_debug("mta: debug: last connection: hanging on for %llds",
			    (long long)(s->relay->limits->sessdelay_keepalive -
			    s->hangon));
			s->flags |= MTA_HANGON;
			runq_schedule(hangon, time(NULL) + 1, NULL, s);
			break;
		}

		log_debug("debug: mta: %p: handling next task for relay %s", s,
			    mta_relay_to_text(s->relay));

		stat_increment("mta.task.running", 1);

		m_create(p_queue, IMSG_MTA_OPEN_MESSAGE, 0, 0, -1);
		m_add_id(p_queue, s->id);
		m_add_msgid(p_queue, s->task->msgid);
		m_close(p_queue);

		tree_xset(&wait_fd, s->id, s);
		s->flags |= MTA_WAIT;
		break;

	case MTA_MAIL:
		s->currevp = TAILQ_FIRST(&s->task->envelopes);

		e = s->currevp;
		s->hangon = 0;
		s->msgtried++;
		envid_sz = strlen(e->dsn_envid);
		if (s->ext & MTA_EXT_DSN) {
			mta_send(s, "MAIL FROM:<%s>%s%s%s%s",
			    s->task->sender,
			    e->dsn_ret ? " RET=" : "",
			    e->dsn_ret ? dsn_strret(e->dsn_ret) : "",
			    envid_sz ? " ENVID=" : "",
			    envid_sz ? e->dsn_envid : "");
		} else
			mta_send(s, "MAIL FROM:<%s>", s->task->sender);
		break;

	case MTA_RCPT:
		if (s->currevp == NULL)
			s->currevp = TAILQ_FIRST(&s->task->envelopes);

		e = s->currevp;
		if (s->ext & MTA_EXT_DSN) {
			mta_send(s, "RCPT TO:<%s>%s%s%s%s",
			    e->dest,
			    e->dsn_notify ? " NOTIFY=" : "",
			    e->dsn_notify ? dsn_strnotify(e->dsn_notify) : "",
			    e->dsn_orcpt ? " ORCPT=" : "",
			    e->dsn_orcpt ? e->dsn_orcpt : "");
		} else
			mta_send(s, "RCPT TO:<%s>", e->dest);

		s->rcptcount++;
		break;

	case MTA_DATA:
		fseek(s->datafp, 0, SEEK_SET);
		mta_send(s, "DATA");
		break;

	case MTA_BODY:
		if (s->datafp == NULL) {
			log_trace(TRACE_MTA, "mta: %p: end-of-file", s);
			mta_enter_state(s, MTA_EOM);
			break;
		}

		if ((q = mta_queue_data(s)) == -1) {
			s->flags |= MTA_FREE;
			break;
		}
		if (q == 0) {
			mta_enter_state(s, MTA_BODY);
			break;
		}

		log_trace(TRACE_MTA, "mta: %p: >>> [...%zd bytes...]", s, q);
		break;

	case MTA_EOM:
		mta_send(s, ".");
		break;

	case MTA_LMTP_EOM:
		/* LMTP reports status of each delivery, so enable read */
		io_set_read(&s->io);
		break;

	case MTA_RSET:
		if (s->datafp) {
			fclose(s->datafp);
			s->datafp = NULL;
		}
		mta_send(s, "RSET");
		break;

	case MTA_QUIT:
		mta_send(s, "QUIT");
		break;

	default:
		fatalx("mta_enter_state: unknown state");
	}
#undef mta_enter_state
}
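
The "don't try this at home" macro above turns every nested mta_enter_state() call into a goto back to the top of the switch, avoiding recursion. A standalone toy version of the same trick (state names invented, not OpenSMTPD code):

#include <stdio.h>

enum state { ST_INIT, ST_WORK, ST_DONE };

static void
enter_state(int newstate)
{
    again:
	switch (newstate) {
	case ST_INIT:
		printf("init\n");
/* from here on, "enter_state" re-enters the switch instead of recursing */
#define enter_state(_st) do { newstate = (_st); goto again; } while (0)
		enter_state(ST_WORK);
		break;
	case ST_WORK:
		printf("work\n");
		enter_state(ST_DONE);
		break;
	case ST_DONE:
		printf("done\n");
		break;
	}
#undef enter_state
}

int
main(void)
{
	enter_state(ST_INIT);	/* prints: init, work, done */
	return 0;
}
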
Example #28
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		curaddr = (*t->_pa_to_device)(curaddr);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
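
The loop above coalesces physically contiguous pages into DMA segments. A standalone sketch of that coalescing with made-up page addresses (not bus_dma code):

#include <stdio.h>

#define PAGE_SIZE	4096UL

struct seg {
	unsigned long	addr;
	unsigned long	len;
};

int
main(void)
{
	/* page addresses as the VM system might return them */
	unsigned long pages[] = { 0x10000, 0x11000, 0x12000, 0x40000, 0x41000 };
	struct seg segs[8];
	unsigned long lastaddr;
	size_t i, curseg = 0;

	segs[0].addr = pages[0];
	segs[0].len = PAGE_SIZE;
	lastaddr = pages[0];

	for (i = 1; i < sizeof(pages) / sizeof(pages[0]); i++) {
		if (pages[i] == lastaddr + PAGE_SIZE) {
			/* contiguous: extend the current segment */
			segs[curseg].len += PAGE_SIZE;
		} else {
			/* gap: start a new segment */
			curseg++;
			segs[curseg].addr = pages[i];
			segs[curseg].len = PAGE_SIZE;
		}
		lastaddr = pages[i];
	}

	/* prints two segments: 0x10000/12288 and 0x40000/8192 */
	for (i = 0; i <= curseg; i++)
		printf("seg %zu: addr 0x%lx len %lu\n", i, segs[i].addr, segs[i].len);
	return 0;
}
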
Example #29
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	hammer_destroy_dedup_cache(hmp);
	if (hmp->dedup_free_cache != NULL) {
		kfree(hmp->dedup_free_cache, hmp->m_misc);
		hmp->dedup_free_cache = NULL;
	}
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}
Example #30
/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  Seq number
	 * does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		/*
		 * Sequence number problems can only happen if a critical
		 * filesystem error occurred which forced the filesystem into
		 * read-only mode.
		 */
		KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			hdkprintf("%d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flg_no is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flg_no - seq > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}