Example #1
void
pccard_check_cis_quirks(device_t dev)
{
	struct pccard_softc *sc = PCCARD_SOFTC(dev);
	int wiped = 0;
	int i, j;
	struct pccard_function *pf, *pf_next, *pf_last;
	struct pccard_config_entry *cfe, *cfe_next;
	struct pccard_cis_quirk *q;

	pf = NULL;
	pf_last = NULL;

	for (i = 0; i < n_pccard_cis_quirks; i++) {
		q = &pccard_cis_quirks[i];
		if (!pccard_cis_quirk_match(sc, q))
			continue;
		if (!wiped) {
			if (bootverbose) {
				device_printf(dev, "using CIS quirks for ");
				for (j = 0; j < 4; j++) {
					if (sc->card.cis1_info[j] == NULL)
						break;
					if (j)
						kprintf(", ");
					kprintf("%s", sc->card.cis1_info[j]);
				}
				kprintf("\n");
			}

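			/* Discard the functions and config entries parsed from the CIS. */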
			for (pf = STAILQ_FIRST(&sc->card.pf_head); pf != NULL;
			     pf = pf_next) {
				for (cfe = STAILQ_FIRST(&pf->cfe_head); cfe != NULL;
				     cfe = cfe_next) {
					cfe_next = STAILQ_NEXT(cfe, cfe_list);
					kfree(cfe, M_DEVBUF);
				}
				pf_next = STAILQ_NEXT(pf, pf_list);
				kfree(pf, M_DEVBUF);
			}

			STAILQ_INIT(&sc->card.pf_head);
			wiped = 1;
		}

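		/*
		 * If this quirk entry is for the same function as the
		 * previous one, just append another config entry to it;
		 * otherwise allocate a new function first.
		 */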
		if (pf_last == q->pf) {
			cfe = kmalloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
			if (cfe == NULL) {
				device_printf(dev, "no memory for quirk (1)\n");
				continue;
			}
			*cfe = *q->cfe;
			STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
		} else {
			pf = kmalloc(sizeof(*pf), M_DEVBUF, M_NOWAIT);
			if (pf == NULL) {
				device_printf(dev,
					"no memory for pccard function\n");
				continue;
			}
			*pf = *q->pf;
			STAILQ_INIT(&pf->cfe_head);
			cfe = kmalloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
			if (cfe == NULL) {
				kfree(pf, M_DEVBUF);
				device_printf(dev, "no memory for quirk (2)\n");
				continue;
			}
			*cfe = *q->cfe;
			STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
			STAILQ_INSERT_TAIL(&sc->card.pf_head, pf, pf_list);
			pf_last = q->pf;
		}
	}
}
Example #2
static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;
	
	mtx_lock(&sc->sc_mutex);

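	/* Drain the input queue, dropping the lock while each packet is handled. */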
	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}
Example #3
/*
 * Load an ELF section table and create a list of Elf_Scn structures.
 */
int
_libelf_load_scn(Elf *e, void *ehdr)
{
	int ec, swapbytes;
	size_t fsz, i, shnum;
	uint64_t shoff;
	char *src;
	Elf32_Ehdr *eh32;
	Elf64_Ehdr *eh64;
	Elf_Scn *scn;
	int (*xlator)(char *_d, size_t _dsz, char *_s, size_t _c, int _swap);

	assert(e != NULL);
	assert(ehdr != NULL);
	assert((e->e_flags & LIBELF_F_SHDRS_LOADED) == 0);

#define	CHECK_EHDR(E,EH)	do {				\
		if (fsz != (EH)->e_shentsize ||			\
		    shoff + fsz * shnum > e->e_rawsize) {	\
			LIBELF_SET_ERROR(HEADER, 0);		\
			return (0);				\
		}						\
	} while (0)

	ec = e->e_class;
	fsz = _libelf_fsize(ELF_T_SHDR, ec, e->e_version, (size_t) 1);
	assert(fsz > 0);

	shnum = e->e_u.e_elf.e_nscn;

	if (ec == ELFCLASS32) {
		eh32 = (Elf32_Ehdr *) ehdr;
		shoff = (uint64_t) eh32->e_shoff;
		CHECK_EHDR(e, eh32);
	} else {
		eh64 = (Elf64_Ehdr *) ehdr;
		shoff = eh64->e_shoff;
		CHECK_EHDR(e, eh64);
	}

	xlator = _libelf_get_translator(ELF_T_SHDR, ELF_TOMEMORY, ec);

	swapbytes = e->e_byteorder != LIBELF_PRIVATE(byteorder);
	src = e->e_rawfile + shoff;

	/*
	 * If the file is using extended numbering then section #0
	 * would have already been read in.
	 */

	i = 0;
	if (!STAILQ_EMPTY(&e->e_u.e_elf.e_scn)) {
		assert(STAILQ_FIRST(&e->e_u.e_elf.e_scn) ==
		    STAILQ_LAST(&e->e_u.e_elf.e_scn, _Elf_Scn, s_next));

		i = 1;
		src += fsz;
	}

	for (; i < shnum; i++, src += fsz) {
		if ((scn = _libelf_allocate_scn(e, i)) == NULL)
			return (0);

		(*xlator)((char *) &scn->s_shdr, sizeof(scn->s_shdr), src,
		    (size_t) 1, swapbytes);

		if (ec == ELFCLASS32) {
			scn->s_offset = scn->s_rawoff =
			    scn->s_shdr.s_shdr32.sh_offset;
			scn->s_size = scn->s_shdr.s_shdr32.sh_size;
		} else {
			scn->s_offset = scn->s_rawoff =
			    scn->s_shdr.s_shdr64.sh_offset;
			scn->s_size = scn->s_shdr.s_shdr64.sh_size;
		}
	}

	e->e_flags |= LIBELF_F_SHDRS_LOADED;

	return (1);
}
Example #4
/*
 * Pre-coalesce handler is invoked when the message is a response to
 * a fragmented multi-vector request - 'get' or 'gets' - and all the
 * responses to the fragmented request vector haven't been received yet.
 */
void
memcache_pre_coalesce(struct msg *r)
{
    struct msg *pr = r->peer; /* peer request */
    struct mbuf *mbuf;

    ASSERT(!r->request);
    ASSERT(pr->request);

    if (pr->frag_id == 0) {
        /* do nothing, if not a response to a fragmented request */
        return;
    }

    switch (r->type) {

    case MSG_RSP_MC_VALUE:
    case MSG_RSP_MC_END:

        /*
         * Readjust responses of the fragmented message vector by not
         * including the end marker for all but the last response
         */

        if (pr->last_fragment) {
            break;
        }

        ASSERT(r->end != NULL);

        for (;;) {
            mbuf = STAILQ_LAST(&r->mhdr, mbuf, next);
            ASSERT(mbuf != NULL);

            /*
             * We cannot assert that end marker points to the last mbuf
             * Consider a scenario where end marker points to the
             * penultimate mbuf and the last mbuf only contains spaces
             * and CRLF: mhdr -> [...END] -> [\r\n]
             */

            if (r->end >= mbuf->pos && r->end < mbuf->last) {
                /* end marker is within this mbuf */
                r->mlen -= (uint32_t)(mbuf->last - r->end);
                mbuf->last = r->end;
                break;
            }

            /* end marker is not in this mbuf */
            r->mlen -= mbuf_length(mbuf);
            mbuf_remove(&r->mhdr, mbuf);
            mbuf_put(mbuf);
        }

        break;

    default:
        /*
         * Valid responses for a fragmented request are MSG_RSP_MC_VALUE or
         * MSG_RSP_MC_END. For an invalid response, we send out SERVER_ERROR
         * with the EINVAL errno.
         */
        mbuf = STAILQ_FIRST(&r->mhdr);
        log_hexdump(LOG_ERR, mbuf->pos, mbuf_length(mbuf), "rsp fragment "
                    "with unknown type %d", r->type);
        pr->error = 1;
        pr->err = EINVAL;
        break;
    }
}
Example #5
/* Async. stream output */
static void
fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			(ifp->if_snd.ifq_head != NULL)) {
		FWE_LOCK(fwe);
		xfer = STAILQ_FIRST(&fwe->xferlist);
		if (xfer == NULL) {
#if 0
			printf("if_fwe: lack of xfer\n");
#endif
			FWE_UNLOCK(fwe);
			break;
		}
		STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
		FWE_UNLOCK(fwe);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);
#else
		BPF_MTAP(ifp, m);
#endif

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
		if (m == NULL) {
			/* M_PREPEND failed and freed the mbuf; recycle the xfer */
			ifp->if_oerrors++;
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			continue;
		}
		fp = &xfer->send.hdr;
		*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
		fp->mode.stream.len = m->m_pkthdr.len;
		xfer->mbuf = m;
		xfer->send.pay_len = m->m_pkthdr.len;

		if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
			/* error */
			ifp->if_oerrors++;
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			ifp->if_opackets++;
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fwe->fd.fc);
}
Example #6
void
output_listing(char *ifilename)
{
	char buf[1024];
	FILE *ifile;
	struct instruction *cur_instr;
	patch_t *cur_patch;
	symbol_node_t *cur_func;
	int *func_values;
	int instrcount;
	int instrptr;
	int line;
	int func_count;
	int skip_addr;

	instrcount = 0;
	instrptr = 0;
	line = 1;
	skip_addr = 0;
	if ((ifile = fopen(ifilename, "r")) == NULL) {
		perror(ifilename);
		stop(NULL, EX_DATAERR);
	}

	/*
	 * Determine which options to apply to this listing.
	 */
	for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions);
	    cur_func != NULL;
	    cur_func = SLIST_NEXT(cur_func, links))
		func_count++;

	func_values = NULL;
	if (func_count != 0) {
		func_values = (int *)malloc(func_count * sizeof(int));

		if (func_values == NULL)
			stop("Could not malloc", EX_OSERR);
		
		func_values[0] = 0; /* FALSE func */
		func_count--;

		/*
		 * Ask the user to fill in the return values for
		 * the rest of the functions.
		 */
		
		for (cur_func = SLIST_FIRST(&patch_functions);
		     cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL;
		     cur_func = SLIST_NEXT(cur_func, links), func_count--) {
			int input;
			
			fprintf(stdout, "\n(%s)\n", cur_func->symbol->name);
			fprintf(stdout,
				"Enter the return value for "
				"this expression[T/F]:");

			while (1) {

				input = getchar();
				input = toupper(input);

				if (input == 'T') {
					func_values[func_count] = 1;
					break;
				} else if (input == 'F') {
					func_values[func_count] = 0;
					break;
				}
			}
			if (isatty(fileno(stdin)) == 0)
				putchar(input);
		}
		fprintf(stdout, "\nThanks!\n");
	}

	/* Now output the listing */
	cur_patch = STAILQ_FIRST(&patches);
	for (cur_instr = STAILQ_FIRST(&seq_program);
	     cur_instr != NULL;
	     cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) {

		if (check_patch(&cur_patch, instrcount,
				&skip_addr, func_values) == 0) {
			/* Don't count this instruction as it is in a patch
			 * that was removed.
			 */
			continue;
		}

		while (line < cur_instr->srcline) {
			fgets(buf, sizeof(buf), ifile);
			fprintf(listfile, "             \t%s", buf);
			line++;
		}
		fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr,
#ifdef __LITTLE_ENDIAN
			cur_instr->format.bytes[0],
			cur_instr->format.bytes[1],
			cur_instr->format.bytes[2],
			cur_instr->format.bytes[3]);
#else
			cur_instr->format.bytes[3],
			cur_instr->format.bytes[2],
			cur_instr->format.bytes[1],
			cur_instr->format.bytes[0]);
#endif
		/*
		 * Macro expansions can cause several instructions
		 * to be output for a single source line.  Only
		 * advance the line once in these cases.
		 */
		if (line == cur_instr->srcline) {
			fgets(buf, sizeof(buf), ifile);
			fprintf(listfile, "\t%s", buf);
			line++;
		} else {
			fprintf(listfile, "\n");
		}
		instrptr++;
	}
	/* Dump the remainder of the file */
	while(fgets(buf, sizeof(buf), ifile) != NULL)
		fprintf(listfile, "             %s", buf);

	fclose(ifile);
}
Example #7
/*
 * We got an interrupt. Check type of interrupt and call the specific
 * device interrupt handling routine.
 */
void
mbaintr(void *mba)
{
	struct mba_softc * const sc = mba;
	struct mba_device *md;
	struct buf *bp;
	int itype, attn, anr;

	itype = MBA_RCSR(MBA_SR);
	MBA_WCSR(MBA_SR, itype);

	attn = MBA_RCSR(MUREG(0, MU_AS)) & 0xff;
	MBA_WCSR(MUREG(0, MU_AS), attn);

	if (sc->sc_state == SC_AUTOCONF)
		return;	/* During autoconfig */

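	/* The drive at the head of the transfer queue owns the current transfer. */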
	md = STAILQ_FIRST(&sc->sc_xfers);
	bp = bufq_peek(md->md_q);
	/*
	 * A data-transfer interrupt. Current operation is finished,
	 * call that device's finish routine to see what to do next.
	 */
	if (sc->sc_state == SC_ACTIVE) {
		sc->sc_state = SC_IDLE;
		switch ((*md->md_finish)(md, itype, &attn)) {

		case XFER_FINISH:
			/*
			 * Transfer is finished. Take the buffer off the
			 * drive's queue, and take the drive off the adapter's
			 * queue. If there is more to transfer, start the
			 * adapter again by calling mbastart().
			 */
			(void)bufq_get(md->md_q);
			STAILQ_REMOVE_HEAD(&sc->sc_xfers, md_link);
			if (bufq_peek(md->md_q) != NULL) {
				STAILQ_INSERT_TAIL(&sc->sc_xfers, md, md_link);
			}
	
			bp->b_resid = 0;
			biodone(bp);
			if (!STAILQ_EMPTY(&sc->sc_xfers))
				mbastart(sc);
			break;

		case XFER_RESTART:
			/*
			 * Something went wrong with the transfer. Try again.
			 */
			mbastart(sc);
			break;
		}
	}

	while (attn) {
		anr = ffs(attn) - 1;
		attn &= ~(1 << anr);
		if (sc->sc_md[anr]->md_attn == 0)
			panic("Should check for new MBA device %d", anr);
		(*sc->sc_md[anr]->md_attn)(sc->sc_md[anr]);
	}
}
Example #8
int
dfs_process_radarevent(struct ath_softc *sc, HAL_CHANNEL *chan)
{
	struct ath_dfs *dfs = sc->sc_dfs;
	struct ath_hal *ah = sc->sc_ah;
	struct dfs_event re, *event;
	struct dfs_state *rs = NULL;
	struct dfs_filtertype *ft;
	struct dfs_filter *rf;
	int found, retval = 0, p, empty;
	int events_processed = 0;
	u_int32_t tabledepth, rfilt, index;
	u_int64_t deltafull_ts = 0, this_ts, deltaT;
	HAL_CHANNEL *thischan;
	HAL_PHYERR_PARAM pe;
	struct dfs_pulseline *pl;
	static u_int32_t test_ts = 0;
	static u_int32_t diff_ts = 0;
	int ext_chan_event_flag = 0;

	if (dfs == NULL) {
		DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s: sc_dfs is NULL\n",
			__func__);
		return 0;
	}
	if (!(sc->sc_curchan.privFlags & CHANNEL_DFS)) {
		DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
		    "%s: radar event on non-DFS chan\n", __func__);
		dfs_reset_radarq(sc);
		dfs_reset_alldelaylines(sc);
		return 0;
	}

	pl = dfs->pulses;

	/* TEST: Simulate radar bang, make sure we add the channel to NOL (bug 29968) */
	if (dfs->dfs_bangradar) {
		/* bangradar will always simulate radar found on the primary channel */
		rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex];
		dfs->dfs_bangradar = 0; /* reset */
		DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s: bangradar\n", __func__);
		retval = 1;
		goto dfsfound;
	}

	ATH_DFSQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
	ATH_DFSQ_UNLOCK(dfs);

	while ((!empty) && (!retval) && (events_processed < MAX_EVENTS)) {
		ATH_DFSQ_LOCK(dfs);
		event = STAILQ_FIRST(&(dfs->dfs_radarq));
		if (event != NULL)
			STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list);
		ATH_DFSQ_UNLOCK(dfs);

		if (event == NULL) {
			empty = 1;
			break;
		}
		events_processed++;
		re = *event;

		OS_MEMZERO(event, sizeof(struct dfs_event));
		ATH_DFSEVENTQ_LOCK(dfs);
		STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list);
		ATH_DFSEVENTQ_UNLOCK(dfs);

		found = 0;
		if (re.re_chanindex < DFS_NUM_RADAR_STATES)
			rs = &dfs->dfs_radar[re.re_chanindex];
		else {
			ATH_DFSQ_LOCK(dfs);
			empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
			ATH_DFSQ_UNLOCK(dfs);
			continue;
		}
		if (rs->rs_chan.privFlags & CHANNEL_INTERFERENCE) {
			ATH_DFSQ_LOCK(dfs);
			empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
			ATH_DFSQ_UNLOCK(dfs);
			continue;
		}

		if (dfs->dfs_rinfo.rn_lastfull_ts == 0) {
			/*
			 * Either not started, or 64-bit rollover exactly to zero
			 * Just prepend zeros to the 15-bit ts
			 */
			dfs->dfs_rinfo.rn_ts_prefix = 0;
			this_ts = (u_int64_t) re.re_ts;
		} else {
			/* WAR 23031 - patch duplicate ts on very short pulses */
			/*
			 * This patch has two problems in a linux environment.
			 * 1) The time stamp created, and hence the PRI, depends
			 *    entirely on the latency. If the latency is high,
			 *    it can split two consecutive pulses in the same
			 *    burst so far apart (by the same amount of latency)
			 *    that they look like they are from different
			 *    bursts. It is observed to happen too often, and
			 *    it makes the detection fail.
			 * 2) Even if the latency is not that bad, it simply
			 *    shifts the duplicate timestamps to a new duplicate
			 *    timestamp based on how they are processed.
			 *    This is no worse, but not good either.
			 *
			 * Take this pulse as a good one and create a probable
			 * PRI later.
			 */
			if (re.re_dur == 0 && re.re_ts == dfs->dfs_rinfo.rn_last_unique_ts) {
				debug_dup[debug_dup_cnt++] = '1';
				DFS_DPRINTK(sc, ATH_DEBUG_DFS1, "\n %s deltaT is 0 \n", __func__);
			} else {
				dfs->dfs_rinfo.rn_last_unique_ts = re.re_ts;
				debug_dup[debug_dup_cnt++] = '0';
			}
			if (debug_dup_cnt >= 32) {
				debug_dup_cnt = 0;
			}

			if (re.re_ts <= dfs->dfs_rinfo.rn_last_ts) {
				dfs->dfs_rinfo.rn_ts_prefix += 
					(((u_int64_t) 1) << DFS_TSSHIFT);
				/* Now, see if it's been more than 1 wrap */
				deltafull_ts = re.re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts;
				if (deltafull_ts > 
				    ((u_int64_t)((DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) + 1 + re.re_ts)))
					deltafull_ts -= (DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) + 1 + re.re_ts;
				deltafull_ts = deltafull_ts >> DFS_TSSHIFT;
				if (deltafull_ts > 1) {
					dfs->dfs_rinfo.rn_ts_prefix += 
						((deltafull_ts - 1) << DFS_TSSHIFT);
				}
			} else {
				deltafull_ts = re.re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts;
				if (deltafull_ts > (u_int64_t) DFS_TSMASK) {
					deltafull_ts = deltafull_ts >> DFS_TSSHIFT;
					dfs->dfs_rinfo.rn_ts_prefix += 
						((deltafull_ts - 1) << DFS_TSSHIFT);
				}
			}
Example #9
struct block_space *
get_bs(struct chip_swap *swap, uint32_t block, uint8_t writing)
{
	struct block_state *blk_state, *old_blk_state = NULL;
	struct block_space *blk_space;

	if (swap == NULL || (block >= swap->nof_blks))
		return (NULL);

	blk_state = &swap->blk_state[block];
	nand_debug(NDBG_SIM, "blk_state %x\n", blk_state->status);

	if (blk_state->status & BLOCK_ALLOCATED) {
		blk_space = blk_state->blk_sp;
	} else {
		blk_space = SLIST_FIRST(&swap->free_bs);
		if (blk_space) {
			SLIST_REMOVE_HEAD(&swap->free_bs, free_link);
			STAILQ_INSERT_TAIL(&swap->used_bs, blk_space,
			    used_link);
		} else {
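			/*
			 * No free block spaces left: recycle the oldest used
			 * one, writing it out first if it is dirty.
			 */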
			blk_space = STAILQ_FIRST(&swap->used_bs);
			old_blk_state = blk_space->blk_state;
			STAILQ_REMOVE_HEAD(&swap->used_bs, used_link);
			STAILQ_INSERT_TAIL(&swap->used_bs, blk_space,
			    used_link);
			if (old_blk_state->status & BLOCK_DIRTY) {
				swap_file_write(swap, old_blk_state);
				old_blk_state->status &= ~BLOCK_DIRTY;
				old_blk_state->status |= BLOCK_SWAPPED;
			}
		}
	}

	if (blk_space == NULL)
		return (NULL);

	if (old_blk_state != NULL) {
		old_blk_state->status &= ~BLOCK_ALLOCATED;
		old_blk_state->blk_sp = NULL;
	}

	blk_state->blk_sp = blk_space;
	blk_space->blk_state = blk_state;

	if (!(blk_state->status & BLOCK_ALLOCATED)) {
		if (blk_state->status & BLOCK_SWAPPED)
			swap_file_read(swap, blk_state);
		else
			memset(blk_space->blk_ptr, 0xff, swap->blk_size);
		blk_state->status |= BLOCK_ALLOCATED;
	}

	if (writing)
		blk_state->status |= BLOCK_DIRTY;

	nand_debug(NDBG_SIM, "get_bs returned %p[%p] state %x\n", blk_space,
	    blk_space->blk_ptr, blk_state->status);

	return (blk_space);
}
Example #10
static nc_thread_memory_block_t *nc_allocate_memory_block_mu(
    nc_thread_memory_block_type_t type,
    int required_size) {
  struct tailhead *head;
  nc_thread_memory_block_t *node;
  /* Assume the lock is held!!! */
  if (type >= MAX_MEMORY_TYPE)
    return NULL;
  head = &__nc_thread_memory_blocks[type];

  /* We need to know the size even if we find a free node - to memset it to 0 */
  switch (type) {
    case THREAD_STACK_MEMORY:
      required_size = required_size + kStackAlignment - 1;
      break;
    case TLS_AND_TDB_MEMORY:
      break;
    case MAX_MEMORY_TYPE:
    default:
      return NULL;
  }

  if (!STAILQ_EMPTY(head)) {
    /* Try to get one from queue. */
    nc_thread_memory_block_t *node = STAILQ_FIRST(head);

    /*
     * On average the memory blocks will be marked as not used in the same order
     * as they are added to the queue, therefore there is no need to check the
     * next queue entries if the first one is still in use.
     */
    if (0 == node->is_used && node->size >= required_size) {
      /*
       * This will only re-use the first node possibly, and could be
       * improved to provide the stack with a best-fit algorithm if needed.
       * TODO: we should scan all nodes to see if there is one that fits
       *   before allocating another.
       *   http://code.google.com/p/nativeclient/issues/detail?id=1569
       */
      int size = node->size;
      STAILQ_REMOVE_HEAD(head, entries);
      --__nc_memory_block_counter[type];

      memset(node, 0, sizeof(*node));
      node->size = size;
      node->is_used = 1;
      return node;
    }

    while (__nc_memory_block_counter[type] > __nc_kMaxCachedMemoryBlocks) {
      /*
       * We have too many blocks in the queue - try to release some.
       * The maximum number of memory blocks to keep in the queue
       * is almost arbitrary and can be tuned.
       * The main limitation is that if we keep too many
       * blocks in the queue, the NaCl app will run out of memory,
       * since the default thread stack size is 512K.
       * TODO(gregoryd): we might give up reusing stack entries once we
       * support variable stack size.
       */
      nc_thread_memory_block_t *tmp = STAILQ_FIRST(head);
      if (0 == tmp->is_used) {
        STAILQ_REMOVE_HEAD(head, entries);
        --__nc_memory_block_counter[type];
        free(tmp);
      } else {
        /*
         * Stop once we find a block that is still in use,
         * since probably there is no point to continue.
         */
        break;
      }
    }

  }
  /* No available blocks of the required type/size - allocate one. */
  node = malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size));
  if (NULL != node) {
    memset(node, 0, sizeof(*node));
    node->size = required_size;
    node->is_used = 1;
  }
  return node;
}
Example #11
Elf_Data *
elf_getdata(Elf_Scn *s, Elf_Data *ed)
{
	Elf *e;
	unsigned int sh_type;
	int elfclass, elftype;
	size_t count, fsz, msz;
	struct _Libelf_Data *d;
	uint64_t sh_align, sh_offset, sh_size;
	int (*xlate)(unsigned char *_d, size_t _dsz, unsigned char *_s,
	    size_t _c, int _swap);

	d = (struct _Libelf_Data *) ed;

	if (s == NULL || (e = s->s_elf) == NULL ||
	    (d != NULL && s != d->d_scn)) {
		LIBELF_SET_ERROR(ARGUMENT, 0);
		return (NULL);
	}

	assert(e->e_kind == ELF_K_ELF);

	if (d == NULL && (d = STAILQ_FIRST(&s->s_data)) != NULL)
		return (&d->d_data);

	if (d != NULL)
		return (&STAILQ_NEXT(d, d_next)->d_data);

	if (e->e_rawfile == NULL) {
		/*
		 * In the ELF_C_WRITE case, there is no source that
		 * can provide data for the section.
		 */
		LIBELF_SET_ERROR(ARGUMENT, 0);
		return (NULL);
	}

	elfclass = e->e_class;

	assert(elfclass == ELFCLASS32 || elfclass == ELFCLASS64);

	if (elfclass == ELFCLASS32) {
		sh_type   = s->s_shdr.s_shdr32.sh_type;
		sh_offset = (uint64_t) s->s_shdr.s_shdr32.sh_offset;
		sh_size   = (uint64_t) s->s_shdr.s_shdr32.sh_size;
		sh_align  = (uint64_t) s->s_shdr.s_shdr32.sh_addralign;
	} else {
		sh_type   = s->s_shdr.s_shdr64.sh_type;
		sh_offset = s->s_shdr.s_shdr64.sh_offset;
		sh_size   = s->s_shdr.s_shdr64.sh_size;
		sh_align  = s->s_shdr.s_shdr64.sh_addralign;
	}

	if (sh_type == SHT_NULL) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	if ((elftype = _libelf_xlate_shtype(sh_type)) < ELF_T_FIRST ||
	    elftype > ELF_T_LAST || (sh_type != SHT_NOBITS &&
	    sh_offset + sh_size > (uint64_t) e->e_rawsize)) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	if ((fsz = (elfclass == ELFCLASS32 ? elf32_fsize : elf64_fsize)
	    (elftype, (size_t) 1, e->e_version)) == 0) {
		LIBELF_SET_ERROR(UNIMPL, 0);
		return (NULL);
	}

	if (sh_size % fsz) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	if (sh_size / fsz > SIZE_MAX) {
		LIBELF_SET_ERROR(RANGE, 0);
		return (NULL);
	}

	count = (size_t) (sh_size / fsz);

	msz = _libelf_msize(elftype, elfclass, e->e_version);

	if (count > 0 && msz > SIZE_MAX / count) {
		LIBELF_SET_ERROR(RANGE, 0);
		return (NULL);
	}

	assert(msz > 0);
	assert(count <= SIZE_MAX);
	assert(msz * count <= SIZE_MAX);

	if ((d = _libelf_allocate_data(s)) == NULL)
		return (NULL);

	d->d_data.d_buf     = NULL;
	d->d_data.d_off     = 0;
	d->d_data.d_align   = sh_align;
	d->d_data.d_size    = msz * count;
	d->d_data.d_type    = elftype;
	d->d_data.d_version = e->e_version;

	if (sh_type == SHT_NOBITS || sh_size == 0) {
		STAILQ_INSERT_TAIL(&s->s_data, d, d_next);
		return (&d->d_data);
	}

	if ((d->d_data.d_buf = malloc(msz * count)) == NULL) {
		(void) _libelf_release_data(d);
		LIBELF_SET_ERROR(RESOURCE, 0);
		return (NULL);
	}

	d->d_flags  |= LIBELF_F_DATA_MALLOCED;

	xlate = _libelf_get_translator(elftype, ELF_TOMEMORY, elfclass);
	if (!(*xlate)(d->d_data.d_buf, (size_t) d->d_data.d_size,
	    e->e_rawfile + sh_offset, count,
	    e->e_byteorder != LIBELF_PRIVATE(byteorder))) {
		_libelf_release_data(d);
		LIBELF_SET_ERROR(DATA, 0);
		return (NULL);
	}

	STAILQ_INSERT_TAIL(&s->s_data, d, d_next);

	return (&d->d_data);
}
Example #12
/*
 * Copy one response from src to dst and return bytes copied
 */
static rstatus_t
memcache_copy_bulk(struct msg *dst, struct msg *src)
{
    struct mbuf *mbuf, *nbuf;
    uint8_t *p;
    uint32_t len = 0;
    uint32_t bytes = 0;
    uint32_t i = 0;

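    /* trim any leading empty mbufs from the source message */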
    for (mbuf = STAILQ_FIRST(&src->mhdr);
         mbuf && mbuf_empty(mbuf);
         mbuf = STAILQ_FIRST(&src->mhdr)) {

        mbuf_remove(&src->mhdr, mbuf);
        mbuf_put(mbuf);
    }

    mbuf = STAILQ_FIRST(&src->mhdr);
    if (mbuf == NULL) {
        return NC_OK;           /* key does not exist */
    }
    p = mbuf->pos;

    /*
     * get : VALUE key 0 len\r\nval\r\n
     * gets: VALUE key 0 len cas\r\nval\r\n
     */
    ASSERT(*p == 'V');
    for (i = 0; i < 3; i++) {                 /*  eat 'VALUE key 0 '  */
        for (; *p != ' ';) {
            p++;
        }
        p++;
    }

    len = 0;
    for (; p < mbuf->last && isdigit(*p); p++) {
        len = len * 10 + (uint32_t)(*p - '0');
    }

    for (; p < mbuf->last && ('\r' != *p); p++) { /* eat cas for gets */
        ;
    }

    len += CRLF_LEN * 2;
    len += (p - mbuf->pos);

    bytes = len;

    /* copy len bytes to dst */
    for (; mbuf;) {
        if (mbuf_length(mbuf) <= len) {   /* steal this mbuf from src to dst */
            nbuf = STAILQ_NEXT(mbuf, next);
            mbuf_remove(&src->mhdr, mbuf);
            mbuf_insert(&dst->mhdr, mbuf);
            len -= mbuf_length(mbuf);
            mbuf = nbuf;
        } else {                        /* split it */
            nbuf = mbuf_get();
            if (nbuf == NULL) {
                return NC_ENOMEM;
            }
            mbuf_copy(nbuf, mbuf->pos, len);
            mbuf_insert(&dst->mhdr, nbuf);
            mbuf->pos += len;
            break;
        }
    }

    dst->mlen += bytes;
    src->mlen -= bytes;
    log_debug(LOG_VVERB, "memcache_copy_bulk copy bytes: %d", bytes);
    return NC_OK;
}
Example #13
/*
 * read the comment in proto/nc_redis.c
 */
static rstatus_t
memcache_fragment_retrieval(struct msg *r, uint32_t ncontinuum,
                            struct msg_tqh *frag_msgq,
                            uint32_t key_step)
{
    struct mbuf *mbuf;
    struct msg **sub_msgs;
    uint32_t i;
    rstatus_t status;

    sub_msgs = nc_zalloc(ncontinuum * sizeof(*sub_msgs));
    if (sub_msgs == NULL) {
        return NC_ENOMEM;
    }

    ASSERT(r->frag_seq == NULL);
    r->frag_seq = nc_alloc(array_n(r->keys) * sizeof(*r->frag_seq));
    if (r->frag_seq == NULL) {
        nc_free(sub_msgs);
        return NC_ENOMEM;
    }

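    /* rewind the first mbuf so the request can be re-parsed from the start */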
    mbuf = STAILQ_FIRST(&r->mhdr);
    mbuf->pos = mbuf->start;

    /*
     * This code is based on the assumption that 'gets ' is located
     * in a contiguous location. This is always true because we have
     * capped our MBUF_MIN_SIZE at 512, and whenever we have multiple
     * messages, we copy the tail message into a new mbuf.
     */
    for (; *(mbuf->pos) != ' ';) {          /* eat get/gets  */
        mbuf->pos++;
    }
    mbuf->pos++;

    r->frag_id = msg_gen_frag_id();
    r->nfrag = 0;
    r->frag_owner = r;

    for (i = 0; i < array_n(r->keys); i++) {        /* for each  key */
        struct msg *sub_msg;
        struct keypos *kpos = array_get(r->keys, i);
        uint32_t idx = msg_backend_idx(r, kpos->start, kpos->end - kpos->start);

        if (sub_msgs[idx] == NULL) {
            sub_msgs[idx] = msg_get(r->owner, r->request, r->redis);
            if (sub_msgs[idx] == NULL) {
                nc_free(sub_msgs);
                return NC_ENOMEM;
            }
        }
        r->frag_seq[i] = sub_msg = sub_msgs[idx];

        sub_msg->narg++;
        status = memcache_append_key(sub_msg, kpos->start, kpos->end - kpos->start);
        if (status != NC_OK) {
            nc_free(sub_msgs);
            return status;
        }
    }

    for (i = 0; i < ncontinuum; i++) {     /* prepend mget header, and forward it */
        struct msg *sub_msg = sub_msgs[i];
        if (sub_msg == NULL) {
            continue;
        }

        /* prepend get/gets */
        if (r->type == MSG_REQ_MC_GET) {
            status = msg_prepend(sub_msg, (uint8_t *)"get ", 4);
        } else if (r->type == MSG_REQ_MC_GETS) {
            status = msg_prepend(sub_msg, (uint8_t *)"gets ", 5);
        }
        if (status != NC_OK) {
            nc_free(sub_msgs);
            return status;
        }

        /* append \r\n */
        status = msg_append(sub_msg, (uint8_t *)CRLF, CRLF_LEN);
        if (status != NC_OK) {
            nc_free(sub_msgs);
            return status;
        }

        sub_msg->type = r->type;
        sub_msg->frag_id = r->frag_id;
        sub_msg->frag_owner = r->frag_owner;

        TAILQ_INSERT_TAIL(frag_msgq, sub_msg, m_tqe);
        r->nfrag++;
    }

    nc_free(sub_msgs);
    return NC_OK;
}
Example #14
static int
pci_ioctl(struct dev_ioctl_args *ap)
{
	device_t pcidev, brdev;
	void *confdata;
	const char *name;
	struct devlist *devlist_head;
	struct pci_conf_io *cio;
	struct pci_devinfo *dinfo;
	struct pci_io *io;
	struct pci_bar_io *bio;
	struct pci_match_conf *pattern_buf;
	struct resource_list_entry *rle;
	uint32_t value;
	size_t confsz, iolen, pbufsz;
	int error, ionum, i, num_patterns;
#ifdef PRE7_COMPAT
	struct pci_conf_old conf_old;
	struct pci_io iodata;
	struct pci_io_old *io_old;
	struct pci_match_conf_old *pattern_buf_old;

	io_old = NULL;
	pattern_buf_old = NULL;

	if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR &&
	    ap->a_cmd != PCIOCGETCONF && ap->a_cmd != PCIOCGETCONF_OLD)
		return EPERM;
#else
	if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR && ap->a_cmd != PCIOCGETCONF)
		return EPERM;
#endif

	switch (ap->a_cmd) {
#ifdef PRE7_COMPAT
	case PCIOCGETCONF_OLD:
		/* FALLTHROUGH */
#endif
	case PCIOCGETCONF:
		cio = (struct pci_conf_io *)ap->a_data;

		pattern_buf = NULL;
		num_patterns = 0;
		dinfo = NULL;

		cio->num_matches = 0;

		/*
		 * If the user specified an offset into the device list,
		 * but the list has changed since they last called this
		 * ioctl, tell them that the list has changed.  They will
		 * have to get the list from the beginning.
		 */
		if ((cio->offset != 0)
		 && (cio->generation != pci_generation)){
			cio->status = PCI_GETCONF_LIST_CHANGED;
			error = 0;
			break;
		}

		/*
		 * Check to see whether the user has asked for an offset
		 * past the end of our list.
		 */
		if (cio->offset >= pci_numdevs) {
			cio->status = PCI_GETCONF_LAST_DEVICE;
			error = 0;
			break;
		}

		/* get the head of the device queue */
		devlist_head = &pci_devq;

		/*
		 * Determine how much room we have for pci_conf structures.
		 * Round the user's buffer size down to the nearest
		 * multiple of sizeof(struct pci_conf) in case the user
		 * didn't specify a multiple of that size.
		 */
#ifdef PRE7_COMPAT
		if (ap->a_cmd == PCIOCGETCONF_OLD)
			confsz = sizeof(struct pci_conf_old);
		else
#endif
			confsz = sizeof(struct pci_conf);
		iolen = min(cio->match_buf_len - (cio->match_buf_len % confsz),
		    pci_numdevs * confsz);

		/*
		 * Since we know that iolen is a multiple of the size of
		 * the pciconf union, it's okay to do this.
		 */
		ionum = iolen / confsz;

		/*
		 * If this test is true, the user wants the pci_conf
		 * structures returned to match the supplied entries.
		 */
		if ((cio->num_patterns > 0) && (cio->num_patterns < pci_numdevs)
		 && (cio->pat_buf_len > 0)) {
			/*
			 * pat_buf_len needs to be:
			 * num_patterns * sizeof(struct pci_match_conf)
			 * While it is certainly possible that the user just
			 * allocated a large buffer and set the number of
			 * matches correctly, it is far more likely that
			 * their kernel doesn't match the userland utility
			 * they're using.  It's also possible that the user
			 * forgot to initialize some variables.  Yes, this
			 * may be overly picky, but I hazard to guess that
			 * it's far more likely to just catch folks that
			 * updated their kernel but not their userland.
			 */
#ifdef PRE7_COMPAT
			if (ap->a_cmd == PCIOCGETCONF_OLD)
				pbufsz = sizeof(struct pci_match_conf_old);
			else
#endif
				pbufsz = sizeof(struct pci_match_conf);
			if (cio->num_patterns * pbufsz != cio->pat_buf_len) {
				/* The user made a mistake, return an error. */
				cio->status = PCI_GETCONF_ERROR;
				error = EINVAL;
				break;
			}

			/*
			 * Allocate a buffer to hold the patterns.
			 */
#ifdef PRE7_COMPAT
			if (ap->a_cmd == PCIOCGETCONF_OLD) {
				pattern_buf_old = kmalloc(cio->pat_buf_len,
				    M_TEMP, M_WAITOK);
				error = copyin(cio->patterns,
				    pattern_buf_old, cio->pat_buf_len);
			} else
#endif
			{
				pattern_buf = kmalloc(cio->pat_buf_len, M_TEMP,
				    M_WAITOK);
				error = copyin(cio->patterns, pattern_buf,
				    cio->pat_buf_len);
			}
			if (error != 0) {
				error = EINVAL;
				goto getconfexit;
			}
			num_patterns = cio->num_patterns;
		} else if ((cio->num_patterns > 0)
			|| (cio->pat_buf_len > 0)) {
			/*
			 * The user made a mistake, spit out an error.
			 */
			cio->status = PCI_GETCONF_ERROR;
			error = EINVAL;
			break;
		}

		/*
		 * Go through the list of devices and copy out the devices
		 * that match the user's criteria.
		 */
		for (cio->num_matches = 0, error = 0, i = 0,
		     dinfo = STAILQ_FIRST(devlist_head);
		     (dinfo != NULL) && (cio->num_matches < ionum)
		     && (error == 0) && (i < pci_numdevs);
		     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

			if (i < cio->offset)
				continue;

			/* Populate pd_name and pd_unit */
			name = NULL;
			if (dinfo->cfg.dev)
				name = device_get_name(dinfo->cfg.dev);
			if (name) {
				strncpy(dinfo->conf.pd_name, name,
					sizeof(dinfo->conf.pd_name));
				dinfo->conf.pd_name[PCI_MAXNAMELEN] = 0;
				dinfo->conf.pd_unit =
					device_get_unit(dinfo->cfg.dev);
			} else {
				dinfo->conf.pd_name[0] = '\0';
				dinfo->conf.pd_unit = 0;
			}

#ifdef PRE7_COMPAT
			if ((ap->a_cmd == PCIOCGETCONF_OLD &&
			    (pattern_buf_old == NULL ||
			    pci_conf_match_old(pattern_buf_old, num_patterns,
			    &dinfo->conf) == 0)) ||
			    (ap->a_cmd == PCIOCGETCONF &&
			    (pattern_buf == NULL ||
			    pci_conf_match(pattern_buf, num_patterns,
			    &dinfo->conf) == 0))) {
#else
			if (pattern_buf == NULL ||
			    pci_conf_match(pattern_buf, num_patterns,
			    &dinfo->conf) == 0) {
#endif
				/*
				 * If we've filled up the user's buffer,
				 * break out at this point.  Since we've
				 * got a match here, we'll pick right back
				 * up at the matching entry.  We can also
				 * tell the user that there are more matches
				 * left.
				 */
				if (cio->num_matches >= ionum)
					break;

#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCGETCONF_OLD) {
					conf_old.pc_sel.pc_bus =
					    dinfo->conf.pc_sel.pc_bus;
					conf_old.pc_sel.pc_dev =
					    dinfo->conf.pc_sel.pc_dev;
					conf_old.pc_sel.pc_func =
					    dinfo->conf.pc_sel.pc_func;
					conf_old.pc_hdr = dinfo->conf.pc_hdr;
					conf_old.pc_subvendor =
					    dinfo->conf.pc_subvendor;
					conf_old.pc_subdevice =
					    dinfo->conf.pc_subdevice;
					conf_old.pc_vendor =
					    dinfo->conf.pc_vendor;
					conf_old.pc_device =
					    dinfo->conf.pc_device;
					conf_old.pc_class =
					    dinfo->conf.pc_class;
					conf_old.pc_subclass =
					    dinfo->conf.pc_subclass;
					conf_old.pc_progif =
					    dinfo->conf.pc_progif;
					conf_old.pc_revid =
					    dinfo->conf.pc_revid;
					strncpy(conf_old.pd_name,
					    dinfo->conf.pd_name,
					    sizeof(conf_old.pd_name));
					conf_old.pd_name[PCI_MAXNAMELEN] = 0;
					conf_old.pd_unit =
					    dinfo->conf.pd_unit;
					confdata = &conf_old;
				} else
#endif
					confdata = &dinfo->conf;
				/* Only if we can copy it out do we count it. */
				if (!(error = copyout(confdata,
				    (caddr_t)cio->matches +
				    confsz * cio->num_matches, confsz)))
					cio->num_matches++;
			}
		}

		/*
		 * Set the pointer into the list, so that if the user is
		 * getting n records at a time, where n < pci_numdevs, the
		 * next call picks up where this one left off.
		 */
		cio->offset = i;

		/*
		 * Set the generation, the user will need this if they make
		 * another ioctl call with offset != 0.
		 */
		cio->generation = pci_generation;

		/*
		 * If this is the last device, inform the user so he won't
		 * bother asking for more devices.  If dinfo isn't NULL, we
		 * know that there are more matches in the list because of
		 * the way the traversal is done.
		 */
		if (dinfo == NULL)
			cio->status = PCI_GETCONF_LAST_DEVICE;
		else
			cio->status = PCI_GETCONF_MORE_DEVS;

getconfexit:
		if (pattern_buf != NULL)
			kfree(pattern_buf, M_TEMP);
#ifdef PRE7_COMPAT
		if (pattern_buf_old != NULL)
			kfree(pattern_buf_old, M_TEMP);
#endif

		break;

#ifdef PRE7_COMPAT
	case PCIOCREAD_OLD:
	case PCIOCWRITE_OLD:
		io_old = (struct pci_io_old *)ap->a_data;
		iodata.pi_sel.pc_domain = 0;
		iodata.pi_sel.pc_bus = io_old->pi_sel.pc_bus;
		iodata.pi_sel.pc_dev = io_old->pi_sel.pc_dev;
		iodata.pi_sel.pc_func = io_old->pi_sel.pc_func;
		iodata.pi_reg = io_old->pi_reg;
		iodata.pi_width = io_old->pi_width;
		iodata.pi_data = io_old->pi_data;
		ap->a_data = (caddr_t)&iodata;
		/* FALLTHROUGH */
#endif
	case PCIOCREAD:
	case PCIOCWRITE:
		io = (struct pci_io *)ap->a_data;
		switch (io->pi_width) {
		case 4:
		case 2:
		case 1:
			/* Make sure register is in bounds and aligned. */
			if (io->pi_reg < 0 ||
			    io->pi_reg + io->pi_width > PCI_REGMAX + 1 ||
			    io->pi_reg & (io->pi_width - 1)) {
				error = EINVAL;
				break;
			}
			/*
			 * Assume that the user-level bus number is
			 * in fact the physical PCI bus number.
			 * Look up the grandparent, i.e. the bridge device,
			 * so that we can issue configuration space cycles.
			 */
			pcidev = pci_find_dbsf(io->pi_sel.pc_domain,
			    io->pi_sel.pc_bus, io->pi_sel.pc_dev,
			    io->pi_sel.pc_func);
			if (pcidev) {
				brdev = device_get_parent(
				    device_get_parent(pcidev));

#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCWRITE || ap->a_cmd == PCIOCWRITE_OLD)
#else
				if (ap->a_cmd == PCIOCWRITE)
#endif
					PCIB_WRITE_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_data,
							  io->pi_width);
#ifdef PRE7_COMPAT
				else if (ap->a_cmd == PCIOCREAD_OLD)
					io_old->pi_data =
						PCIB_READ_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_width);
#endif
				else
					io->pi_data =
						PCIB_READ_CONFIG(brdev,
							  io->pi_sel.pc_bus,
							  io->pi_sel.pc_dev,
							  io->pi_sel.pc_func,
							  io->pi_reg,
							  io->pi_width);
				error = 0;
			} else {
#ifdef PRE7_COMPAT
				if (ap->a_cmd == PCIOCREAD_OLD) {
					io_old->pi_data = -1;
					error = 0;
				} else
#endif
					error = ENODEV;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;

	case PCIOCGETBAR:
		bio = (struct pci_bar_io *)ap->a_data;

		/*
		 * Assume that the user-level bus number is
		 * in fact the physical PCI bus number.
		 */
		pcidev = pci_find_dbsf(bio->pbi_sel.pc_domain,
		    bio->pbi_sel.pc_bus, bio->pbi_sel.pc_dev,
		    bio->pbi_sel.pc_func);
		if (pcidev == NULL) {
			error = ENODEV;
			break;
		}
		dinfo = device_get_ivars(pcidev);
		
		/*
		 * Look for a resource list entry matching the requested BAR.
		 *
		 * XXX: This will not find BARs that are not initialized, but
		 * maybe that is ok?
		 */
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    bio->pbi_reg);
		if (rle == NULL)
			rle = resource_list_find(&dinfo->resources,
			    SYS_RES_IOPORT, bio->pbi_reg);
		if (rle == NULL || rle->res == NULL) {
			error = EINVAL;
			break;
		}

		/*
		 * Ok, we have a resource for this BAR.  Read the lower
		 * 32 bits to get any flags.
		 */
		value = pci_read_config(pcidev, bio->pbi_reg, 4);
		if (PCI_BAR_MEM(value)) {
			if (rle->type != SYS_RES_MEMORY) {
				error = EINVAL;
				break;
			}
			value &= ~PCIM_BAR_MEM_BASE;
		} else {
			if (rle->type != SYS_RES_IOPORT) {
				error = EINVAL;
				break;
			}
			value &= ~PCIM_BAR_IO_BASE;
		}
		bio->pbi_base = rman_get_start(rle->res) | value;
		bio->pbi_length = rman_get_size(rle->res);

		/*
		 * Check the command register to determine if this BAR
		 * is enabled.
		 */
		value = pci_read_config(pcidev, PCIR_COMMAND, 2);
		if (rle->type == SYS_RES_MEMORY)
			bio->pbi_enabled = (value & PCIM_CMD_MEMEN) != 0;
		else
			bio->pbi_enabled = (value & PCIM_CMD_PORTEN) != 0;
		error = 0;
		break;
	case PCIOCATTACHED:
		error = 0;
		io = (struct pci_io *)ap->a_data;
		pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus,
				       io->pi_sel.pc_dev, io->pi_sel.pc_func);
		if (pcidev != NULL)
			io->pi_data = device_is_attached(pcidev);
		else
			error = ENODEV;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
Example #15
static void
dnode_req_forward(struct context *ctx, struct conn *conn, struct msg *msg)
{
    struct server_pool *pool;
    uint8_t *key;
    uint32_t keylen;

    if (log_loggable(LOG_DEBUG)) {
       log_debug(LOG_DEBUG, "dnode_req_forward entering ");
    }
    log_debug(LOG_DEBUG, "DNODE REQ RECEIVED %s %d dmsg->id %u",
              conn_get_type_string(conn), conn->sd, msg->dmsg->id);

    ASSERT(conn->type == CONN_DNODE_PEER_CLIENT);

    pool = conn->owner;
    key = NULL;
    keylen = 0;

    log_debug(LOG_DEBUG, "conn %p adding message %d:%d", conn, msg->id, msg->parent_id);
    dictAdd(conn->outstanding_msgs_dict, &msg->id, msg);

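    /*
     * If a hash tag is configured, hash only the part of the key
     * enclosed by the tag's delimiter characters.
     */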
    if (!string_empty(&pool->hash_tag)) {
        struct string *tag = &pool->hash_tag;
        uint8_t *tag_start, *tag_end;

        tag_start = dn_strchr(msg->key_start, msg->key_end, tag->data[0]);
        if (tag_start != NULL) {
            tag_end = dn_strchr(tag_start + 1, msg->key_end, tag->data[1]);
            if (tag_end != NULL) {
                key = tag_start + 1;
                keylen = (uint32_t)(tag_end - key);
            }
        }
    }

    if (keylen == 0) {
        key = msg->key_start;
        keylen = (uint32_t)(msg->key_end - msg->key_start);
    }

    ASSERT(msg->dmsg != NULL);
    if (msg->dmsg->type == DMSG_REQ) {
       local_req_forward(ctx, conn, msg, key, keylen);
    } else if (msg->dmsg->type == DMSG_REQ_FORWARD) {
        struct mbuf *orig_mbuf = STAILQ_FIRST(&msg->mhdr);
        struct datacenter *dc = server_get_dc(pool, &pool->dc);
        uint32_t rack_cnt = array_n(&dc->racks);
        uint32_t rack_index;
        for (rack_index = 0; rack_index < rack_cnt; rack_index++) {
            struct rack *rack = array_get(&dc->racks, rack_index);
            //log_debug(LOG_DEBUG, "forwarding to rack  '%.*s'",
            //            rack->name->len, rack->name->data);
            struct msg *rack_msg;
            if (string_compare(rack->name, &pool->rack) == 0) {
                rack_msg = msg;
            } else {
                rack_msg = msg_get(conn, msg->request, __FUNCTION__);
                if (rack_msg == NULL) {
                    log_debug(LOG_VERB, "whelp, looks like yer screwed now, buddy. no inter-rack messages for you!");
                    continue;
                }

                if (msg_clone(msg, orig_mbuf, rack_msg) != DN_OK) {
                    msg_put(rack_msg);
                    continue;
                }
                rack_msg->swallow = true;
            }

            if (log_loggable(LOG_DEBUG)) {
               log_debug(LOG_DEBUG, "forwarding request from conn '%s' to rack '%.*s' dc '%.*s' ",
                           dn_unresolve_peer_desc(conn->sd), rack->name->len, rack->name->data, rack->dc->len, rack->dc->data);
            }

            remote_req_forward(ctx, conn, rack_msg, rack, key, keylen);
        }
    }
}
Example #16
static int
pkg_create_matches(int argc, char **argv, match_t match, pkg_formats fmt,
    const char * const outdir, const char * const rootdir, bool overwrite)
{
	int i, ret = EPKG_OK, retcode = EPKG_OK;
	struct pkgdb *db = NULL;
	struct pkgdb_it *it = NULL;
	struct pkg *pkg = NULL;
	struct pkg_head head = STAILQ_HEAD_INITIALIZER(head);
	struct pkg_entry *e = NULL;
	const char *name, *version;
	char pkgpath[MAXPATHLEN];
	int query_flags = PKG_LOAD_DEPS | PKG_LOAD_FILES | 
	    PKG_LOAD_CATEGORIES | PKG_LOAD_DIRS | PKG_LOAD_SCRIPTS |
	    PKG_LOAD_OPTIONS | PKG_LOAD_MTREE | PKG_LOAD_LICENSES |
	    PKG_LOAD_USERS | PKG_LOAD_GROUPS | PKG_LOAD_SHLIBS;
	const char *format;
	bool foundone;

	if (pkgdb_open(&db, PKGDB_DEFAULT) != EPKG_OK) {
		pkgdb_close(db);
		return (EX_IOERR);
	}

	switch (fmt) {
	case TXZ:
		format = "txz";
		break;
	case TBZ:
		format = "tbz";
		break;
	case TGZ:
		format = "tgz";
		break;
	case TAR:
		format = "tar";
		break;
	default:
		/* unreachable with the current pkg_formats; keeps format initialized */
		format = "txz";
		break;
	}

	for (i = 0; i < argc || match == MATCH_ALL; i++) {
		if (match == MATCH_ALL) {
			printf("Loading package list...\n");
			if ((it = pkgdb_query(db, NULL, match)) == NULL)
				goto cleanup;
			match = !MATCH_ALL;
		} else
			if ((it = pkgdb_query(db, argv[i], match)) == NULL)
				goto cleanup;

		foundone = false;
		while ((ret = pkgdb_it_next(it, &pkg, query_flags)) == EPKG_OK) {
			if ((e = malloc(sizeof(struct pkg_entry))) == NULL)
				err(1, "malloc(pkg_entry)");
			e->pkg = pkg;
			pkg = NULL;
			STAILQ_INSERT_TAIL(&head, e, next);
			foundone = true;
		}
		if (!foundone)
			warnx("No installed package matching \"%s\" found",
			    argv[i]);

		pkgdb_it_free(it);
		if (ret != EPKG_END)
			retcode++;
	}

	while (!STAILQ_EMPTY(&head)) {
		e = STAILQ_FIRST(&head);
		STAILQ_REMOVE_HEAD(&head, next);

		pkg_get(e->pkg, PKG_NAME, &name, PKG_VERSION, &version);
		if (!overwrite) {
			snprintf(pkgpath, MAXPATHLEN, "%s/%s-%s.%s", outdir,
			    name, version, format);
			if (access(pkgpath, F_OK) == 0) {
				printf("%s-%s already packaged, skipping...\n",
				    name, version);
				pkg_free(e->pkg);
				free(e);
				continue;
			}
		}
		printf("Creating package for %s-%s\n", name, version);
		if (pkg_create_installed(outdir, fmt, rootdir, e->pkg) !=
		    EPKG_OK)
			retcode++;
		pkg_free(e->pkg);
		free(e);
	}

cleanup:
	pkgdb_close(db);

	return (retcode);
}
Example #17
static void
output_code()
{
	struct instruction *cur_instr;
	patch_t *cur_patch;
	critical_section_t *cs;
	symbol_node_t *cur_node;
	int instrcount;

	instrcount = 0;
	fprintf(ofile,
"/*\n"
" * DO NOT EDIT - This file is automatically generated\n"
" *		 from the following source files:\n"
" *\n"
"%s */\n", versions);

	fprintf(ofile, "static const uint8_t seqprog[] = {\n");
	for (cur_instr = STAILQ_FIRST(&seq_program);
	     cur_instr != NULL;
	     cur_instr = STAILQ_NEXT(cur_instr, links)) {

		fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x",
			cur_instr == STAILQ_FIRST(&seq_program) ? "" : ",\n",
#ifdef __LITTLE_ENDIAN
			cur_instr->format.bytes[0],
			cur_instr->format.bytes[1],
			cur_instr->format.bytes[2],
			cur_instr->format.bytes[3]);
#else
			cur_instr->format.bytes[3],
			cur_instr->format.bytes[2],
			cur_instr->format.bytes[1],
			cur_instr->format.bytes[0]);
#endif
		instrcount++;
	}
	fprintf(ofile, "\n};\n\n");

	if (patch_arg_list == NULL)
		stop("Patch argument list not defined",
		     EX_DATAERR);

	/*
	 *  Output patch information.  Patch functions first.
	 */
	fprintf(ofile,
"typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list);

	for (cur_node = SLIST_FIRST(&patch_functions);
	     cur_node != NULL;
	     cur_node = SLIST_NEXT(cur_node,links)) {
		fprintf(ofile,
"static %spatch_func_t %spatch%d_func;\n"
"\n"
"static int\n"
"%spatch%d_func(%s)\n"
"{\n"
"	return (%s);\n"
"}\n\n",
			prefix,
			prefix,
			cur_node->symbol->info.condinfo->func_num,
			prefix,
			cur_node->symbol->info.condinfo->func_num,
			patch_arg_list,
			cur_node->symbol->name);
	}

	fprintf(ofile,
"static const struct patch {\n"
"	%spatch_func_t		*patch_func;\n"
"	uint32_t		 begin		:10,\n"
"				 skip_instr	:10,\n"
"				 skip_patch	:12;\n"
"} patches[] = {\n", prefix);

	for (cur_patch = STAILQ_FIRST(&patches);
	     cur_patch != NULL;
	     cur_patch = STAILQ_NEXT(cur_patch,links)) {
		fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }",
			cur_patch == STAILQ_FIRST(&patches) ? "" : ",\n",
			prefix,
			cur_patch->patch_func, cur_patch->begin,
			cur_patch->skip_instr, cur_patch->skip_patch);
	}

	fprintf(ofile, "\n};\n\n");

	fprintf(ofile,
"static const struct cs {\n"
"	uint16_t	begin;\n"
"	uint16_t	end;\n"
"} critical_sections[] = {\n");

	for (cs = TAILQ_FIRST(&cs_tailq);
	     cs != NULL;
	     cs = TAILQ_NEXT(cs, links)) {
		fprintf(ofile, "%s\t{ %d, %d }",
			cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n",
			cs->begin_addr, cs->end_addr);
	}

	fprintf(ofile, "\n};\n\n");

	fprintf(ofile,
"static const int num_critical_sections = sizeof(critical_sections)\n"
"				       / sizeof(*critical_sections);\n");

	fprintf(stderr, "%s: %d instructions used\n", appname, instrcount);
}
Example #18
static int
fw_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	int unit = DEV2UNIT(dev);
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	int err = 0;

	if (DEV_FWMEM(dev))
		return fwmem_close(ap);

	sc = devclass_get_softc(firewire_devclass, unit);
	fc = sc->fc;
	d = (struct fw_drv1 *)dev->si_drv1;

	if (d->ir != NULL) {
		struct fw_xferq *ir = d->ir;

		if ((ir->flag & FWXFERQ_OPEN) == 0)
			return (EINVAL);
		if (ir->flag & FWXFERQ_RUNNING) {
			ir->flag &= ~FWXFERQ_RUNNING;
			fc->irx_disable(fc, ir->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(ir);
		/* drain receiving buffer */
		for (xfer = STAILQ_FIRST(&ir->q);
			xfer != NULL; xfer = STAILQ_FIRST(&ir->q)) {
			ir->queued--;
			STAILQ_REMOVE_HEAD(&ir->q, link);

			xfer->resp = 0;
			fw_xfer_done(xfer);
		}
		/* remove binding */
		for (fwb = STAILQ_FIRST(&ir->binds); fwb != NULL;
				fwb = STAILQ_FIRST(&ir->binds)) {
			STAILQ_REMOVE(&fc->binds, fwb, fw_bind, fclist);
			STAILQ_REMOVE_HEAD(&ir->binds, chlist);
			kfree(fwb, M_FW);
		}
		ir->flag &= ~(FWXFERQ_OPEN |
			FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->ir = NULL;

	}
	if (d->it != NULL) {
		struct fw_xferq *it = d->it;

		if ((it->flag & FWXFERQ_OPEN) == 0)
			return (EINVAL);
		if (it->flag & FWXFERQ_RUNNING) {
			it->flag &= ~FWXFERQ_RUNNING;
			fc->itx_disable(fc, it->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(it);
		it->flag &= ~(FWXFERQ_OPEN |
			FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->it = NULL;
	}
	kfree(dev->si_drv1, M_FW);
	dev->si_drv1 = NULL;

	return err;
}
Example #19
Elf_Data *
elf_getdata(Elf_Scn *s, Elf_Data *d)
{
	Elf *e;
	size_t fsz, msz, count;
	int elfclass, elftype;
	unsigned int sh_type;
	uint64_t sh_align, sh_offset, sh_size;
	void (*xlate)(char *_d, char *_s, size_t _c, int _swap);

	if (s == NULL || (e = s->s_elf) == NULL || e->e_kind != ELF_K_ELF ||
	    (d != NULL && s != d->d_scn)) {
		LIBELF_SET_ERROR(ARGUMENT, 0);
		return (NULL);
	}

	if (d == NULL && (d = STAILQ_FIRST(&s->s_data)) != NULL)
		return (d);

	if (d != NULL)
		return (STAILQ_NEXT(d, d_next));

	if (e->e_rawfile == NULL) {
		LIBELF_SET_ERROR(SEQUENCE, 0);
		return (NULL);
	}

	elfclass = e->e_class;

	assert(elfclass == ELFCLASS32 || elfclass == ELFCLASS64);

	if (elfclass == ELFCLASS32) {
		sh_type   = s->s_shdr.s_shdr32.sh_type;
		sh_offset = (uint64_t) s->s_shdr.s_shdr32.sh_offset;
		sh_size   = (uint64_t) s->s_shdr.s_shdr32.sh_size;
		sh_align  = (uint64_t) s->s_shdr.s_shdr32.sh_addralign;
	} else {
		sh_type   = s->s_shdr.s_shdr64.sh_type;
		sh_offset = s->s_shdr.s_shdr64.sh_offset;
		sh_size   = s->s_shdr.s_shdr64.sh_size;
		sh_align  = s->s_shdr.s_shdr64.sh_addralign;
	}

	if ((elftype = _libelf_xlate_shtype(sh_type)) < ELF_T_FIRST ||
	    elftype > ELF_T_LAST || (sh_type != SHT_NOBITS &&
	    sh_offset + sh_size > (uint64_t) e->e_rawsize)) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	if ((fsz = (elfclass == ELFCLASS32 ? elf32_fsize : elf64_fsize)(elftype,
		 (size_t) 1, e->e_version)) == 0) {
		LIBELF_SET_ERROR(UNIMPL, 0);
		return (NULL);
	}


	if (sh_size % fsz) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	count = sh_size / fsz;

	msz = _libelf_msize(elftype, elfclass, e->e_version);

	assert(msz > 0);

	if ((d = _libelf_allocate_data(s)) == NULL)
		return (NULL);

	d->d_buf     = NULL;
	d->d_off     = 0;
	d->d_align   = sh_align;
	d->d_size    = msz * count;
	d->d_type    = elftype;
	d->d_version = e->e_version;

	if (sh_type == SHT_NOBITS) {
		/* link the descriptor in so a subsequent call can find it */
		STAILQ_INSERT_TAIL(&s->s_data, d, d_next);
		return (d);
	}

	if ((d->d_buf = malloc(msz * count)) == NULL) {
		(void) _libelf_release_data(d);
		LIBELF_SET_ERROR(RESOURCE, 0);
		return (NULL);
	}

	/* mark the buffer malloc'ed only once allocation has succeeded */
	d->d_flags  |= LIBELF_F_MALLOCED;

	STAILQ_INSERT_TAIL(&s->s_data, d, d_next);

	xlate = _libelf_get_translator(elftype, ELF_TOMEMORY, elfclass);
	(*xlate)(d->d_buf, e->e_rawfile + sh_offset, count, e->e_byteorder !=
	    _libelf_host_byteorder());

	return (d);
}
Example #20
/*
 * read request.
 */
static int
fw_read (struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct firewire_softc *sc;
	struct fw_xferq *ir;
	struct fw_xfer *xfer;
	int err = 0, slept = 0;
	int unit = DEV2UNIT(dev);
	struct fw_pkt *fp;

	if (DEV_FWMEM(dev))
		return physread(ap);

	sc = devclass_get_softc(firewire_devclass, unit);

	ir = ((struct fw_drv1 *)dev->si_drv1)->ir;
	if (ir == NULL || ir->buf == NULL)
		return (EIO);

readloop:
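	/* Use a queued per-packet xfer if present; otherwise consume iso bulk data. */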
	xfer = STAILQ_FIRST(&ir->q);
	if (ir->stproc == NULL) {
		/* iso bulkxfer */
		ir->stproc = STAILQ_FIRST(&ir->stvalid);
		if (ir->stproc != NULL) {
			crit_enter();
			STAILQ_REMOVE_HEAD(&ir->stvalid, link);
			crit_exit();
			ir->queued = 0;
		}
	}
	if (xfer == NULL && ir->stproc == NULL) {
		/* no data available */
		if (slept == 0) {
			slept = 1;
			ir->flag |= FWXFERQ_WAKEUP;
			err = tsleep(ir, FWPRI, "fw_read", hz);
			ir->flag &= ~FWXFERQ_WAKEUP;
			if (err == 0)
				goto readloop;
		} else if (slept == 1)
			err = EIO;
		return err;
	} else if (xfer != NULL) {
#if 0 /* XXX broken */
		/* per packet mode or FWACT_CH bind?*/
		crit_enter();
		ir->queued --;
		STAILQ_REMOVE_HEAD(&ir->q, link);
		crit_exit();
		fp = &xfer->recv.hdr;
		if (sc->fc->irx_post != NULL)
			sc->fc->irx_post(sc->fc, fp->mode.ld);
		err = uiomove((void *)fp, 1 /* XXX header size */, uio);
		/* XXX copy payload too */
		/* XXX we should recycle this xfer */
#endif
		fw_xfer_free(xfer);
	} else if (ir->stproc != NULL) {
		/* iso bulkxfer */
		fp = (struct fw_pkt *)fwdma_v_addr(ir->buf, 
				ir->stproc->poffset + ir->queued);
		if (sc->fc->irx_post != NULL)
			sc->fc->irx_post(sc->fc, fp->mode.ld);
		if (fp->mode.stream.len == 0) {
			err = EIO;
			return err;
		}
		err = uiomove((caddr_t)fp,
			fp->mode.stream.len + sizeof(u_int32_t), uio);
		ir->queued++;
		if (ir->queued >= ir->bnpacket) {
			crit_enter();
			STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
			crit_exit();
			sc->fc->irx_enable(sc->fc, ir->dmach);
			ir->stproc = NULL;
		}
		if (uio->uio_resid >= ir->psize) {
			slept = -1;
			goto readloop;
		}
	}
	return err;
}
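The read path above is a standard STAILQ consumer with a single sleep-and-retry: look at the queue head, tsleep() once if it is empty, and re-check before giving up with EIO. A minimal userspace sketch of that shape, with a hypothetical wait_for_data() standing in for tsleep() plus the interrupt-side producer:

#include <sys/queue.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int payload;
	STAILQ_ENTRY(pkt) link;
};

STAILQ_HEAD(pktq, pkt);

/* Hypothetical stand-in for tsleep(): pretend the producer delivered a packet. */
static int
wait_for_data(struct pktq *q)
{
	struct pkt *p = malloc(sizeof(*p));

	if (p == NULL)
		return (ENOMEM);
	p->payload = 99;
	STAILQ_INSERT_TAIL(q, p, link);
	return (0);
}

static int
read_one(struct pktq *q)
{
	struct pkt *p;
	int slept = 0;

readloop:
	p = STAILQ_FIRST(q);
	if (p == NULL) {
		if (slept == 0) {	/* wait once, then re-check */
			slept = 1;
			if (wait_for_data(q) == 0)
				goto readloop;
		}
		return (EIO);		/* still nothing: give up */
	}
	STAILQ_REMOVE_HEAD(q, link);
	printf("read payload %d\n", p->payload);
	free(p);
	return (0);
}

int
main(void)
{
	struct pktq q = STAILQ_HEAD_INITIALIZER(q);

	return (read_one(&q));
}

The slept flag guarantees the reader blocks at most once per call, which is what keeps fw_read() from hanging when the isochronous stream stops.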
Example #21
void
rtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rtwn_usb_softc *uc = usbd_xfer_softc(xfer);
	struct rtwn_softc *sc = &uc->uc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct mbuf *m = NULL, *next;
	struct rtwn_data *data;
	int8_t nf, rssi;

	RTWN_ASSERT_LOCKED(sc);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data == NULL)
			goto tr_setup;
		STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
		m = rtwn_report_intr(uc, xfer, data);
		STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		data = STAILQ_FIRST(&uc->uc_rx_inactive);
		if (data == NULL) {
			KASSERT(m == NULL, ("mbuf isn't NULL"));
			goto finish;
		}
		STAILQ_REMOVE_HEAD(&uc->uc_rx_inactive, next);
		STAILQ_INSERT_TAIL(&uc->uc_rx_active, data, next);
		usbd_xfer_set_frame_data(xfer, 0, data->buf,
		    usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);

		/*
		 * To avoid LOR we should unlock our private mutex here to call
		 * ieee80211_input() because here is at the end of a USB
		 * callback and safe to unlock.
		 */
		while (m != NULL) {
			next = m->m_next;
			m->m_next = NULL;

			ni = rtwn_rx_frame(sc, m, &rssi);

			RTWN_UNLOCK(sc);

			nf = RTWN_NOISE_FLOOR;
			if (ni != NULL) {
				if (ni->ni_flags & IEEE80211_NODE_HT)
					m->m_flags |= M_AMPDU;
				(void)ieee80211_input(ni, m, rssi - nf, nf);
				ieee80211_free_node(ni);
			} else {
				(void)ieee80211_input_all(ic, m,
				    rssi - nf, nf);
			}
			RTWN_LOCK(sc);
			m = next;
		}
		break;
	default:
		/* Return the buffer to the inactive queue due to an error. */
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data != NULL) {
			STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
			STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		}
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		break;
	}
finish:
	/* Finished receive; age anything left on the FF queue by a little bump */
	/*
	 * XXX TODO: just make this a callout timer schedule so we can
	 * flush the FF staging queue if we're approaching idle.
	 */
#ifdef	IEEE80211_SUPPORT_SUPERG
	if (!(sc->sc_flags & RTWN_FW_LOADED) ||
	    sc->sc_ratectl != RTWN_RATECTL_NET80211)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif

	/* Kick-start more transmit in case we stalled */
	rtwn_start(sc);
}
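The callback above rotates buffers between two STAILQs: uc_rx_active holds transfers currently owned by the USB stack, and uc_rx_inactive is the free list they return to on completion or error. A self-contained sketch of that active/inactive rotation, with illustrative types rather than the rtwn driver's own:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int id;
	STAILQ_ENTRY(buf) next;
};

STAILQ_HEAD(bufq, buf);

int
main(void)
{
	struct bufq active = STAILQ_HEAD_INITIALIZER(active);
	struct bufq inactive = STAILQ_HEAD_INITIALIZER(inactive);
	struct buf *b;
	int i;

	/* Pre-populate the free list, as the driver does at attach time. */
	for (i = 0; i < 4; i++) {
		if ((b = malloc(sizeof(*b))) == NULL)
			abort();
		b->id = i;
		STAILQ_INSERT_TAIL(&inactive, b, next);
	}

	/* "Submit": move free buffers onto the active queue. */
	while ((b = STAILQ_FIRST(&inactive)) != NULL) {
		STAILQ_REMOVE_HEAD(&inactive, next);
		STAILQ_INSERT_TAIL(&active, b, next);
	}

	/* "Complete": recycle each finished buffer back to the free list. */
	while ((b = STAILQ_FIRST(&active)) != NULL) {
		STAILQ_REMOVE_HEAD(&active, next);
		printf("completed buffer %d\n", b->id);
		STAILQ_INSERT_TAIL(&inactive, b, next);
	}

	/* Tear down. */
	while ((b = STAILQ_FIRST(&inactive)) != NULL) {
		STAILQ_REMOVE_HEAD(&inactive, next);
		free(b);
	}
	return (0);
}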
Example #22
void
ath_process_phyerr(struct ath_softc *sc, struct ath_buf *bf, struct ath_rx_status *rxs, u_int64_t fulltsf)
{
#define EXT_CH_RADAR_FOUND 0x02
#define PRI_CH_RADAR_FOUND 0x01
#define EXT_CH_RADAR_EARLY_FOUND 0x04
	struct ath_dfs *dfs = sc->sc_dfs;
	HAL_CHANNEL *chan = &sc->sc_curchan;
	struct dfs_event *event;
	u_int8_t rssi;
	u_int8_t ext_rssi = 0;
	u_int8_t pulse_bw_info = 0, pulse_length_ext = 0, pulse_length_pri = 0;
	u_int32_t dur = 0;
	u_int16_t datalen;
	int pri_found = 1, ext_found = 0, dc_found = 0, early_ext = 0,
	    slope = 0, add_dur = 0;

	int empty;
	u_int32_t *last_word_ptr, *secondlast_word_ptr;
	u_int8_t *byte_ptr, last_byte_0, last_byte_1, last_byte_2, last_byte_3;
	u_int8_t secondlast_byte_0, secondlast_byte_1, secondlast_byte_2,
	    secondlast_byte_3;

	if ((rxs->rs_phyerr != HAL_PHYERR_RADAR) &&
	    (rxs->rs_phyerr != HAL_PHYERR_FALSE_RADAR_EXT)) {
		DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
		    "%s: rs_phyerr=0x%x not a radar error\n",
		    __func__, rxs->rs_phyerr);
		return;
	}

	/*
	 * At this time we have a radar pulse that we need to examine and
	 * queue. But if dfs_process_radarevent already detected radar and
	 * set the CHANNEL_INTERFERENCE flag then do not queue any more
	 * radar data. When we are in a new channel this flag will be clear
	 * and we will start queueing data for the new channel. (EV74162)
	 */
	if (chan->priv_flags & CHANNEL_INTERFERENCE) {
		DFS_DPRINTK(sc, ATH_DEBUG_DFS1,
		    "%s: Radar already found in the channel, "
		    "do not queue radar data\n", __func__);
		return;
	}

	if (dfs == NULL) {
		DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s: sc_dfs is NULL\n",__func__);
		return;
	}
	dfs->ath_dfs_stats.total_phy_errors++;
	datalen = rxs->rs_datalen;

	/*
	 * WAR: Never trust combined RSSI on radar pulses for <= OWL2.0.
	 * For short pulses only the chain 0 rssi is present and the
	 * remaining descriptor data is all 0x80; for longer pulses the
	 * descriptor is present, but the combined value is inaccurate.
	 * This HW capability is queried in dfs_attach and stored in the
	 * sc_dfs_combined_rssi_ok flag.
	 */
	if (sc->sc_dfs->sc_dfs_combined_rssi_ok)
		rssi = (u_int8_t) rxs->rs_rssi;
	else
		rssi = (u_int8_t) rxs->rs_rssi_ctl0;

	ext_rssi = (u_int8_t) rxs->rs_rssi_ext0;

	/*
	 * The hardware stores this as an 8 bit signed value; cap it at 0
	 * if it is a negative number.
	 */
	if (rssi & 0x80)
		rssi = 0;

	if (ext_rssi & 0x80)
		ext_rssi = 0;

	last_word_ptr = (u_int32_t *)(((u_int8_t *)bf->bf_vdata) +
	    datalen - (datalen % 4));
	secondlast_word_ptr = last_word_ptr - 1;

	byte_ptr = (u_int8_t *)last_word_ptr;
	last_byte_0 = (*(byte_ptr) & 0xff);
	last_byte_1 = (*(byte_ptr + 1) & 0xff);
	last_byte_2 = (*(byte_ptr + 2) & 0xff);
	last_byte_3 = (*(byte_ptr + 3) & 0xff);

	byte_ptr = (u_int8_t *)secondlast_word_ptr;
	secondlast_byte_0 = (*(byte_ptr) & 0xff);
	secondlast_byte_1 = (*(byte_ptr + 1) & 0xff);
	secondlast_byte_2 = (*(byte_ptr + 2) & 0xff);
	secondlast_byte_3 = (*(byte_ptr + 3) & 0xff);

	/*
	 * If radar can be detected on the extension channel (for SOWL
	 * onwards), we have to read the radar data differently as the HW
	 * supplies bwinfo and duration for both primary and extension
	 * channels.
	 */
	if (sc->sc_dfs->sc_dfs_ext_chan_ok) {
		/*
		 * If radar can be detected on the extension channel,
		 * datalen zero pulses are bogus; discard them.
		 */
		if (!datalen) {
			dfs->ath_dfs_stats.datalen_discards++;
			return;
		}
		switch (datalen & 0x3) {
		case 0:
			pulse_bw_info = secondlast_byte_3;
			pulse_length_ext = secondlast_byte_2;
			pulse_length_pri = secondlast_byte_1;
			break;
		case 1:
			pulse_bw_info = last_byte_0;
			pulse_length_ext = secondlast_byte_3;
			pulse_length_pri = secondlast_byte_2;
			break;
		case 2:
			pulse_bw_info = last_byte_1;
			pulse_length_ext = last_byte_0;
			pulse_length_pri = secondlast_byte_3;
			break;
		case 3:
			pulse_bw_info = last_byte_2;
			pulse_length_ext = last_byte_1;
			pulse_length_pri = last_byte_0;
			break;
		default:
			DFS_DPRINTK(sc, ATH_DEBUG_DFS,
			    "datalen mod4=%d\n", (datalen % 4));
		}

		/*
		 * Only the last 3 bits of the BW info are relevant; they
		 * indicate which channel the radar was detected in.
		 */
		pulse_bw_info &= 0x07;
		/*
		 * If the pulse is on DC, both primary and extension flags
		 * will be set.
		 */
		if ((pulse_bw_info & EXT_CH_RADAR_FOUND) &&
		    (pulse_bw_info & PRI_CH_RADAR_FOUND)) {
			/*
			 * In conducted testing, when the pulse is on DC,
			 * both pri and ext durations are reported to be the
			 * same. In radiated testing, when the pulse is on
			 * DC, different pri and ext durations are reported,
			 * so take the larger of the two.
			 */
			if (pulse_length_ext >= pulse_length_pri) {
				dur = pulse_length_ext;
				ext_found = 1;
			} else {
				dur = pulse_length_pri;
				pri_found = 1;
			}
			dfs->ath_dfs_stats.dc_phy_errors++;
		} else {
			if (pulse_bw_info & EXT_CH_RADAR_FOUND) {
				dur = pulse_length_ext;
				pri_found = 0;
				ext_found = 1;
				dfs->ath_dfs_stats.ext_phy_errors++;
			}
			if (pulse_bw_info & PRI_CH_RADAR_FOUND) {
				dur = pulse_length_pri;
				pri_found = 1;
				ext_found = 0;
				dfs->ath_dfs_stats.pri_phy_errors++;
			}
			if (pulse_bw_info & EXT_CH_RADAR_EARLY_FOUND) {
				dur = pulse_length_ext;
				pri_found = 0;
				ext_found = 1;
				early_ext = 1;
				dfs->ath_dfs_stats.early_ext_phy_errors++;
				DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
				    "EARLY ext channel dur=%u rssi=%u datalen=%d\n",
				    dur, rssi, datalen);
			}
			if (!pulse_bw_info) {
				DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
				    "ERROR channel dur=%u rssi=%u pulse_bw_info=0x%x datalen MOD 4 = %d\n",
				    dur, rssi, pulse_bw_info, (datalen & 0x3));
				/*
				 * Bogus bandwidth info received in the
				 * descriptor, so ignore this PHY error.
				 */
				dfs->ath_dfs_stats.bwinfo_errors++;
				return;
			}
		}

		if (sc->sc_dfs->sc_dfs_use_enhancement) {
			/*
			 * For Osprey (and Merlin) bw_info has implications
			 * for selecting the RSSI value.
			 */
			switch (pulse_bw_info & 0x03) {
			case 0x00:
				/* No radar in ctrl or ext channel */
				rssi = 0;
				break;
			case 0x01:
				/* radar in ctrl channel */
				DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
				    "RAW RSSI: rssi=%u ext_rssi=%u\n",
				    rssi, ext_rssi);
				if (ext_rssi >= (rssi + 3)) {
					/*
					 * Cannot use ctrl channel RSSI if
					 * the extension channel is stronger.
					 */
					rssi = 0;
				}
				break;
			case 0x02:
				/* radar in extension channel */
				DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
				    "RAW RSSI: rssi=%u ext_rssi=%u\n",
				    rssi, ext_rssi);
				if (rssi >= (ext_rssi + 12)) {
					/*
					 * Cannot use extension channel RSSI
					 * if the control channel is stronger.
					 */
					rssi = 0;
				} else {
					rssi = ext_rssi;
				}
				break;
			case 0x03:
				/* when both are present use the stronger one */
				if (rssi < ext_rssi)
					rssi = ext_rssi;
				break;
			}
		} else {
			/*
			 * Always use the combined RSSI reported, unless the
			 * RSSI reported on the extension is stronger.
			 */
			if ((ext_rssi > rssi) && (ext_rssi < 128))
				rssi = ext_rssi;
		}

		DFS_DPRINTK(sc, ATH_DEBUG_DFS1,
		    "pulse_bw_info=0x%x pulse_length_ext=%u pulse_length_pri=%u "
		    "rssi=%u ext_rssi=%u phyerr=0x%x\n",
		    pulse_bw_info, pulse_length_ext, pulse_length_pri,
		    rssi, ext_rssi, rxs->rs_phyerr);

		/*
		 * HW has a known issue with chirping pulses injected at or
		 * around DC in 40MHz mode. Such pulses are reported with much
		 * lower durations and SW then discards them because they do
		 * not fit the minimum bin5 pulse duration.
		 *
		 * To work around this issue, if a pulse is within a 10us
		 * range of the bin5 min duration, check if the pulse is
		 * chirping. If the pulse is chirping, bump up the duration
		 * to the minimum bin5 duration.
		 *
		 * This makes sure that a valid chirping pulse will not be
		 * discarded because of an incorrectly low duration.
		 *
		 * TBD - Is it possible to calculate the 'real' duration of
		 * the pulse using the slope of the FFT data?
		 * TBD - Use FFT data to differentiate between radar pulses
		 * and false PHY errors. This will let us reduce the number
		 * of false alarms seen.
		 *
		 * BIN 5 chirping pulses are only for the FCC or Japan MKK4
		 * domains.
		 */
		if (((dfs->dfsdomain == DFS_FCC_DOMAIN) ||
		    (dfs->dfsdomain == DFS_MKK4_DOMAIN)) &&
		    (dur >= MAYBE_BIN5_DUR) && (dur < MAX_BIN5_DUR)) {
			add_dur = dfs_check_chirping(sc, bf, rxs, pri_found,
			    ext_found, &slope, &dc_found);
			if (add_dur) {
				DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
				    "old dur %d slope =%d\n", dur, slope);
				/* bump up to a random bin5 pulse duration */
				if (dur < MIN_BIN5_DUR)
					dur = dfs_get_random_bin5_dur(sc,
					    fulltsf);
				DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
				    "new dur %d\n", dur);
			} else {
				/* set the duration so that it is rejected */
				dur = MAX_BIN5_DUR + 100;
				DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
				    "is_chirping = %d dur=%d\n", add_dur, dur);
			}
		} else {
			/*
			 * We have a pulse that is either bigger than
			 * MAX_BIN5_DUR or less than MAYBE_BIN5_DUR.
			 */
			if ((dfs->dfsdomain == DFS_FCC_DOMAIN) ||
			    (dfs->dfsdomain == DFS_MKK4_DOMAIN)) {
				if (dur >= MAX_BIN5_DUR) {
					/*
					 * set the duration so that it is
					 * rejected
					 */
					dur = MAX_BIN5_DUR + 50;
				}
			}
		}
	} else {
		dfs->ath_dfs_stats.owl_phy_errors++;
		/*
		 * HW cannot detect extension channel radar, so it only
		 * passes us primary channel radar data.
		 */
		dur = (rxs->rs_datalen && bf->bf_vdata != NULL ?
		    (u_int32_t)(*((u_int8_t *)bf->bf_vdata)) : 0) & 0xff;

		if ((rssi == 0) && (dur == 0))
			return;
		pri_found = 1;
		ext_found = 0;
	}

	ATH_DFSEVENTQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_eventq));
	ATH_DFSEVENTQ_UNLOCK(dfs);
	if (empty)
		return;

	if ((chan->channel_flags & CHANNEL_108G) == CHANNEL_108G) {
		if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) {
			DFS_DPRINTK(sc, ATH_DEBUG_DFS2,
			    "%s: DFS_AR_EN not enabled\n", __func__);
			return;
		}
		ATH_DFSEVENTQ_LOCK(dfs);
		event = STAILQ_FIRST(&(dfs->dfs_eventq));
		if (event == NULL) {
			ATH_DFSEVENTQ_UNLOCK(dfs);
			DFS_DPRINTK(sc, ATH_DEBUG_DFS,
			    "%s: no more events space left\n", __func__);
			return;
		}
		STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list);
		ATH_DFSEVENTQ_UNLOCK(dfs);
		event->re_rssi = rssi;
		event->re_dur = dur;
		event->re_full_ts = fulltsf;
		event->re_ts = (rxs->rs_tstamp) & DFS_TSMASK;
		event->re_chanindex = dfs->dfs_curchan_radindex;
		ATH_ARQ_LOCK(dfs);
		STAILQ_INSERT_TAIL(&(dfs->dfs_arq), event, re_list);
		ATH_ARQ_UNLOCK(dfs);
	} else {
		if (chan->priv_flags & CHANNEL_DFS) {
			if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) {
				DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
				    "%s: DFS_RADAR_EN not enabled\n",
				    __func__);
				return;
			}
			/*
			 * rssi is not accurate for short pulses, so do not
			 * filter based on that for short duration pulses.
			 */
			if (sc->sc_dfs->sc_dfs_ext_chan_ok) {
				if ((rssi < dfs->dfs_rinfo.rn_minrssithresh &&
				    (dur > 4)) ||
				    dur > (dfs->dfs_rinfo.rn_maxpulsedur)) {
					dfs->ath_dfs_stats.rssi_discards++;
					DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
					    "Extension channel pulse is discarded %d, %d, %d, %d\n",
					    dur, dfs->dfs_rinfo.rn_maxpulsedur,
					    rssi, dfs->dfs_rinfo.rn_minrssithresh);
					return;
				}
			} else {
				if (rssi < dfs->dfs_rinfo.rn_minrssithresh ||
				    dur > dfs->dfs_rinfo.rn_maxpulsedur) {
					dfs->ath_dfs_stats.rssi_discards++;
					return;
				}
			}

			ATH_DFSEVENTQ_LOCK(dfs);
			event = STAILQ_FIRST(&(dfs->dfs_eventq));
			if (event == NULL) {
				ATH_DFSEVENTQ_UNLOCK(dfs);
				DFS_DPRINTK(sc, ATH_DEBUG_DFS,
				    "%s: no more events space left\n",
				    __func__);
				return;
			}
			STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list);
			ATH_DFSEVENTQ_UNLOCK(dfs);
			event->re_dur = dur;
			event->re_full_ts = fulltsf;
			event->re_ts = (rxs->rs_tstamp) & DFS_TSMASK;
			event->re_rssi = rssi;
			if (pri_found == 1) {
				event->re_chanindex = dfs->dfs_curchan_radindex;
			} else {
				if (dfs->dfs_extchan_radindex == -1) {
					DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
					    "%s - phyerr on ext channel\n",
					    __func__);
				}
				event->re_chanindex = dfs->dfs_extchan_radindex;
				DFS_DPRINTK(sc, ATH_DEBUG_DFS3,
				    "%s New extension channel event is added to queue\n",
				    __func__);
			}
			ATH_DFSQ_LOCK(dfs);
			STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list);
			ATH_DFSQ_UNLOCK(dfs);
		}
	}
#undef EXT_CH_RADAR_FOUND
#undef PRI_CH_RADAR_FOUND
#undef EXT_CH_RADAR_EARLY_FOUND
}
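Both queueing paths above draw a preallocated dfs_event from the free list (dfs_eventq), fill in the pulse parameters, and append it to a work queue (dfs_arq or dfs_radarq); running out of free events simply drops the pulse. A hedged sketch of that fixed-pool handoff, omitting the per-queue locks the driver takes:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	unsigned rssi, dur;
	STAILQ_ENTRY(event) re_list;
};

STAILQ_HEAD(eventq, event);

/* Borrow a preallocated event from the free list; NULL when exhausted. */
static struct event *
event_alloc(struct eventq *freeq)
{
	struct event *ev = STAILQ_FIRST(freeq);

	if (ev != NULL)
		STAILQ_REMOVE_HEAD(freeq, re_list);
	return (ev);
}

int
main(void)
{
	struct eventq freeq = STAILQ_HEAD_INITIALIZER(freeq);
	struct eventq radarq = STAILQ_HEAD_INITIALIZER(radarq);
	struct event *ev;
	int i;

	/* Preallocate a small event pool, as dfs_attach-style code does. */
	for (i = 0; i < 3; i++) {
		if ((ev = calloc(1, sizeof(*ev))) == NULL)
			abort();
		STAILQ_INSERT_TAIL(&freeq, ev, re_list);
	}

	/* A "PHY error" arrives: borrow, fill, queue for processing. */
	if ((ev = event_alloc(&freeq)) != NULL) {
		ev->rssi = 42;
		ev->dur = 7;
		STAILQ_INSERT_TAIL(&radarq, ev, re_list);
	}

	/* Consumer side: process and recycle back to the free list. */
	while ((ev = STAILQ_FIRST(&radarq)) != NULL) {
		STAILQ_REMOVE_HEAD(&radarq, re_list);
		printf("event rssi=%u dur=%u\n", ev->rssi, ev->dur);
		STAILQ_INSERT_TAIL(&freeq, ev, re_list);
	}

	while ((ev = STAILQ_FIRST(&freeq)) != NULL) {
		STAILQ_REMOVE_HEAD(&freeq, re_list);
		free(ev);
	}
	return (0);
}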
Example #23
void
ath_cont_data(struct ath_softc *sc, int val, ath_callback ath_draintxq,
    ath_callback ath_stoprecv)
{
  static struct sk_buff *skb = NULL;
  struct ieee80211_frame *hdr;
  static struct ieee80211com *ic;
  static struct ath_buf *bf,*prev,*first;
  static struct ath_desc *ds;
  static struct ath_hal *ah;
  static STAILQ_HEAD(tpc_buf,ath_buf) tmp_q;
  static int is_inited=0;
  struct ath_txq *txq;
  const HAL_RATE_TABLE *rt;
  u_int8_t *p;
  u_int32_t flags, txrate, r,i;
  u_int16_t hdrlen, framelen, dmalen,delay=0;
#define	MIN(a,b)	((a) < (b) ? (a) : (b))

  if (ath_hal_getdiagstate(sc->sc_ah, 19, 0, 10, NULL, NULL) == AH_FALSE) {
    printk("HAL does not support TX99 mode\n");
    printk("compile HAL with AH_PRIVATE_DIAG turned on\n");
    return;
  }
  if (is_inited == 0) {
    STAILQ_INIT(&tmp_q);
    is_inited = 1;
  }
	
  /* enter CONT_DATA mode */
  if (val && skb==NULL) {
    skb = ath_alloc_skb(4096, 32);
    if (skb == NULL)
      goto out;

    /* build output packet */
    hdr = (struct ieee80211_frame *)skb_put(skb, sizeof(*hdr));
    IEEE80211_ADDR_COPY(&hdr->i_addr1, test_addr);
    IEEE80211_ADDR_COPY(&hdr->i_addr2, test_addr);
    IEEE80211_ADDR_COPY(&hdr->i_addr3, test_addr);
    hdr->i_dur[0] = 0x0;
    hdr->i_dur[1] = 0x0;
    hdr->i_seq[0] = 0x5a;
    hdr->i_seq[1] = 0x5a;
    hdr->i_fc[0] = IEEE80211_FC0_TYPE_DATA;
    hdr->i_fc[1] = 0;
    hdrlen = sizeof(*hdr);
    for(r=0; r<2000; ) {
      p = skb_put(skb, sizeof(PN9Data));
      memcpy(p, PN9Data, sizeof(PN9Data));
      r += sizeof(PN9Data);
    }
    framelen = hdrlen + r + IEEE80211_CRC_LEN;

    ic = &sc->sc_ic;
    ah = sc->sc_ah;
	rt = sc->sc_currates;
	if (rt==NULL) {
	  printk("no rate table\n");
	  goto out;
	}
	txrate = rt->info[rt->rateCount-1].rateCode;	/* send at highest rate */
	{
	  int rix;
	  if (sc->sc_txrx99.txrate==0)
	    sc->sc_txrx99.txrate = 6000;
	  for(rix=0; rix<rt->rateCount; rix++) {
	    if (rt->info[rix].rateKbps==sc->sc_txrx99.txrate) {
	      txrate = rt->info[rix].rateCode;
	      printk("txrate set to %dKbps\n", sc->sc_txrx99.txrate);
	      break;
	    }
	  }
	}
    ath_draintxq(sc);
    prev=first=NULL;
	printk("txpower set to %d\n", sc->sc_txrx99.txpower);
    /* send 20 frames for the Power Amp to settle down */
    for(i=0;i<20;++i)
    {
	ATH_TXBUF_LOCK_BH(sc);						

	bf = STAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL) {
	  STAILQ_REMOVE_HEAD(&sc->sc_txbuf,bf_list);
	}
	ATH_TXBUF_UNLOCK_BH(sc);				
	
	if (bf == NULL) {
		printk("no tx buf\n");
		goto out;
	}
	if (!i)
		first = bf;
	framelen = skb->len + IEEE80211_CRC_LEN;
	dmalen = skb->len;
	txq = sc->sc_ac2q[WME_AC_VO];

	bf->bf_skbaddr = bus_map_single(sc->sc_bdev, skb->data, framelen, BUS_DMA_TODEVICE);
	bf->bf_skb = skb;
	bf->bf_node = 0;
	flags = HAL_TXDESC_CLRDMASK;  
	ds = bf->bf_desc;
        if(prev)
	  prev->bf_desc->ds_link = bf->bf_daddr;		/* link from prev desc  */
	ds->ds_data = bf->bf_skbaddr;

	r = ath_hal_setuptxdesc(ah, ds, framelen, hdrlen,
				HAL_PKT_TYPE_NORMAL,
				sc->sc_txrx99.txpower,
				txrate,			    /* tx rate */
				1,			    /* max retries */
				HAL_TXKEYIX_INVALID,	    /* no WEP */
				1,			    /* select Omni Antenna 0 */
				flags,
				0,			    /* rts/cts rate */
				0			    /* rts/cts duration */
				);

	if (r == AH_FALSE) {
		printk("fail setuptxdesc r(%d)\n", r);
		goto out;
	}

	r = ath_hal_filltxdesc(ah, ds, skb->len, AH_TRUE, AH_TRUE, ds);
	if (r == AH_FALSE) {
		printk("fail fill tx desc r(%d)\n", r);
		goto out;
	}
	ath_hal_setupxtxdesc(ah, ds
			     , txrate, 15	/* series 1 */
			     , txrate, 15	/* series 2 */
			     , txrate, 15	/* series 3 */
			     );

        /* insert the buffers in to tmp_q */
        STAILQ_INSERT_HEAD(&tmp_q,bf,bf_list);

        prev=bf;
      }
      ath_hal_intrset(ah, 0);	    	/* disable interrupts */
      //sc->sc_imask = HAL_INT_RX | HAL_INT_TX
      //		  | HAL_INT_RXEOL | HAL_INT_RXORN
      //		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
      sc->sc_imask = 0; 
      //ath_hal_intrset(ah, sc->sc_imask);

      bf->bf_desc->ds_link = 0;
      r = ath_hal_puttxbuf(ah, txq->axq_qnum, first->bf_daddr);
      ath_hal_txstart(ah, txq->axq_qnum);

      while (ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_EINPROGRESS) {
        udelay(2000);
        ++delay;
      }

      /* sleep for 20ms */
      udelay(20000);
      printk("took %d msec to transmit the 20 frames\n", 2 * delay);

      /* start TX99 mode */
      ath_stoprecv(sc);		    	/* stop recv side */
      bf->bf_desc->ds_link = first->bf_daddr;		/* link to self */
      ath_hal_getdiagstate(ah, 19,(void *) sc->sc_txrx99.prefetch,9,NULL,NULL);
      ath_hal_getdiagstate(ah, 19, (void *)txq->axq_qnum, val,NULL,NULL);
      r = ath_hal_puttxbuf(ah, txq->axq_qnum, first->bf_daddr);
      ath_hal_txstart(ah, txq->axq_qnum);
  }

  /* leave CONT_DATA mode, reset the chip */
  if (val==0 && skb) {
    int j=0;
    ath_hal_getdiagstate(ah, 19, 0, 0,NULL,NULL);
    /* insert the buffers back into txbuf list */

	ATH_TXBUF_LOCK_BH(sc);						

	bf = STAILQ_FIRST(&tmp_q);
	while (bf != NULL) {
		bf->bf_skb = NULL;
		STAILQ_REMOVE_HEAD(&tmp_q, bf_list);
		STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
		bf = STAILQ_FIRST(&tmp_q);
		++j;
	}
	ATH_TXBUF_UNLOCK_BH(sc);
	printk("inserted back %d buffers\n", j);
    ic->ic_reset(ic->ic_dev);
    skb = NULL;
    bf = NULL;
  }

  if (val==7 && skb) {
    ath_hal_getdiagstate(ah, 19, ds, 7,NULL,NULL);
  }

  sc->sc_txrx99.tx99mode=val;
 out:
  return;
#undef MIN
}
Example #24
static PyObject* cstuff_DirectionMap_fill(cstuff_DirectionMap* self, PyObject *args) {
	PyObject *iterator, *item, *obj;
	int row, col, row2, col2, stride, pos;
	double value, limit;

	queue_s queue = STAILQ_HEAD_INITIALIZER(queue);
	entry_s *entry;

	if (!PyArg_ParseTuple(args, "Od", &obj, &limit)) {
		return NULL;
	}

	iterator = PyObject_GetIter(obj);
	if (iterator == NULL) {
		return NULL;
	}

	while ((item = PyIter_Next(iterator))) {
		if (!PyArg_ParseTuple(item, "ii", &row, &col)) {
			Py_DECREF(item);
			Py_DECREF(iterator);
			cleanup_s(&queue);
			return NULL;
		}
		if (row >= g_rows || col >= g_cols || row < 0 || col < 0) {
			RAISE("Invalid input data - one of the targets is out od map bounds");
			Py_DECREF(iterator);
			cleanup_s(&queue);
			return NULL;
		}
		Py_DECREF(item);
		put_entry_s(row, col, 0.0, row*g_cols+col, &queue, self);
	}
	Py_DECREF(iterator);

	if (PyErr_Occurred()) {
		cleanup_s(&queue);
		return NULL;
	}

	while (!STAILQ_EMPTY(&queue)) {
		entry = STAILQ_FIRST(&queue);
		STAILQ_REMOVE_HEAD(&queue, hook);
		STAILQ_INSERT_TAIL(&free_entries_s, entry, hook);

		row = entry->row;
		col = entry->col;
		value = entry->value;
		stride = row*g_cols;
		value += 1.0;

		if ((limit > 0) && (value > limit)) continue;

		col2 = FMOD((col-1), g_cols);
		pos = stride+col2;
		DO_STUFF(row, col2);

		col2 = FMOD((col+1), g_cols);
		pos = stride+col2;
		DO_STUFF(row, col2);

		row2 = FMOD((row-1), g_rows);
		pos = row2*g_cols+col;
		DO_STUFF(row2, col);

		row2 = FMOD((row+1), g_rows);
		pos = row2*g_cols+col;
		DO_STUFF(row2, col);
	}

	cleanup_s(&queue);
	Py_RETURN_NONE;
}
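The fill routine above is a breadth-first flood fill that uses a STAILQ as its FIFO work queue; put_entry_s() enqueues a cell and the DO_STUFF() macro relaxes each neighbour. A compact standalone sketch of the same idea on a plain rectangular grid (all names are illustrative, not this module's API):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define ROWS 4
#define COLS 5

struct cell {
	int row, col;
	double dist;
	STAILQ_ENTRY(cell) hook;
};

STAILQ_HEAD(cellq, cell);

/* Enqueue (row, col) if in bounds and this path improves its distance. */
static void
push(struct cellq *q, double *map, int row, int col, double dist)
{
	struct cell *c;

	if (row < 0 || row >= ROWS || col < 0 || col >= COLS)
		return;
	if (map[row * COLS + col] <= dist)	/* already reached cheaper */
		return;
	map[row * COLS + col] = dist;
	if ((c = malloc(sizeof(*c))) == NULL)
		abort();
	c->row = row;
	c->col = col;
	c->dist = dist;
	STAILQ_INSERT_TAIL(q, c, hook);
}

int
main(void)
{
	struct cellq q = STAILQ_HEAD_INITIALIZER(q);
	double map[ROWS * COLS];
	struct cell *c;
	int i;

	for (i = 0; i < ROWS * COLS; i++)
		map[i] = 1e9;			/* "infinite" distance */
	push(&q, map, 0, 0, 0.0);		/* single target at (0,0) */

	/* BFS: pop the head, relax the four neighbours. */
	while (!STAILQ_EMPTY(&q)) {
		c = STAILQ_FIRST(&q);
		STAILQ_REMOVE_HEAD(&q, hook);
		push(&q, map, c->row - 1, c->col, c->dist + 1.0);
		push(&q, map, c->row + 1, c->col, c->dist + 1.0);
		push(&q, map, c->row, c->col - 1, c->dist + 1.0);
		push(&q, map, c->row, c->col + 1, c->dist + 1.0);
		free(c);
	}

	for (i = 0; i < ROWS * COLS; i++)
		printf("%3.0f%s", map[i], (i % COLS == COLS - 1) ? "\n" : " ");
	return (0);
}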
Example #25
/* Async. stream output */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	u_char *c;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	struct ether_header *eh;
#endif

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("%s: m_getcl failed\n", __FUNCTION__);

		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		m->m_data += HDR_LEN + ETHER_ALIGN;
		c = mtod(m, u_char *);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		eh = (struct ether_header *)c;
		m->m_data += sizeof(struct ether_header);
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN
		    - sizeof(struct ether_header);
#else
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
#endif
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n"
			 "%02x %02x %02x %02x\n",
			 c[0], c[1], c[2], c[3], c[4], c[5],
			 c[6], c[7], c[8], c[9], c[10], c[11],
			 c[12], c[13], c[14], c[15],
			 c[16], c[17], c[18], c[19],
			 c[20], c[21], c[22], c[23],
			 c[24], c[25], c[26], c[27]
		 );
#endif
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		ether_input(ifp, eh, m);
#else
		(*ifp->if_input)(ifp, m);
#endif
		ifp->if_ipackets++;
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
Example #26
void pccard_check_cis_quirks(device_t dev)
{
	struct pccard_softc *sc = (struct pccard_softc *)
	    device_get_softc(dev);
	int wiped = 0;
	int i, j;
	struct pccard_function *pf, *pf_next, *pf_last;
	struct pccard_config_entry *cfe, *cfe_next;

	pf = NULL;
	pf_last = NULL;

	for (i=0; i<n_pccard_cis_quirks; i++) {
		if ((sc->card.manufacturer == pccard_cis_quirks[i].manufacturer) &&
			(sc->card.product == pccard_cis_quirks[i].product) &&
			(((sc->card.manufacturer != PCCARD_VENDOR_INVALID) &&
			  (sc->card.product != PCCARD_PRODUCT_INVALID)) ||
			 ((sc->card.manufacturer == PCCARD_VENDOR_INVALID) &&
			  (sc->card.product == PCCARD_PRODUCT_INVALID) &&
			  sc->card.cis1_info[0] &&
			  (strcmp(sc->card.cis1_info[0],
					  pccard_cis_quirks[i].cis1_info[0]) == 0) &&
			  sc->card.cis1_info[1] &&
			  (strcmp(sc->card.cis1_info[1],
					  pccard_cis_quirks[i].cis1_info[1]) == 0)))) {
			if (!wiped) {
				if (pccard_verbose) {
					device_printf(dev, "using CIS quirks for ");
					for (j = 0; j < 4; j++) {
						if (sc->card.cis1_info[j] == NULL)
							break;
						if (j)
							printf(", ");
						printf("%s", sc->card.cis1_info[j]);
					}
					printf("\n");
				}

				for (pf = STAILQ_FIRST(&sc->card.pf_head); pf != NULL;
				     pf = pf_next) {
					for (cfe = STAILQ_FIRST(&pf->cfe_head); cfe != NULL;
					     cfe = cfe_next) {
						cfe_next = STAILQ_NEXT(cfe, cfe_list);
						free(cfe, M_DEVBUF);
					}
					pf_next = STAILQ_NEXT(pf, pf_list);
					free(pf, M_DEVBUF);
				}

				STAILQ_INIT(&sc->card.pf_head);
				wiped = 1;
			}

			if (pf_last == pccard_cis_quirks[i].pf) {
				cfe = malloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
				if (cfe == NULL)	/* M_NOWAIT allocation can fail */
					continue;
				*cfe = *pccard_cis_quirks[i].cfe;

				STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
			} else {
				pf = malloc(sizeof(*pf), M_DEVBUF, M_NOWAIT);
				if (pf == NULL)		/* M_NOWAIT allocation can fail */
					continue;
				*pf = *pccard_cis_quirks[i].pf;
				STAILQ_INIT(&pf->cfe_head);

				cfe = malloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
				if (cfe == NULL) {
					free(pf, M_DEVBUF);
					continue;
				}
				*cfe = *pccard_cis_quirks[i].cfe;

				STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
				STAILQ_INSERT_TAIL(&sc->card.pf_head, pf, pf_list);

				pf_last = pccard_cis_quirks[i].pf;
			}
		}
	}
}
Example #27
static void
cuda_intr(void *arg)
{
	device_t        dev;
	struct cuda_softc *sc;

	int i, ending, restart_send, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	restart_send = 0;
	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* bitch only once */
			if (sc->sc_received == 256) {
				device_printf(dev,"input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);			
		}
		
		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
							 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);

}
Example #28
static int
pkg_create_matches(int argc, char **argv, match_t match, pkg_formats fmt,
    const char * const outdir, bool overwrite)
{
	int i, ret = EPKG_OK, retcode = EPKG_OK;
	struct pkg *pkg = NULL;
	struct pkgdb *db = NULL;
	struct pkgdb_it *it = NULL;
	int query_flags = PKG_LOAD_DEPS | PKG_LOAD_FILES |
	    PKG_LOAD_CATEGORIES | PKG_LOAD_DIRS | PKG_LOAD_SCRIPTS |
	    PKG_LOAD_OPTIONS | PKG_LOAD_LICENSES |
	    PKG_LOAD_USERS | PKG_LOAD_GROUPS | PKG_LOAD_SHLIBS_REQUIRED |
	    PKG_LOAD_SHLIBS_PROVIDED | PKG_LOAD_ANNOTATIONS;
	struct pkg_head head = STAILQ_HEAD_INITIALIZER(head);
	struct pkg_entry *e = NULL;
	char pkgpath[MAXPATHLEN];
	const char *format = NULL;
	bool foundone;

	if (pkgdb_open(&db, PKGDB_DEFAULT) != EPKG_OK) {
		pkgdb_close(db);
		return (EX_IOERR);
	}
	/* XXX: get rid of hardcoded timeouts */
	if (pkgdb_obtain_lock(db, PKGDB_LOCK_READONLY) != EPKG_OK) {
		pkgdb_close(db);
		warnx("Cannot get a read lock on a database, it is locked by another process");
		return (EX_TEMPFAIL);
	}

	switch (fmt) {
	case TXZ:
		format = "txz";
		break;
	case TBZ:
		format = "tbz";
		break;
	case TGZ:
		format = "tgz";
		break;
	case TAR:
		format = "tar";
		break;
	}

	for (i = 0; i < argc || match == MATCH_ALL; i++) {
		if (match == MATCH_ALL) {
			printf("Loading the package list...\n");
			if ((it = pkgdb_query(db, NULL, match)) == NULL)
				goto cleanup;
			match = !MATCH_ALL;
		} else
			if ((it = pkgdb_query(db, argv[i], match)) == NULL)
				goto cleanup;

		foundone = false;
		while ((ret = pkgdb_it_next(it, &pkg, query_flags)) == EPKG_OK) {
			if ((e = malloc(sizeof(struct pkg_entry))) == NULL)
				err(1, "malloc(pkg_entry)");
			e->pkg = pkg;
			pkg = NULL;
			STAILQ_INSERT_TAIL(&head, e, next);
			foundone = true;
		}
		if (!foundone) {
			warnx("No installed package matching \"%s\" found\n",
			    argv[i]);
			retcode++;
		}

		pkgdb_it_free(it);
		if (ret != EPKG_END)
			retcode++;
	}

	while (!STAILQ_EMPTY(&head)) {
		e = STAILQ_FIRST(&head);
		STAILQ_REMOVE_HEAD(&head, next);

		if (!overwrite) {
			pkg_snprintf(pkgpath, sizeof(pkgpath), "%S/%n-%v.%S",
			    outdir, e->pkg, e->pkg, format);
			if (access(pkgpath, F_OK) == 0) {
				pkg_printf("%n-%v already packaged, skipping...\n",
				    e->pkg, e->pkg);
				pkg_free(e->pkg);
				free(e);
				continue;
			}
		}
		pkg_printf("Creating package for %n-%v\n", e->pkg, e->pkg);
		if (pkg_create_installed(outdir, fmt, e->pkg) !=
		    EPKG_OK)
			retcode++;
		pkg_free(e->pkg);
		free(e);
	}

cleanup:
	pkgdb_release_lock(db, PKGDB_LOCK_READONLY);
	pkgdb_close(db);

	return (retcode);
}
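pkg_create_matches() gathers every matched package onto a STAILQ first and only then drains the list to do the slow packaging work, so the database iterator is not held open across package creation. A small sketch of that gather-then-drain shape, with a made-up entry type in place of pkg_entry:

#include <sys/queue.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	char name[32];
	STAILQ_ENTRY(entry) next;
};

int
main(void)
{
	STAILQ_HEAD(, entry) head = STAILQ_HEAD_INITIALIZER(head);
	struct entry *e;
	const char *pkgs[] = { "pkg-1.0", "curl-8.0", "vim-9.1" };
	size_t i;

	/* Pass 1: gather everything the "query" matched. */
	for (i = 0; i < sizeof(pkgs) / sizeof(pkgs[0]); i++) {
		if ((e = malloc(sizeof(*e))) == NULL)
			err(1, "malloc");
		snprintf(e->name, sizeof(e->name), "%s", pkgs[i]);
		STAILQ_INSERT_TAIL(&head, e, next);
	}

	/* Pass 2: drain the list, doing the slow per-entry work. */
	while (!STAILQ_EMPTY(&head)) {
		e = STAILQ_FIRST(&head);
		STAILQ_REMOVE_HEAD(&head, next);
		printf("Creating package for %s\n", e->name);
		free(e);
	}
	return (0);
}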
Example #29
void
bios_getsmap(void)
{
	struct smap_buf		buf;
	STAILQ_HEAD(smap_head, smap_buf) head =
	    STAILQ_HEAD_INITIALIZER(head);
	struct smap_buf		*cur, *next;
	u_int			n, x;

	STAILQ_INIT(&head);
	n = 0;
	x = 0;
	v86.ebx = 0;
	do {
		v86.ctl = V86_FLAGS;
		v86.addr = 0x15;
		v86.eax = 0xe820;	/* int 0x15 function 0xe820 */
		v86.ecx = SMAP_BUFSIZE;
		v86.edx = SMAP_SIG;
		v86.es = VTOPSEG(&buf);
		v86.edi = VTOPOFF(&buf);
		v86int();
		if (V86_CY(v86.efl) || v86.eax != SMAP_SIG ||
		    v86.ecx < sizeof(buf.smap) || v86.ecx > SMAP_BUFSIZE)
			break;

		next = malloc(sizeof(*next));
		if (next == NULL)
			break;
		next->smap = buf.smap;
		if (v86.ecx == SMAP_BUFSIZE) {
			next->xattr = buf.xattr;
			x++;
		}
		STAILQ_INSERT_TAIL(&head, next, bufs);
		n++;
	} while (v86.ebx != 0);
	smaplen = n;

	if (smaplen > 0) {
		smapbase = malloc(smaplen * sizeof(*smapbase));
		if (smapbase != NULL) {
			n = 0;
			STAILQ_FOREACH(cur, &head, bufs)
				smapbase[n++] = cur->smap;
		}
		if (smaplen == x) {
			smapattr = malloc(smaplen * sizeof(*smapattr));
			if (smapattr != NULL) {
				n = 0;
				STAILQ_FOREACH(cur, &head, bufs)
					smapattr[n++] = cur->xattr &
					    SMAP_XATTR_MASK;
			}
		} else
			smapattr = NULL;
		cur = STAILQ_FIRST(&head);
		while (cur != NULL) {
			next = STAILQ_NEXT(cur, bufs);
			free(cur);
			cur = next;
		}
	}
}
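bios_getsmap() shows a useful idiom: when the record count is unknown up front, accumulate entries on a STAILQ, then allocate flat arrays of exactly the right size, copy the list over, and free the list. A minimal sketch of that accumulate-then-flatten pattern, with a made-up record type:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct rec {
	int value;
	STAILQ_ENTRY(rec) bufs;
};

int
main(void)
{
	STAILQ_HEAD(rec_head, rec) head = STAILQ_HEAD_INITIALIZER(head);
	struct rec *cur, *next;
	int *array;
	unsigned n = 0, i;

	/* Producer loop of unknown length (here: 7 records). */
	for (i = 0; i < 7; i++) {
		next = malloc(sizeof(*next));
		if (next == NULL)
			break;
		next->value = (int)i * 10;
		STAILQ_INSERT_TAIL(&head, next, bufs);
		n++;
	}

	/* The count is now known: size the array exactly, then copy. */
	array = malloc(n * sizeof(*array));
	if (array != NULL) {
		i = 0;
		STAILQ_FOREACH(cur, &head, bufs)
			array[i++] = cur->value;
	}

	/* Free the list the same way bios_getsmap() does. */
	cur = STAILQ_FIRST(&head);
	while (cur != NULL) {
		next = STAILQ_NEXT(cur, bufs);
		free(cur);
		cur = next;
	}

	for (i = 0; array != NULL && i < n; i++)
		printf("%d\n", array[i]);
	free(array);
	return (0);
}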
Example #30
void
dfs_process_phyerr(struct ieee80211com *ic, void *buf, u_int16_t datalen,
    u_int8_t r_rssi, u_int8_t r_ext_rssi, u_int32_t r_rs_tstamp,
    u_int64_t r_fulltsf)
{
   struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
   struct ieee80211_channel *chan=ic->ic_curchan;
   struct dfs_event *event;
   struct dfs_phy_err e;
   int empty;

   if (dfs == NULL) {
      VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                      "%s: sc_dfs is NULL\n", __func__);
      return;
   }

   dfs->dfs_phyerr_count++;
   dump_phyerr_contents(buf, datalen);
   /*
    * XXX The combined_rssi_ok support has been removed.
    * This was only clear for Owl.
    *
    * XXX TODO: re-add this; it requires passing in the ctl/ext
    * RSSI set from the RX status descriptor.
    *
    * XXX TODO TODO: this may be done for us from the legacy
    * phy error path in ath_dev; please review that code.
    */

   /*
    * At this time we have a radar pulse that we need to examine and
    * queue. But if dfs_process_radarevent already detected radar and set
    * CHANNEL_INTERFERENCE flag then do not queue any more radar data.
    * When we are in a new channel this flag will be clear and we will
    * start queueing data for new channel. (EV74162)
    */
   if (dfs->dfs_debug_mask & ATH_DEBUG_DFS_PHYERR_PKT)
      dump_phyerr_contents(buf, datalen);

   if (chan == NULL) {
      VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
              "%s: chan is NULL\n", __func__);
      return;
   }

   if (IEEE80211_IS_CHAN_RADAR(chan)) {
      DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
          "%s: Radar already found in the channel, "
          "do not queue radar data\n", __func__);
      return;
   }

   dfs->ath_dfs_stats.total_phy_errors++;
   DFS_DPRINTK(dfs, ATH_DEBUG_DFS2,
       "%s[%d] phyerr %d len %d\n",
       __func__, __LINE__,
       dfs->ath_dfs_stats.total_phy_errors, datalen);

   /*
    * hardware stores this as 8 bit signed value.
    * we will cap it at 0 if it is a negative number
    */
   if (r_rssi & 0x80)
      r_rssi = 0;

   if (r_ext_rssi & 0x80)
      r_ext_rssi = 0;

   OS_MEMSET(&e, 0, sizeof(e));

   /*
    * This is a bit evil - instead of just passing in
    * the chip version, the existing code uses a set
    * of HAL capability bits to determine what is
    * possible.
    *
    * The way I'm decoding it is thus:
    *
    * + DFS enhancement? Merlin or later
    * + DFS extension channel? Sowl or later. (Howl?)
    * + otherwise, Owl (and legacy.)
    */
   if (dfs->dfs_caps.ath_chip_is_bb_tlv) {
      if (dfs_process_phyerr_bb_tlv(dfs, buf, datalen, r_rssi,
          r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) {
         dfs->dfs_phyerr_reject_count++;
         return;
      } else {
         if (dfs->dfs_phyerr_freq_min > e.freq)
            dfs->dfs_phyerr_freq_min = e.freq;

         if (dfs->dfs_phyerr_freq_max < e.freq)
            dfs->dfs_phyerr_freq_max = e.freq;
      }
   } else if (dfs->dfs_caps.ath_dfs_use_enhancement) {
      if (dfs_process_phyerr_merlin(dfs, buf, datalen, r_rssi,
          r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0)
         return;
   } else if (dfs->dfs_caps.ath_dfs_ext_chan_ok) {
      if (dfs_process_phyerr_sowl(dfs, buf, datalen, r_rssi,
          r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0)
         return;
   } else {
      if (dfs_process_phyerr_owl(dfs, buf, datalen, r_rssi,
          r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0)
         return;
   }

   /*
    * If the hardware supports radar reporting on the extension channel
    * it will supply FFT data for longer radar pulses.
    *
    * TLV chips don't go through this software check - the hardware
    * check should be enough.  If we want to do software checking
    * later on then someone will have to craft an FFT parser
    * suitable for the TLV FFT data format.
    */
   if ((! dfs->dfs_caps.ath_chip_is_bb_tlv) &&
       dfs->dfs_caps.ath_dfs_ext_chan_ok) {
      /*
       * HW has a known issue with chirping pulses injected at or
       * around DC in 40MHz mode. Such pulses are reported with
       * much lower durations and SW then discards them because
       * they do not fit the minimum bin5 pulse duration.
       *
       * To work around this issue, if a pulse is within a 10us
       * range of the bin5 min duration, check if the pulse is
       * chirping. If the pulse is chirping, bump up the duration
       * to the minimum bin5 duration.
       *
       * This makes sure that a valid chirping pulse will not be
       * discarded because of incorrect low duration.
       *
       * TBD - Is it possible to calculate the 'real' duration of
       * the pulse using the slope of the FFT data?
       *
       * TBD - Use FFT data to differentiate between radar pulses
       * and false PHY errors.
       * This will let us reduce the number of false alarms seen.
       *
       * BIN 5 chirping pulses are only for FCC or Japan MMK4 domain
       */
      if (((dfs->dfsdomain == DFS_FCC_DOMAIN) ||
          (dfs->dfsdomain == DFS_MKK4_DOMAIN)) &&
          (e.dur >= MAYBE_BIN5_DUR) && (e.dur < MAX_BIN5_DUR)) {
         int add_dur;
         int slope = 0, dc_found = 0;

         /*
          * Set the event chirping flags; as we're doing
          * an actual chirp check.
          */
         e.do_check_chirp = 1;
         e.is_hw_chirp = 0;
         e.is_sw_chirp = 0;

         /*
          * dfs_check_chirping() expects is_pri and is_ext
          * to be '1' for true and '0' for false for now,
          * as the function itself uses these values in
          * constructing things rather than testing them
          * for 'true' or 'false'.
          */
         add_dur = dfs_check_chirping(dfs, buf, datalen,
             (e.is_pri ? 1 : 0),
             (e.is_ext ? 1 : 0),
             &slope, &dc_found);
         if (add_dur) {
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR,
                "old dur %d slope =%d\n", e.dur, slope);
            e.is_sw_chirp = 1;
            // bump up to a random bin5 pulse duration
            if (e.dur < MIN_BIN5_DUR) {
               e.dur = dfs_get_random_bin5_dur(dfs,
                   e.fulltsf);
            }
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR,
                "new dur %d\n", e.dur);
         } else {
            /* set the duration so that it is rejected */
            e.is_sw_chirp = 0;
            e.dur = MAX_BIN5_DUR + 100;
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR,
                "is_chirping = %d dur=%d \n",
                add_dur, e.dur);
         }
      } else {
         /*
          * We have a pulse that is either bigger than
          * MAX_BIN5_DUR or * less than MAYBE_BIN5_DUR
          */
         if ((dfs->dfsdomain == DFS_FCC_DOMAIN) ||
             (dfs->dfsdomain == DFS_MKK4_DOMAIN)) {
            /*
             * XXX Would this result in very large pulses
             *     wrapping around to become short pulses?
             */
            if (e.dur >= MAX_BIN5_DUR) {
               /*
                * set the duration so that it is
                * rejected
                */
               e.dur = MAX_BIN5_DUR + 50;
            }
         }
      }
   }

   /*
    * Add the parsed, checked and filtered entry to the radar pulse
    * event list.  This is then checked by dfs_radar_processevent().
    *
    * XXX TODO: some filtering is still done below this point - fix
    * XXX this!
    */
   ATH_DFSEVENTQ_LOCK(dfs);
   empty = STAILQ_EMPTY(&(dfs->dfs_eventq));
   ATH_DFSEVENTQ_UNLOCK(dfs);
   if (empty) {
      return;
   }

   /*
    * If the channel is a turbo G channel, then the event is
    * for the adaptive radio (AR) pattern matching rather than
    * radar detection.
    */
   if ((chan->ic_flags & CHANNEL_108G) == CHANNEL_108G) {
      if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) {
         DFS_DPRINTK(dfs, ATH_DEBUG_DFS2,
             "%s: DFS_AR_EN not enabled\n",
             __func__);
         return;
      }
      ATH_DFSEVENTQ_LOCK(dfs);
      event = STAILQ_FIRST(&(dfs->dfs_eventq));
      if (event == NULL) {
         ATH_DFSEVENTQ_UNLOCK(dfs);
         DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
             "%s: no more events space left\n",
             __func__);
         return;
      }
      STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list);
      ATH_DFSEVENTQ_UNLOCK(dfs);
      event->re_rssi = e.rssi;
      event->re_dur = e.dur;
      event->re_full_ts = e.fulltsf;
      event->re_ts = (e.rs_tstamp) & DFS_TSMASK;
      event->re_chanindex = dfs->dfs_curchan_radindex;
      event->re_flags = 0;

      /*
       * Handle chirp flags.
       */
      if (e.do_check_chirp) {
         event->re_flags |= DFS_EVENT_CHECKCHIRP;
         if (e.is_hw_chirp)
            event->re_flags |= DFS_EVENT_HW_CHIRP;
         if (e.is_sw_chirp)
            event->re_flags |= DFS_EVENT_SW_CHIRP;
      }

      ATH_ARQ_LOCK(dfs);
      STAILQ_INSERT_TAIL(&(dfs->dfs_arq), event, re_list);
      ATH_ARQ_UNLOCK(dfs);
   } else {
      if (IEEE80211_IS_CHAN_DFS(chan)) {
         if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) {
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
                "%s: DFS_RADAR_EN not enabled\n", __func__);
            return;
         }
         /*
          * rssi is not accurate for short pulses, so do
          * not filter based on that for short duration pulses
          *
          * XXX do this filtering above?
          */
         if (dfs->dfs_caps.ath_dfs_ext_chan_ok) {
            if ((e.rssi < dfs->dfs_rinfo.rn_minrssithresh &&
                (e.dur > 4)) ||
                e.dur > (dfs->dfs_rinfo.rn_maxpulsedur) ) {
               dfs->ath_dfs_stats.rssi_discards++;
               DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                   "Extension channel pulse is "
                   "discarded: dur=%d, "
                   "maxpulsedur=%d, rssi=%d, "
                   "minrssi=%d\n",
                  e.dur,
                  dfs->dfs_rinfo.rn_maxpulsedur,
                  e.rssi,
                  dfs->dfs_rinfo.rn_minrssithresh);
               return;
            }
         } else {
            if (e.rssi < dfs->dfs_rinfo.rn_minrssithresh ||
                e.dur > dfs->dfs_rinfo.rn_maxpulsedur) {
               /* XXX TODO add a debug statement? */
               dfs->ath_dfs_stats.rssi_discards++;
               return;
            }
         }

         /*
          * Add the event to the list, if there's space.
          */
         ATH_DFSEVENTQ_LOCK(dfs);
         event = STAILQ_FIRST(&(dfs->dfs_eventq));
         if (event == NULL) {
            ATH_DFSEVENTQ_UNLOCK(dfs);
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                "%s: no more events space left\n",
                __func__);
            return;
         }
         STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list);
         ATH_DFSEVENTQ_UNLOCK(dfs);

         dfs->dfs_phyerr_queued_count++;
         dfs->dfs_phyerr_w53_counter++;

         event->re_dur = e.dur;
         event->re_full_ts = e.fulltsf;
         event->re_ts = (e.rs_tstamp) & DFS_TSMASK;
         event->re_rssi = e.rssi;

         /*
          * Handle chirp flags.
          */
         if (e.do_check_chirp) {
            event->re_flags |= DFS_EVENT_CHECKCHIRP;
            if (e.is_hw_chirp)
               event->re_flags |= DFS_EVENT_HW_CHIRP;
            if (e.is_sw_chirp)
               event->re_flags |= DFS_EVENT_SW_CHIRP;
         }

         /*
          * Correctly set which channel is being reported on
          */
         if (e.is_pri) {
            event->re_chanindex = dfs->dfs_curchan_radindex;
         } else {
            if (dfs->dfs_extchan_radindex == -1) {
               DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR,
                   "%s - phyerr on ext channel\n", __func__);
            }
            event->re_chanindex = dfs->dfs_extchan_radindex;
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR,
                "%s New extension channel event is added "
                "to queue\n", __func__);
         }
         ATH_DFSQ_LOCK(dfs);
         STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list);
         ATH_DFSQ_UNLOCK(dfs);
      }
   }

   /*
    * Schedule the radar/AR task as appropriate.
    *
    * XXX isn't a lock needed for ath_radar_tasksched?
    */

/*
*  Commenting out the dfs_process_ar_event() since the function is never
*  called at run time as dfs_arq will be empty and the function
*  dfs_process_ar_event is obsolete and function definition is removed
*  as part of dfs_ar.c file
*
*  if (!STAILQ_EMPTY(&dfs->dfs_arq))
*     // XXX shouldn't this be a task/timer too?
*     dfs_process_ar_event(dfs, ic->ic_curchan);
*/

   if (!STAILQ_EMPTY(&dfs->dfs_radarq) && !dfs->ath_radar_tasksched) {
      dfs->ath_radar_tasksched = 1;
      OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
   }
#undef   EXT_CH_RADAR_FOUND
#undef   PRI_CH_RADAR_FOUND
#undef   EXT_CH_RADAR_EARLY_FOUND
}