Example #1
static int hfa384x_submit_tx_urb(struct net_device *dev, struct sk_buff *skb, gfp_t flags)
{
	struct hostap_interface *iface = netdev_priv(dev);
	local_info_t *local = iface->local;
	struct hostap_usb_priv *hw_priv = local->hw_priv;
	int ret;

	BUG_ON(!skb);

	usb_fill_bulk_urb(&hw_priv->tx_urb, hw_priv->usb,
			hw_priv->endp_out, skb->data, ROUNDUP64(skb->len),
			hfa384x_usbout_callback, dev);

	print_hex_dump_bytes("out ", DUMP_PREFIX_OFFSET, skb->data, skb->len);

	/* FIXME: don't resubmit while we are at stall ??? */
	ret = usb_submit_urb(&hw_priv->tx_urb, flags);
	if (ret == -EPIPE) {
		printk(KERN_ERR "%s tx pipe stall!\n", dev->name);
		// FIXME;
	}
	if (ret)
		printk(KERN_ERR "%s tx submit %d\n", dev->name, ret);

	return ret;
}
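
None of these excerpts define ROUNDUP64() itself. Judging by the 64-bit alignment check in Example #8 ((write_ind & 0x7) != 0), it rounds a byte count up to the next multiple of 8 bytes; that is the assumption used in the sketches added on this page, although some code bases may round to 64 bytes instead. A minimal, self-contained version of that assumed macro:

#include <assert.h>
#include <stdint.h>

/* Assumed definition: round a byte count up to the next multiple of 8. */
#define ROUNDUP64(x)  (((uint64_t)(x) + 7u) & ~(uint64_t)7u)

int main(void)
{
    assert(ROUNDUP64(0)  == 0);
    assert(ROUNDUP64(1)  == 8);
    assert(ROUNDUP64(8)  == 8);
    assert(ROUNDUP64(13) == 16);   /* a 13-byte skb would go out as a 16-byte URB */
    return 0;
}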
Example #2
VOID
KeStartAllProcessors (
    VOID
    )

/*++

Routine Description:

    This function is called during phase 1 initialization on the master boot
    processor to start all of the other registered processors.

Arguments:

    None.

Return Value:

    None.

--*/

{

#if !defined(NT_UP)

    ULONG AllocationSize;
    PUCHAR Base;
    PKPCR CurrentPcr = KeGetPcr();
    PVOID DataBlock;
    PVOID DpcStack;
    PKGDTENTRY64 GdtBase;
    ULONG GdtOffset;
    ULONG IdtOffset;
    UCHAR Index;
    PVOID KernelStack;
    ULONG LogicalProcessors;
    ULONG MaximumProcessors;
    PKNODE Node;
    UCHAR NodeNumber = 0;
    UCHAR Number;
    KIRQL OldIrql;
    PKNODE OldNode;
    PKNODE ParentNode;
    PKPCR PcrBase;
    PKPRCB Prcb;
    USHORT ProcessorId;
    KPROCESSOR_STATE ProcessorState;
    PKTSS64 SysTssBase;
    PKGDTENTRY64 TebBase;
    PETHREAD Thread;

    //
    // Ensure that prefetch instructions in the IPI path are patched out
    // if necessary before starting other processors.
    //

    OldIrql = KeRaiseIrqlToSynchLevel();
    KiIpiSendRequest(1, 0, 0, IPI_FLUSH_SINGLE);
    KeLowerIrql(OldIrql);

    //
    // Do not start additional processors if the relocate physical loader
    // switch has been specified.
    // 

    if (KeLoaderBlock->LoadOptions != NULL) {
        if (strstr(KeLoaderBlock->LoadOptions, "RELOCATEPHYSICAL") != NULL) {
            return;
        }
    }

    //
    // If this is a multinode system and processor zero is not on node zero,
    // then move it to the appropriate node.
    //

    if (KeNumberNodes > 1) {
        if (NT_SUCCESS(KiQueryProcessorNode(0, &ProcessorId, &NodeNumber))) {
            if (NodeNumber != 0) {
                KiNode0.ProcessorMask = 0;
                KiNodeInit[0] = KiNode0;
                KeNodeBlock[0] = &KiNodeInit[0];
                KiNode0 = *KeNodeBlock[NodeNumber];
                KeNodeBlock[NodeNumber] = &KiNode0;
                KiNode0.ProcessorMask = 1;
            }

        } else {
            goto StartFailure;
        }
    }

    //
    // Calculate the size of the per processor data structures.
    //
    // This includes:
    //
    //   PCR (including the PRCB)
    //   System TSS
    //   Idle Thread Object
    //   Double Fault Stack
    //   Machine Check Stack
    //   NMI Stack
    //   Multinode structure
    //   GDT
    //   IDT
    //
    // A DPC and Idle stack are also allocated, but they are done separately.
    //

    AllocationSize = ROUNDUP64(sizeof(KPCR)) +
                     ROUNDUP64(sizeof(KTSS64)) +
                     ROUNDUP64(sizeof(ETHREAD)) +
                     ROUNDUP64(DOUBLE_FAULT_STACK_SIZE) +
                     ROUNDUP64(KERNEL_MCA_EXCEPTION_STACK_SIZE) +
                     ROUNDUP64(NMI_STACK_SIZE) +
                     ROUNDUP64(sizeof(KNODE));

    //
    // Save the offset of the GDT in the allocation structure and add in
    // the size of the GDT.
    //

    GdtOffset = AllocationSize;
    AllocationSize +=
            CurrentPcr->Prcb.ProcessorState.SpecialRegisters.Gdtr.Limit + 1;

    //
    // Save the offset of the IDT in the allocation structure and add in
    // the size of the IDT.
    //

    IdtOffset = AllocationSize;
    AllocationSize +=
            CurrentPcr->Prcb.ProcessorState.SpecialRegisters.Idtr.Limit + 1;

    //
    // If the registered number of processors is greater than the maximum
    // number of processors supported, then only allow the maximum number
    // of supported processors.
    //

    if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
        KeRegisteredProcessors = MAXIMUM_PROCESSORS;
    }

    //
    // Set barrier that will prevent any other processor from entering the
    // idle loop until all processors have been started.
    //

    KiBarrierWait = 1;

    //
    // Initialize the fixed part of the processor state that will be used to
    // start processors. Each processor starts in the system initialization
    // code with address of the loader parameter block as an argument.
    //

    RtlZeroMemory(&ProcessorState, sizeof(KPROCESSOR_STATE));
    ProcessorState.ContextFrame.Rcx = (ULONG64)KeLoaderBlock;
    ProcessorState.ContextFrame.Rip = (ULONG64)KiSystemStartup;
    ProcessorState.ContextFrame.SegCs = KGDT64_R0_CODE;
    ProcessorState.ContextFrame.SegDs = KGDT64_R3_DATA | RPL_MASK;
    ProcessorState.ContextFrame.SegEs = KGDT64_R3_DATA | RPL_MASK;
    ProcessorState.ContextFrame.SegFs = KGDT64_R3_CMTEB | RPL_MASK;
    ProcessorState.ContextFrame.SegGs = KGDT64_R3_DATA | RPL_MASK;
    ProcessorState.ContextFrame.SegSs = KGDT64_R0_DATA;

    //
    // Check to determine if hyper-threading is really enabled. Intel chips
    // claim to be hyper-threaded with the number of logical processors
    // greater than one even when hyper-threading is disabled in the BIOS.
    //

    LogicalProcessors = KiLogicalProcessors;
    if (HalIsHyperThreadingEnabled() == FALSE) {
        LogicalProcessors = 1;
    }

    //
    // If the total number of logical processors has not been set with
    // the /NUMPROC loader option, then set the maximum number of logical
    // processors to the number of registered processors times the number
    // of logical processors per registered processor.
    //
    // N.B. The number of logical processors is never allowed to exceed
    //      the number of registered processors times the number of logical
    //      processors per physical processor.
    //

    MaximumProcessors = KeNumprocSpecified;
    if (MaximumProcessors == 0) {
        MaximumProcessors = KeRegisteredProcessors * LogicalProcessors;
    }

    //
    // Loop trying to start new processors until a processor can't be
    // started or an allocation failure occurs.
    //
    // N.B. The processor start code below relies on the fact that a physical
    //      processor is started followed by all its logical processors.
    //      The HAL guarantees this by sorting the ACPI processor table
    //      by APIC id.
    //

    Index = 0;
    Number = 0;
    while ((Index < (MAXIMUM_PROCESSORS - 1)) &&
           ((ULONG)KeNumberProcessors < MaximumProcessors) &&
           ((ULONG)KeNumberProcessors / LogicalProcessors) < KeRegisteredProcessors) {

        //
        // If this is a multinode system and the current processor does not
        // exist on any node, then skip it.
        //

        Index += 1;
        if (KeNumberNodes > 1) {
            if (!NT_SUCCESS(KiQueryProcessorNode(Index, &ProcessorId, &NodeNumber))) {
                continue;
            }
        }

        //
        // Increment the processor number.
        //

        Number += 1;

        //
        // Allocate memory for the new processor specific data. If the
        // allocation fails, then stop starting processors.
        //

        DataBlock = MmAllocateIndependentPages(AllocationSize, NodeNumber);
        if (DataBlock == NULL) {
            goto StartFailure;
        }

        //
        // Allocate a pool tag table for the new processor.
        //

        if (ExCreatePoolTagTable(Number, NodeNumber) == NULL) {
            goto StartFailure;
        }

        //
        // Zero the allocated memory.
        //

        Base = (PUCHAR)DataBlock;
        RtlZeroMemory(DataBlock, AllocationSize);

        //
        // Copy and initialize the GDT for the next processor.
        //

        KiCopyDescriptorMemory(&CurrentPcr->Prcb.ProcessorState.SpecialRegisters.Gdtr,
                               &ProcessorState.SpecialRegisters.Gdtr,
                               Base + GdtOffset);

        GdtBase = (PKGDTENTRY64)ProcessorState.SpecialRegisters.Gdtr.Base;

        //
        // Encode the processor number in the upper 6 bits of the compatibility
        // mode TEB descriptor.
        //

        TebBase = (PKGDTENTRY64)((PCHAR)GdtBase + KGDT64_R3_CMTEB);
        TebBase->Bits.LimitHigh = Number >> 2;
        TebBase->LimitLow = ((Number & 0x3) << 14) | (TebBase->LimitLow & 0x3fff);

        //
        // Copy and initialize the IDT for the next processor.
        //

        KiCopyDescriptorMemory(&CurrentPcr->Prcb.ProcessorState.SpecialRegisters.Idtr,
                               &ProcessorState.SpecialRegisters.Idtr,
                               Base + IdtOffset);

        //
        // Set the PCR base address for the next processor, set the processor
        // number, and set the processor speed.
        //
        // N.B. The PCR address is passed to the next processor by computing
        //      the containing address with respect to the PRCB.
        //

        PcrBase = (PKPCR)Base;
        PcrBase->ObsoleteNumber = Number;
        PcrBase->Prcb.Number = Number;
        PcrBase->Prcb.MHz = KeGetCurrentPrcb()->MHz;
        Base += ROUNDUP64(sizeof(KPCR));

        //
        // Set the system TSS descriptor base for the next processor.
        //

        SysTssBase = (PKTSS64)Base;
        KiSetDescriptorBase(KGDT64_SYS_TSS / 16, GdtBase, SysTssBase);
        Base += ROUNDUP64(sizeof(KTSS64));

        //
        // Initialize the panic stack address for double fault and NMI.
        //

        Base += DOUBLE_FAULT_STACK_SIZE;
        SysTssBase->Ist[TSS_IST_PANIC] = (ULONG64)Base;

        //
        // Initialize the machine check stack address.
        //

        Base += KERNEL_MCA_EXCEPTION_STACK_SIZE;
        SysTssBase->Ist[TSS_IST_MCA] = (ULONG64)Base;

        //
        // Initialize the NMI stack address.
        //

        Base += NMI_STACK_SIZE;
        SysTssBase->Ist[TSS_IST_NMI] = (ULONG64)Base;

        //
        // Idle thread object.
        //

        Thread = (PETHREAD)Base;
        Base += ROUNDUP64(sizeof(ETHREAD));

        //
        // Set other special registers in the processor state.
        //

        ProcessorState.SpecialRegisters.Cr0 = ReadCR0();
        ProcessorState.SpecialRegisters.Cr3 = ReadCR3();
        ProcessorState.ContextFrame.EFlags = 0;
        ProcessorState.SpecialRegisters.Tr  = KGDT64_SYS_TSS;
        GdtBase[KGDT64_SYS_TSS / 16].Bytes.Flags1 = 0x89;
        ProcessorState.SpecialRegisters.Cr4 = ReadCR4();

        //
        // Allocate a kernel stack and a DPC stack for the next processor.
        //

        KernelStack = MmCreateKernelStack(FALSE, NodeNumber);
        if (KernelStack == NULL) {
            goto StartFailure;
        }

        DpcStack = MmCreateKernelStack(FALSE, NodeNumber);
        if (DpcStack == NULL) {
            goto StartFailure;
        }

        //
        // Initialize the kernel stack for the system TSS.
        //
        // N.B. System startup must be called with a stack pointer that is
        //      8 mod 16.
        //

        SysTssBase->Rsp0 = (ULONG64)KernelStack - sizeof(PVOID) * 4;
        ProcessorState.ContextFrame.Rsp = (ULONG64)KernelStack - 8;

        //
        // If this is the first processor on this node, then use the space
        // already allocated for the node. Otherwise, the space allocated
        // is not used.
        //

        Node = KeNodeBlock[NodeNumber];
        OldNode = Node;
        if (Node == &KiNodeInit[NodeNumber]) {
            Node = (PKNODE)Base;
            *Node = KiNodeInit[NodeNumber];
            KeNodeBlock[NodeNumber] = Node;
        }

        Base += ROUNDUP64(sizeof(KNODE));

        //
        // Set the parent node address.
        //

        PcrBase->Prcb.ParentNode = Node;

        //
        // Adjust the loader block so it has the next processor state. Ensure
        // that the kernel stack has space for home registers for up to four
        // parameters.
        //

        KeLoaderBlock->KernelStack = (ULONG64)DpcStack - (sizeof(PVOID) * 4);
        KeLoaderBlock->Thread = (ULONG64)Thread;
        KeLoaderBlock->Prcb = (ULONG64)(&PcrBase->Prcb);

        //
        // Attempt to start the next processor. If a processor cannot be
        // started, then deallocate memory and stop starting processors.
        //

        if (HalStartNextProcessor(KeLoaderBlock, &ProcessorState) == 0) {

            //
            // Restore the old node address in the node address array before
            // freeing the allocated data block (the node structure lies
            // within the data block).
            //

            *OldNode = *Node;
            KeNodeBlock[NodeNumber] = OldNode;
            ExDeletePoolTagTable(Number);
            MmFreeIndependentPages(DataBlock, AllocationSize);
            MmDeleteKernelStack(KernelStack, FALSE);
            MmDeleteKernelStack(DpcStack, FALSE);
            break;
        }

        Node->ProcessorMask |= AFFINITY_MASK(Number);

        //
        // Wait for processor to initialize.
        //

        while (*((volatile ULONG64 *)&KeLoaderBlock->Prcb) != 0) {
            KeYieldProcessor();
        }
    }

    //
    // All processors have been started. If this is a multinode system, then
    // allocate any missing node structures.
    //

    if (KeNumberNodes > 1) {
        for (Index = 0; Index < KeNumberNodes; Index += 1) {
            if (KeNodeBlock[Index] == &KiNodeInit[Index]) {
                Node = ExAllocatePoolWithTag(NonPagedPool, sizeof(KNODE), '  eK');
                if (Node != NULL) {
                    *Node = KiNodeInit[Index];
                    KeNodeBlock[Index] = Node;

                } else {
                    goto StartFailure;
                }
            }
        }

    } else if (KiNode0.ProcessorMask != KeActiveProcessors) {
        goto StartFailure;
    }

    //
    // Clear node structure address for nonexistent nodes.
    //

    for (Index = KeNumberNodes; Index < MAXIMUM_CCNUMA_NODES; Index += 1) {
        KeNodeBlock[Index] = NULL;
    }

    //
    // Copy the node color and shifted color to the PRCB of each processor.
    //

    for (Index = 0; Index < (ULONG)KeNumberProcessors; Index += 1) {
        Prcb = KiProcessorBlock[Index];
        ParentNode = Prcb->ParentNode;
        Prcb->NodeColor = ParentNode->Color;
        Prcb->NodeShiftedColor = ParentNode->MmShiftedColor;
        Prcb->SecondaryColorMask = MmSecondaryColorMask;
    }

    //
    // Reset the initialization bit in prefetch retry.
    //

    KiPrefetchRetry &= ~0x80;

    //
    // Reset and synchronize the performance counters of all processors, by
    // applying a null adjustment to the interrupt time.
    //

    KeAdjustInterruptTime(0);

    //
    // Allow all processors that were started to enter the idle loop and
    // begin execution.
    //

    KiBarrierWait = 0;

#endif // !defined(NT_UP)

    return;

    //
    // Either a memory allocation failed or an unsuccessful status was returned
    // during the attempt to start processors. This is considered fatal since
    // something is very wrong.
    //

#if !defined(NT_UP)

StartFailure:
    KeBugCheckEx(PHASE1_INITIALIZATION_FAILED, 0, 0, 20, 0);

#endif

}
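
A worked restatement of the TEB descriptor packing above: the 6-bit processor number is split across limit bits 14..19, two bits in the top of LimitLow and four bits in LimitHigh. The descriptor layout below is a simplified, hypothetical stand-in for KGDTENTRY64 (just a 16-bit LimitLow and a 4-bit LimitHigh), and the decode helper exists only to show that the packing round-trips; the kernel itself presumably recovers the number from the segment limit at run time.

#include <assert.h>
#include <stdint.h>

struct gdt_limit {
    uint16_t limit_low;    /* segment limit bits 0..15  */
    uint8_t  limit_high;   /* segment limit bits 16..19 (low 4 bits used) */
};

static void encode_cpu_number(struct gdt_limit *d, unsigned number)
{
    d->limit_high = (uint8_t)(number >> 2);                  /* upper 4 bits of the number */
    d->limit_low  = (uint16_t)(((number & 0x3u) << 14) |     /* lower 2 bits of the number */
                               (d->limit_low & 0x3fffu));    /* preserve limit bits 0..13  */
}

static unsigned decode_cpu_number(const struct gdt_limit *d)
{
    return ((unsigned)(d->limit_high & 0xfu) << 2) | (d->limit_low >> 14);
}

int main(void)
{
    struct gdt_limit d = { 0x3fff, 0 };

    encode_cpu_number(&d, 37);
    assert(decode_cpu_number(&d) == 37);
    assert((d.limit_low & 0x3fffu) == 0x3fffu);   /* original limit bits untouched */
    return 0;
}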
Example #3
int
wi_write_record_usb(struct wi_softc *wsc, struct wi_ltv_gen *ltv)
{
	struct wi_usb_chain	*c;
	struct wi_usb_softc	*sc = wsc->wi_usb_cdata;
	struct wi_wridreq	*prid;
	int			total_len, rnd_len;
	int			err;
	struct wi_ltv_gen	p2ltv;
	u_int16_t		val = 0;
	int			i;

	DPRINTFN(5,("%s: %s: enter rid=%x wi_len %d copying %x\n",
	    sc->wi_usb_dev.dv_xname, __func__, ltv->wi_type, ltv->wi_len,
	    (ltv->wi_len-1)*2 ));

	/* Do we need to deal with these here, as in _io version?
	 * WI_PORTTYPE_IBSS -> WI_RID_PORTTYPE
	 * RID_TX_RATE munging
	 * RID_ENCRYPTION
	 * WI_RID_TX_CRYPT_KEY
	 * WI_RID_DEFLT_CRYPT_KEYS
	 */
	if (ltv->wi_type == WI_RID_PORTTYPE &&
	    letoh16(ltv->wi_val) == WI_PORTTYPE_IBSS) {
		/* Convert WI_PORTTYPE_IBSS to vendor IBSS port type. */
		p2ltv.wi_type = WI_RID_PORTTYPE;
		p2ltv.wi_len = 2;
		p2ltv.wi_val = wsc->wi_ibss_port;
		ltv = &p2ltv;
	} else if (wsc->sc_firmware_type != WI_LUCENT) {
		int v;

		switch (ltv->wi_type) {
		case WI_RID_TX_RATE:
			p2ltv.wi_type = WI_RID_TX_RATE;
			p2ltv.wi_len = 2;
			switch (letoh16(ltv->wi_val)) {
			case 1: v = 1; break;
			case 2: v = 2; break;
			case 3:	v = 15; break;
			case 5: v = 4; break;
			case 6: v = 3; break;
			case 7: v = 7; break;
			case 11: v = 8; break;
			default: return EINVAL;
			}
			p2ltv.wi_val = htole16(v);
			ltv = &p2ltv;
			break;
		case WI_RID_ENCRYPTION:
			p2ltv.wi_type = WI_RID_P2_ENCRYPTION;
			p2ltv.wi_len = 2;
			if (ltv->wi_val & htole16(0x01)) {
				val = PRIVACY_INVOKED;
				/*
				 * If using shared key WEP we must set the
				 * EXCLUDE_UNENCRYPTED bit.  Symbol cards
				 * need this bit set even when not using
				 * shared key. We can't just test for
				 * IEEE80211_AUTH_SHARED since Symbol cards
				 * have 2 shared key modes.
				 */
				if (wsc->wi_authtype != IEEE80211_AUTH_OPEN ||
				    wsc->sc_firmware_type == WI_SYMBOL)
					val |= EXCLUDE_UNENCRYPTED;

				switch (wsc->wi_crypto_algorithm) {
				case WI_CRYPTO_FIRMWARE_WEP:
					/*
					 * TX encryption is broken in
					 * Host AP mode.
					 */
					if (wsc->wi_ptype == WI_PORTTYPE_HOSTAP)
						val |= HOST_ENCRYPT;
					break;
				case WI_CRYPTO_SOFTWARE_WEP:
					val |= HOST_ENCRYPT|HOST_DECRYPT;
					break;
				}
				p2ltv.wi_val = htole16(val);
			} else
				p2ltv.wi_val = htole16(HOST_ENCRYPT | HOST_DECRYPT);
			ltv = &p2ltv;
			break;
		case WI_RID_TX_CRYPT_KEY:
			if (ltv->wi_val > WI_NLTV_KEYS)
				return (EINVAL);
			p2ltv.wi_type = WI_RID_P2_TX_CRYPT_KEY;
			p2ltv.wi_len = 2;
			p2ltv.wi_val = ltv->wi_val;
			ltv = &p2ltv;
			break;
		case WI_RID_DEFLT_CRYPT_KEYS: {
				int error;
				int keylen;
				struct wi_ltv_str ws;
				struct wi_ltv_keys *wk;

				wk = (struct wi_ltv_keys *)ltv;
				keylen = wk->wi_keys[wsc->wi_tx_key].wi_keylen;
				keylen = letoh16(keylen);

				for (i = 0; i < 4; i++) {
					bzero(&ws, sizeof(ws));
					ws.wi_len = (keylen > 5) ? 8 : 4;
					ws.wi_type = WI_RID_P2_CRYPT_KEY0 + i;
					bcopy(&wk->wi_keys[i].wi_keydat,
					    ws.wi_str, keylen);
					error = wi_write_record_usb(wsc,
					    (struct wi_ltv_gen *)&ws);
					if (error)
						return (error);
				}
			}
			return (0);
		}
	}

	wi_usb_tx_lock(sc);

	c = &sc->wi_usb_tx_chain[0];

	prid = c->wi_usb_buf;

	total_len = sizeof(prid->type) + sizeof(prid->frmlen) +
	    sizeof(prid->rid) + (ltv->wi_len-1)*2;
	rnd_len = ROUNDUP64(total_len);
	if (rnd_len > WI_USB_BUFSZ) {
		printf("write_record buf size err %x %x\n", 
		    rnd_len, WI_USB_BUFSZ);
		wi_usb_tx_unlock(sc);
		return EIO;
	}

	prid->type = htole16(WI_USB_WRIDREQ);
	prid->frmlen = htole16(ltv->wi_len);
	prid->rid  = htole16(ltv->wi_type);
	if (ltv->wi_len > 1)
		bcopy(&ltv->wi_val, &prid->data[0], (ltv->wi_len-1)*2);

	bzero(((char*)prid)+total_len, rnd_len - total_len);

	usbd_setup_xfer(c->wi_usb_xfer, sc->wi_usb_ep[WI_USB_ENDPT_TX],
	    c, c->wi_usb_buf, rnd_len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
	    WI_USB_TX_TIMEOUT, wi_usb_txeof);

	err = wi_usb_do_transmit_sync(sc, c, &sc->ridresperr);

	if (err == 0)
		err = sc->ridresperr;

	sc->ridresperr = 0;

	wi_usb_tx_unlock(sc);

	DPRINTFN(5,("%s: %s: exit err=%x\n",
	    sc->wi_usb_dev.dv_xname, __func__, err));
	return err;
}
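
Examples #3 through #6 all build their USB request the same way: compute the exact length, round it up with ROUNDUP64(), fail with EIO if the rounded size exceeds WI_USB_BUFSZ, copy the payload, and zero the padding so stale buffer contents are never transmitted. The pattern is sketched once here rather than after each example; the buffer and helper names are hypothetical, and ROUNDUP64() is assumed to round to 8 bytes.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ROUNDUP64(x)  (((x) + 7u) & ~(size_t)7)   /* assumed 8-byte round-up */

/*
 * Returns the number of bytes to hand to the USB transfer, or 0 if the
 * padded request would not fit in the buffer (where the driver returns EIO).
 */
static size_t build_padded_request(uint8_t *buf, size_t bufsz,
                                   const uint8_t *payload, size_t payload_len)
{
    size_t rounded = ROUNDUP64(payload_len);

    if (rounded > bufsz)
        return 0;

    memcpy(buf, payload, payload_len);
    memset(buf + payload_len, 0, rounded - payload_len);  /* never send stale bytes */
    return rounded;
}

int main(void)
{
    uint8_t buf[64];
    uint8_t rid_body[11] = { 0 };

    /* an 11-byte record is padded to 16 bytes before transmission */
    assert(build_padded_request(buf, sizeof(buf), rid_body, sizeof(rid_body)) == 16);
    return 0;
}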
Example #4
int
wi_cmd_usb(struct wi_softc *wsc, int cmd, int val0, int val1, int val2)
{
	struct wi_usb_chain	*c;
	struct wi_usb_softc	*sc = wsc->wi_usb_cdata;
	struct wi_cmdreq	*pcmd;
	int			total_len, rnd_len;
	int			err;

	DPRINTFN(5,("%s: %s: enter cmd=%x %x %x %x\n",
	    sc->wi_usb_dev.dv_xname, __func__, cmd, val0, val1, val2));

	if ((cmd & WI_CMD_CODE_MASK) == WI_CMD_TX) {
		return wi_send_packet(sc, val0);
	}


	if ((cmd & WI_CMD_CODE_MASK) == WI_CMD_INI) {
		/* free alloc_nicmem regions */
		while (sc->wi_usb_nummem) {
			sc->wi_usb_nummem--;
			free(sc->wi_usb_txmem[sc->wi_usb_nummem], M_DEVBUF);
			sc->wi_usb_txmem[sc->wi_usb_nummem] = NULL;
		}

#if 0
		/* if this is the first time, init, otherwise do not?? */
		if (sc->wi_resetonce) {
			return 0;
		} else
			sc->wi_resetonce = 1;
#endif
	}

	wi_usb_ctl_lock(sc);

	wi_usb_tx_lock(sc);

	c = &sc->wi_usb_tx_chain[0];
	pcmd = c->wi_usb_buf;


	total_len = sizeof (struct wi_cmdreq);
	rnd_len = ROUNDUP64(total_len);
	if (rnd_len > WI_USB_BUFSZ) {
		printf("read_record buf size err %x %x\n", 
		    rnd_len, WI_USB_BUFSZ);
		err = EIO;
		goto err_ret;
	}

	sc->cmdresp = cmd;
	sc->cmdresperr = 0;

	pcmd->type = htole16(WI_USB_CMDREQ);
	pcmd->cmd  = htole16(cmd);
	pcmd->param0  = htole16(val0);
	pcmd->param1  = htole16(val1);
	pcmd->param2  = htole16(val2);

	bzero(((char*)pcmd)+total_len, rnd_len - total_len);

	usbd_setup_xfer(c->wi_usb_xfer, sc->wi_usb_ep[WI_USB_ENDPT_TX],
	    c, c->wi_usb_buf, rnd_len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
	    WI_USB_TX_TIMEOUT, wi_usb_txeof);

	err = wi_usb_do_transmit_sync(sc, c, &sc->cmdresperr);

	if (err == 0)
		err = sc->cmdresperr;

	sc->cmdresperr = 0;

err_ret:
	wi_usb_tx_unlock(sc);

	wi_usb_ctl_unlock(sc);

	DPRINTFN(5,("%s: %s: exit err=%x\n",
	    sc->wi_usb_dev.dv_xname, __func__, err));
	return err;
}
Example #5
int
wi_read_record_usb(struct wi_softc *wsc, struct wi_ltv_gen *ltv)
{
	struct wi_usb_chain	*c;
	struct wi_usb_softc	*sc = wsc->wi_usb_cdata;
	struct wi_rridreq	*prid;
	int			total_len, rnd_len;
	int			err;
	struct wi_ltv_gen	*oltv = NULL, p2ltv;

	DPRINTFN(5,("%s: %s: enter rid=%x\n",
	    sc->wi_usb_dev.dv_xname, __func__, ltv->wi_type));

	/* Do we need to deal with these here, as in _io version?
	 * WI_RID_ENCRYPTION -> WI_RID_P2_ENCRYPTION
	 * WI_RID_TX_CRYPT_KEY -> WI_RID_P2_TX_CRYPT_KEY
	 */
	if (wsc->sc_firmware_type != WI_LUCENT) {
		oltv = ltv;
		switch (ltv->wi_type) {
		case WI_RID_ENCRYPTION:
			p2ltv.wi_type = WI_RID_P2_ENCRYPTION;
			p2ltv.wi_len = 2;
			ltv = &p2ltv;
			break;
		case WI_RID_TX_CRYPT_KEY:
			if (ltv->wi_val > WI_NLTV_KEYS)
				return (EINVAL);
			p2ltv.wi_type = WI_RID_P2_TX_CRYPT_KEY;
			p2ltv.wi_len = 2;
			ltv = &p2ltv;
			break;
		}
	}

	wi_usb_tx_lock(sc);

	c = &sc->wi_usb_tx_chain[0];
	prid = c->wi_usb_buf;

	total_len = sizeof(struct wi_rridreq);
	rnd_len = ROUNDUP64(total_len);

	if (rnd_len > WI_USB_BUFSZ) {
		printf("read_record buf size err %x %x\n", 
		    rnd_len, WI_USB_BUFSZ);
		wi_usb_tx_unlock(sc);
		return EIO;
	}

	sc->ridltv = ltv;
	sc->ridresperr = 0;

	prid->type = htole16(WI_USB_RRIDREQ);
	prid->frmlen = htole16(2);	/* variable size? */
	prid->rid  = htole16(ltv->wi_type);

	bzero(((char*)prid)+total_len, rnd_len - total_len);

	usbd_setup_xfer(c->wi_usb_xfer, sc->wi_usb_ep[WI_USB_ENDPT_TX],
	    c, c->wi_usb_buf, rnd_len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
	    WI_USB_TX_TIMEOUT, wi_usb_txeof);

	DPRINTFN(10,("%s: %s: total_len=%x, wilen %d\n",
	    sc->wi_usb_dev.dv_xname, __func__, total_len, ltv->wi_len));

	err = wi_usb_do_transmit_sync(sc, c, &sc->ridresperr);

	/* Do we need to deal with these here, as in _io version?
	 *
	 * WI_RID_TX_RATE
	 * WI_RID_CUR_TX_RATE
	 * WI_RID_ENCRYPTION
	 * WI_RID_TX_CRYPT_KEY
	 * WI_RID_CNFAUTHMODE
	 */
	if (ltv->wi_type == WI_RID_PORTTYPE && wsc->wi_ptype == WI_PORTTYPE_IBSS
	    && ltv->wi_val == wsc->wi_ibss_port) {
		/*
		 * Convert vendor IBSS port type to WI_PORTTYPE_IBSS.
		 * Since Lucent uses port type 1 for BSS *and* IBSS we
		 * have to rely on wi_ptype to distinguish this for us.
		 */
		ltv->wi_val = htole16(WI_PORTTYPE_IBSS);
	} else if (wsc->sc_firmware_type != WI_LUCENT) {
		int v;

		switch (oltv->wi_type) {
		case WI_RID_TX_RATE:
		case WI_RID_CUR_TX_RATE:
			switch (letoh16(ltv->wi_val)) {
			case 1: v = 1; break;
			case 2: v = 2; break;
			case 3:	v = 6; break;
			case 4: v = 5; break;
			case 7: v = 7; break;
			case 8: v = 11; break;
			case 15: v = 3; break;
			default: v = 0x100 + letoh16(ltv->wi_val); break;
			}
			oltv->wi_val = htole16(v);
			break;
		case WI_RID_ENCRYPTION:
			oltv->wi_len = 2;
			if (ltv->wi_val & htole16(0x01))
				oltv->wi_val = htole16(1);
			else
				oltv->wi_val = htole16(0);
			break;
		case WI_RID_TX_CRYPT_KEY:
		case WI_RID_CNFAUTHMODE:
			oltv->wi_len = 2;
			oltv->wi_val = ltv->wi_val;
			break;
		}
	}

	if (err == 0)
		err = sc->ridresperr;

	sc->ridresperr = 0;

	wi_usb_tx_unlock(sc);

	DPRINTFN(5,("%s: %s: exit err=%x\n",
	    sc->wi_usb_dev.dv_xname, __func__, err));
	return err;
}
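
The rate-code switches in Example #3 (write path) and in this read path are inverse mappings between the driver's generic WI_RID_TX_RATE codes and the Prism2 firmware codes. A table-driven restatement with hypothetical helper names makes the round trip explicit:

#include <assert.h>

struct rate_map { int wi; int p2; };

static const struct rate_map rate_tab[] = {
    { 1, 1 }, { 2, 2 }, { 3, 15 }, { 5, 4 }, { 6, 3 }, { 7, 7 }, { 11, 8 },
};

static int wi_to_p2(int wi)
{
    for (unsigned i = 0; i < sizeof(rate_tab) / sizeof(rate_tab[0]); i++)
        if (rate_tab[i].wi == wi)
            return rate_tab[i].p2;
    return -1;                    /* the driver rejects unknown write codes with EINVAL */
}

static int p2_to_wi(int p2)
{
    for (unsigned i = 0; i < sizeof(rate_tab) / sizeof(rate_tab[0]); i++)
        if (rate_tab[i].p2 == p2)
            return rate_tab[i].wi;
    return 0x100 + p2;            /* unknown firmware codes are reported offset by 0x100 */
}

int main(void)
{
    for (unsigned i = 0; i < sizeof(rate_tab) / sizeof(rate_tab[0]); i++)
        assert(p2_to_wi(wi_to_p2(rate_tab[i].wi)) == rate_tab[i].wi);
    return 0;
}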
Example #6
int
wi_send_packet(struct wi_usb_softc *sc, int id)
{
	struct wi_usb_chain	*c;
	struct wi_frame		*wibuf;
	int			total_len, rnd_len;
	int			err;

	c = &sc->wi_usb_tx_chain[0];

	DPRINTFN(10,("%s: %s: id=%x\n",
	    sc->wi_usb_dev.dv_xname, __func__, id));

	/* assemble packet from write_data buffer */
	if (id == 0 || id == 1) {
		/* tx_lock acquired before wi_start() */
		wibuf = sc->wi_usb_txmem[id];

		total_len = sizeof (struct wi_frame) +
		    letoh16(wibuf->wi_dat_len);
		rnd_len = ROUNDUP64(total_len);
		if ((total_len > sc->wi_usb_txmemsize[id]) ||
		   (rnd_len > WI_USB_BUFSZ )){
			printf("invalid packet len: %x memsz %x max %x\n",
			    total_len, sc->wi_usb_txmemsize[id], WI_USB_BUFSZ);

			err = EIO;
			goto err_ret;
		}

		sc->txresp = WI_CMD_TX;
		sc->txresperr = 0;

		bcopy(wibuf, c->wi_usb_buf, total_len);

		bzero(((char *)c->wi_usb_buf)+total_len,
		    rnd_len - total_len);

		/* zero old packet for next TX */
		bzero(wibuf, total_len);

		total_len = rnd_len;

		DPRINTFN(5,("%s: %s: id=%x len=%x\n",
		    sc->wi_usb_dev.dv_xname, __func__, id, total_len));

		usbd_setup_xfer(c->wi_usb_xfer, sc->wi_usb_ep[WI_USB_ENDPT_TX],
		    c, c->wi_usb_buf, rnd_len,
		    USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
		    WI_USB_TX_TIMEOUT, wi_usb_txeof_frm);

		err = usbd_transfer(c->wi_usb_xfer);
		if (err != USBD_IN_PROGRESS && err != USBD_NORMAL_COMPLETION) {
			printf("%s: %s: error=%s\n",
			    sc->wi_usb_dev.dv_xname, __func__,
			    usbd_errstr(err));
			/* Stop the interface from process context. */
			wi_usb_stop(sc);
			err = EIO;
		} else {
			err = 0;
		}

		DPRINTFN(5,("%s: %s: exit err=%x\n",
		    sc->wi_usb_dev.dv_xname, __func__, err));
err_ret:
		return err;
	}
	printf("%s:%s: invalid packet id sent %x\n",
	    sc->wi_usb_dev.dv_xname, __func__, id);
	return 0;
}
Example #7
__private_extern__ int
get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo)
{
	int error = 0;
	int i, n;
	struct inpcb *inp, **inp_list = NULL;
	inp_gen_t gencnt;
	struct xinpgen xig;
	void *buf = NULL;
	size_t item_size = ROUNDUP64(sizeof (struct xinpcb_n)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	if (proto == IPPROTO_TCP)
		item_size += ROUNDUP64(sizeof (struct xtcpcb_n));

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = pcbinfo->ipi_count;
		req->oldidx = 2 * (sizeof (xig)) + (n + n/8) * item_size;
		goto done;
	}

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = pcbinfo->ipi_gencnt;
	n = pcbinfo->ipi_count;

	bzero(&xig, sizeof (xig));
	xig.xig_len = sizeof (xig);
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof (xig));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK);
	if (buf == NULL) {
		error = ENOMEM;
		goto done;
	}

	inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK);
	if (inp_list == NULL) {
		error = ENOMEM;
		goto done;
	}

	for (inp = pcbinfo->ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb_n *xi = (struct xinpcb_n *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xi, sizeof (*xi));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			inpcb_to_xinpcb_n(inp, xi);
			sotoxsocket_n(inp->inp_socket, xso);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(inp->inp_socket, xsostats);
			if (proto == IPPROTO_TCP) {
				struct  xtcpcb_n *xt = (struct xtcpcb_n *)
				    ADVANCE64(xsostats, sizeof (*xsostats));

				/*
				 * inp->inp_ppcb can only be NULL during
				 * an initialization race window.
				 * No need to lock.
				 */
				if (inp->inp_ppcb == NULL)
					continue;

				tcpcb_to_xtcpcb_n((struct tcpcb *)
				    inp->inp_ppcb, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof (xig));
		xig.xig_len = sizeof (xig);
		xig.xig_gen = pcbinfo->ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = pcbinfo->ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof (xig));
	}
done:
	lck_rw_done(pcbinfo->ipi_lock);
	if (inp_list != NULL)
		FREE(inp_list, M_TEMP);
	if (buf != NULL)
		FREE(buf, M_TEMP);
	return (error);
}
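
item_size above is the sum of the ROUNDUP64()-padded sizes of every sub-record, so one heap buffer can be carved into 8-byte-aligned slices with ADVANCE64(). ADVANCE64() is not defined in the excerpt; the sketch below assumes it advances a pointer by ROUNDUP64() of the previous record's size, and uses placeholder record types in place of the real xinpcb_n/xsocket_n structures.

#include <stdint.h>
#include <stdlib.h>

#define ROUNDUP64(x)    (((x) + 7u) & ~(size_t)7)               /* assumed 8-byte round-up */
#define ADVANCE64(p, n) ((void *)((char *)(p) + ROUNDUP64(n)))  /* assumed definition */

struct rec_a { uint32_t a; };    /* stand-ins for xinpcb_n, xsocket_n, ... */
struct rec_b { uint16_t b; };
struct rec_c { uint64_t c; };

int main(void)
{
    size_t item_size = ROUNDUP64(sizeof(struct rec_a)) +
                       ROUNDUP64(sizeof(struct rec_b)) +
                       ROUNDUP64(sizeof(struct rec_c));

    /* calloc zeroes the block, mirroring the bzero(buf, item_size) above */
    void *buf = calloc(1, item_size);
    if (buf == NULL)
        return 1;

    struct rec_a *ra = buf;
    struct rec_b *rb = ADVANCE64(ra, sizeof(*ra));
    struct rec_c *rc = ADVANCE64(rb, sizeof(*rb));

    ra->a = 1; rb->b = 2; rc->c = 3;    /* every sub-record starts 8-byte aligned */

    free(buf);
    return 0;
}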
Example #8
/*=========================================================================*/
glink_err_type xport_rpm_isr( xport_rpm_ctx_type *ctx_ptr )
{
  uint32 write_ind, read_ind;
  boolean stop_processing = FALSE;

  if (ctx_ptr->reset == TRUE)
  {
    /* reset flag has been set after SSR, notify link up */
    ctx_ptr->reset = FALSE;
    ctx_ptr->xport_if.glink_core_if_ptr->link_up((glink_transport_if_type *)ctx_ptr);
  }

  glink_os_cs_acquire(ctx_ptr->rx_link_lock);

  /* Process pending commands and data */
  write_ind = ctx_ptr->rx_desc->write_ind;
  read_ind = ctx_ptr->rx_desc->read_ind;

  XPORT_RPM_LOG("RPM ISR write ind", ctx_ptr->pcfg->remote_ss, write_ind);
  XPORT_RPM_LOG("RPM ISR read ind", ctx_ptr->pcfg->remote_ss, read_ind);

  /* Ensure the index is 64-bit aligned */
  if ((write_ind & 0x7) != 0)
  {
    dprintf(CRITICAL,"%s:%d: Write Index is not aligned: %u\n",__func__, __LINE__, write_ind);
    ASSERT(0);
  }

  while (write_ind != read_ind && !stop_processing)
  {
    uint32 cmd = MSGRAM_READ32(ctx_ptr, read_ind);
    uint32 cid = XPORT_RPM_GET_CHANNEL_ID(cmd); // most commands have channel ID
    uint32 cmd_arg;

    /* it can't wrap around here so just increment the index */
    read_ind += sizeof(cmd);

    XPORT_RPM_LOG("Cmd Rx", ctx_ptr->pcfg->remote_ss, cmd);

    switch (XPORT_RPM_GET_CMD_ID(cmd))
    {
      case XPORT_RPM_CMD_VERSION_REQ:

        cmd_arg = MSGRAM_READ32(ctx_ptr, read_ind);

        /* no need to increment read_ind here since it will be rounded up */

        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_version(
          (glink_transport_if_type *)ctx_ptr,
          XPORT_RPM_GET_VERSION(cmd), cmd_arg);
        break;

      case XPORT_RPM_CMD_VERSION_ACK:

        cmd_arg = MSGRAM_READ32(ctx_ptr, read_ind);

        /* no need to increment read_ind here since it will be rounded up */

        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_version_ack(
          (glink_transport_if_type *)ctx_ptr,
          XPORT_RPM_GET_VERSION(cmd), cmd_arg);
        break;

      case XPORT_RPM_CMD_OPEN_CHANNEL:
        cmd_arg = MSGRAM_READ32(ctx_ptr, read_ind);
        cmd_arg = ROUNDUP64(cmd_arg);

        read_ind += sizeof(cmd_arg);

        /* channel name should fit into the FIFO */
        if (cmd_arg == 0 || cmd_arg >= ctx_ptr->rx_fifo_size)
        {
          dprintf(CRITICAL, "%s:%d: Invalid name length: %u", __func__, __LINE__, cmd_arg);
          ASSERT(0);
        }
        else
        {
          char  temp_string[ROUNDUP64(GLINK_CH_NAME_LEN)] = {0};
          uint32 num_copied_chars = 0;
          uint32  *string_ptr;

          string_ptr = ( uint32 * )&temp_string[0];
          while( ( num_copied_chars < cmd_arg ) && ( num_copied_chars < sizeof( temp_string ) ) )
          {
            CHECK_INDEX_WRAP_AROUND( read_ind, ctx_ptr->rx_fifo_size );
            *( string_ptr++ ) = MSGRAM_READ32( ctx_ptr, read_ind );

            num_copied_chars += sizeof( uint32 );
            read_ind += sizeof( uint32 );
          }

          /* add all the unread stuff */
          read_ind += cmd_arg - num_copied_chars;

          /* make sure the last character is NULL */
          temp_string[ sizeof( temp_string ) - 1 ] = 0;

          ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
            (glink_transport_if_type *)ctx_ptr, cid, temp_string, GLINK_XPORT_RPM);
        }

        break;

      case XPORT_RPM_CMD_CLOSE_CHANNEL:

        /* no need to increment read_ind here since it will be rounded up */
        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_ch_remote_close(
          (glink_transport_if_type *)ctx_ptr, cid);

        break;

      case XPORT_RPM_CMD_OPEN_CHANNEL_ACK:

        /* no need to increment read_ind here since it will be rounded up */
        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
            (glink_transport_if_type *)ctx_ptr, cid, GLINK_XPORT_RPM);

        break;

      case XPORT_RPM_CMD_CLOSE_CHANNEL_ACK:
        /* no need to increment read_ind here since it will be rounded up */
        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
          (glink_transport_if_type *)ctx_ptr, cid);

        break;

      case XPORT_RPM_CMD_TX_DATA:
      {
        glink_rx_intent_type desc;
        memset( &desc, 0, sizeof( glink_rx_intent_type ) );

        read_ind += sizeof(cmd_arg);

        CHECK_INDEX_WRAP_AROUND(read_ind, ctx_ptr->rx_fifo_size);

        cmd_arg = MSGRAM_READ32(ctx_ptr, read_ind);

        /* packet data should fit into the FIFO */
        if (cmd_arg >= ctx_ptr->rx_fifo_size)
        {
          dprintf(CRITICAL, "%s:%d: Invalid packet length: %u",__func__, __LINE__, cmd_arg);
          ASSERT(0);
        }

        read_ind += 2*sizeof(cmd_arg);

        CHECK_INDEX_WRAP_AROUND(read_ind, ctx_ptr->rx_fifo_size);

        ctx_ptr->pkt_start_ind = read_ind;
        ctx_ptr->pkt_size = cmd_arg;

        desc.size = cmd_arg;
        desc.used = cmd_arg;
        desc.pkt_sz = cmd_arg;
        desc.iovec = ctx_ptr;
        desc.vprovider = xport_rpm_pkt_provider;

        read_ind += cmd_arg;

        ctx_ptr->xport_if.glink_core_if_ptr->rx_put_pkt_ctx(
          (glink_transport_if_type *)ctx_ptr, cid,
          &desc, TRUE);

        /* If interrupt was disabled then stop delivering messages */
        stop_processing = ctx_ptr->irq_mask;

        break;
      }

      case XPORT_RPM_CMD_TX_SIGNALS:

        cmd_arg = MSGRAM_READ32(ctx_ptr, read_ind);

        /* no need to increment read_ind here since it will be rounded up */

        ctx_ptr->xport_if.glink_core_if_ptr->rx_cmd_remote_sigs(
          (glink_transport_if_type *)ctx_ptr,
          cid, cmd_arg);
        break;

      default:
        dprintf(CRITICAL, "%s:%d: Invalid Command: %u\n",__func__, __LINE__, cmd);
        ASSERT(0);
        break;
    }

    /* Update read index only if transport has not been reset  */
    if( !ctx_ptr->reset )
    {
      read_ind = ROUNDUP64(read_ind);

      if (read_ind >= ctx_ptr->rx_fifo_size)
      {
        read_ind -= ctx_ptr->rx_fifo_size;
      }

      /* Update the shared read index */
      ctx_ptr->rx_desc->read_ind = read_ind;
    }
    else
    {
      stop_processing = TRUE;
    }
  }

  glink_os_cs_release(ctx_ptr->rx_link_lock);

  return GLINK_STATUS_SUCCESS;
}
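
After each command the ISR rounds its read index back up to a 64-bit boundary and wraps it at the FIFO size, which is why the entry check insists that (write_ind & 0x7) == 0. A small sketch of just that index arithmetic; the helper name and FIFO size are illustrative only.

#include <assert.h>
#include <stdint.h>

#define ROUNDUP64(x)  (((x) + 7u) & ~7u)   /* assumed 8-byte round-up */

static uint32_t advance_read_index(uint32_t read_ind, uint32_t consumed,
                                   uint32_t fifo_size)
{
    read_ind += consumed;             /* bytes consumed by this command */
    read_ind = ROUNDUP64(read_ind);   /* next command starts 64-bit aligned */
    if (read_ind >= fifo_size)        /* wrap once, as the ISR does */
        read_ind -= fifo_size;
    return read_ind;
}

int main(void)
{
    /* already aligned: nothing to round */
    assert(advance_read_index(0, 8, 1024) == 8);
    /* 17 consumed bytes round up to the next 64-bit boundary */
    assert(advance_read_index(0, 17, 1024) == 24);
    /* rounding can push the index past the end, where it wraps */
    assert(advance_read_index(1016, 12, 1024) == 8);
    return 0;
}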
Example #9
/*=========================================================================*/
static glink_err_type xport_rpm_send_cmd
(
  xport_rpm_ctx_type  *ctx_ptr,
  uint32              *cmd,
  uint32               cmd_size,
  uint32              *data,
  uint32               data_size
)
{
  uint32 total_size = cmd_size + data_size;
  uint32 reserve_size = ROUNDUP64(total_size);
  uint32 write_ind, read_ind, avail_size;

  glink_os_cs_acquire(ctx_ptr->tx_link_lock);

  /* Transport is in reset */
  if( ctx_ptr->reset )
  {
    glink_os_cs_release(ctx_ptr->tx_link_lock);
    return GLINK_STATUS_SUCCESS;
  }

  write_ind = ctx_ptr->tx_desc->write_ind;
  read_ind = ctx_ptr->tx_desc->read_ind;
  avail_size = write_ind < read_ind ? read_ind - write_ind :
               ctx_ptr->tx_fifo_size - write_ind + read_ind;

  if (reserve_size + sizeof(uint64) > avail_size)
  {
    glink_os_cs_release(ctx_ptr->tx_link_lock);
    return GLINK_STATUS_OUT_OF_RESOURCES;
  }

  XPORT_RPM_LOG("send cmd", ctx_ptr->pcfg->remote_ss, cmd[0]);

  write_ind
    = xport_rpm_write_msgram( ctx_ptr, write_ind,
                              cmd, ROUNDUP32( cmd_size ) );

  if (data != NULL)
  {
    write_ind
      = xport_rpm_write_msgram( ctx_ptr, write_ind,
                                data, ROUNDUP32( data_size ) );
  }

  /* add alignment bytes to Tx FIFO */
  write_ind += (reserve_size - total_size) & (~3);

  if (write_ind >= ctx_ptr->tx_fifo_size)
  {
    write_ind -= ctx_ptr->tx_fifo_size;
  }

  ctx_ptr->tx_desc->write_ind = write_ind;

  xport_rpm_send_event(ctx_ptr);

  glink_os_cs_release(ctx_ptr->tx_link_lock);

  return GLINK_STATUS_SUCCESS;
}
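
The free-space test above compares the 8-byte-rounded message size against the gap between the write and read indices and additionally keeps sizeof(uint64) in reserve, presumably so the write index can never catch up with the read index and make a full FIFO indistinguishable from an empty one (the excerpt does not state this; it is an assumption). A sketch of that computation with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define ROUNDUP64(x)  (((x) + 7u) & ~7u)   /* assumed 8-byte round-up */

static int fifo_can_write(uint32_t write_ind, uint32_t read_ind,
                          uint32_t fifo_size, uint32_t total_size)
{
    uint32_t reserve_size = ROUNDUP64(total_size);
    uint32_t avail_size = write_ind < read_ind ?
        read_ind - write_ind :                    /* free space is one contiguous span */
        fifo_size - write_ind + read_ind;         /* free space wraps past the end */

    return reserve_size + sizeof(uint64_t) <= avail_size;
}

int main(void)
{
    /* write == read is treated as an empty FIFO; only the guard slot is off limits */
    assert(fifo_can_write(0, 0, 64, 48));
    assert(!fifo_can_write(0, 0, 64, 60));
    /* writer ahead of reader: free space wraps around the end */
    assert(fifo_can_write(56, 32, 64, 24));
    return 0;
}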
Example #10
void
systmpr(uint32_t proto,
	char *name, int af)
{
	const char *mibvar;
	size_t len;
	char *buf, *next;
	struct xsystmgen *xig, *oxig;
	struct xgen_n *xgn;
	int which = 0;
	struct xsocket_n *so = NULL;
	struct xsockbuf_n *so_rcv = NULL;
	struct xsockbuf_n *so_snd = NULL;
	struct xsockstat_n *so_stat = NULL;
	struct xkctl_reg *kctl = NULL;
	struct xkctlpcb *kcb = NULL;
	struct xkevtpcb *kevb = NULL;
	int first = 1;
	
	switch (proto) {
		case SYSPROTO_EVENT:
                        mibvar = "net.systm.kevt.pcblist";
			break;
		case SYSPROTO_CONTROL:
			mibvar = "net.systm.kctl.pcblist";
			break;
		case 0:
			mibvar = "net.systm.kctl.reg_list";
			break;
		default:
			mibvar = NULL;
			break;
	}
	if (mibvar == NULL)
		return;
	len = 0;
	if (sysctlbyname(mibvar, 0, &len, 0, 0) < 0) {
		if (errno != ENOENT)
			warn("sysctl: %s", mibvar);
		return;
	}
	if ((buf = malloc(len)) == 0) {
		warn("malloc %lu bytes", (u_long)len);
		return;
	}
	if (sysctlbyname(mibvar, buf, &len, 0, 0) < 0) {
		warn("sysctl: %s", mibvar);
		free(buf);
		return;
	}
	/*
	 * Bail-out to avoid logic error in the loop below when
	 * there is in fact no more control block to process
	 */
	if (len <= sizeof(struct xsystmgen)) {
		free(buf);
		return;
	}
	oxig = xig = (struct xsystmgen *)buf;
	for (next = buf + ROUNDUP64(xig->xg_len); next < buf + len;
	     next += ROUNDUP64(xgn->xgn_len)) {
		xgn = (struct xgen_n*)next;
		if (xgn->xgn_len <= sizeof(struct xsystmgen))
			break;
		
		if ((which & xgn->xgn_kind) == 0) {
			which |= xgn->xgn_kind;
			switch (xgn->xgn_kind) {
				case XSO_SOCKET:
					so = (struct xsocket_n *)xgn;
					break;
				case XSO_RCVBUF:
					so_rcv = (struct xsockbuf_n *)xgn;
					break;
				case XSO_SNDBUF:
					so_snd = (struct xsockbuf_n *)xgn;
					break;
				case XSO_STATS:
					so_stat = (struct xsockstat_n *)xgn;
					break;
				case XSO_KCREG:
					kctl = (struct xkctl_reg *)xgn;
					break;
				case XSO_KCB:
					kcb = (struct xkctlpcb *)xgn;
					break;
				case XSO_EVT:
					kevb = (struct xkevtpcb *)xgn;
					break;
				default:
					printf("unexpected kind %d\n", xgn->xgn_kind);
					break;
			}
		} else {
			if (vflag)
				printf("got %d twice\n", xgn->xgn_kind);
		}
		
		if (which == ALL_XGN_KIND_KCREG) {
			which = 0;
			
			if (first) {
				printf("Registered kernel control modules\n");
				if (Aflag)
					printf("%-16.16s ", "kctlref");
				printf("%-8.8s ", "id");
				if (Aflag)
					printf("%-8.8s ", "unit");
				printf("%-8.8s ", "flags");
				printf("%-8.8s ", "pcbcount");
				printf("%-8.8s ", "rcvbuf");
				printf("%-8.8s ", "sndbuf");
				printf("%s ", "name");
				printf("\n");
				first = 0;
			}
			if (Aflag)
				printf("%16llx ", kctl->xkr_kctlref);
			printf("%8x ", kctl->xkr_id);
			if (Aflag)
				printf("%8d ", kctl->xkr_reg_unit);
			printf("%8x ", kctl->xkr_flags);
			printf("%8d ", kctl->xkr_pcbcount);
			printf("%8d ", kctl->xkr_recvbufsize);
			printf("%8d ", kctl->xkr_sendbufsize);
			printf("%s ", kctl->xkr_name);
			printf("\n");
		} else if (which == ALL_XGN_KIND_KCB) {
			which = 0;
			
			if (first) {
				printf("Active kernel control sockets\n");
				if (Aflag)
					printf("%16.16s ", "pcb");
				printf("%-5.5s %-6.6s %-6.6s ",
				        "Proto", "Recv-Q", "Send-Q");
				if (bflag > 0)
					printf("%10.10s %10.10s ",
					      "rxbytes", "txbytes");
				if (vflag > 0)
					printf("%6.6s %6.6s %6.6s %6.6s ",
					       "rhiwat", "shiwat", "pid", "epid");
				printf("%6.6s ", "unit");
				printf("%6.6s ", "id");
				printf("%s", "name");
				printf("\n");
				first = 0;
			}
			if (Aflag)
				printf("%16llx ", kcb->xkp_kctpcb);
			printf("%-5.5s %6u %6u ", name,
			       so_rcv->sb_cc,
			       so_snd->sb_cc);
			if (bflag > 0) {
				int i;
				u_int64_t rxbytes = 0;
				u_int64_t txbytes = 0;
				
				for (i = 0; i < SO_TC_STATS_MAX; i++) {
					rxbytes += so_stat->xst_tc_stats[i].rxbytes;
					txbytes += so_stat->xst_tc_stats[i].txbytes;
				}
				printf("%10llu %10llu ", rxbytes, txbytes);
			}
			if (vflag > 0) {
				printf("%6u %6u %6u %6u ",
				       so_rcv->sb_hiwat,
				       so_snd->sb_hiwat,
				       so->so_last_pid,
				       so->so_e_pid);
			}
			printf("%6d ", kcb->xkp_unit);
			printf("%6d ", kcb->xkp_kctlid);
			printf("%s", kcb->xkp_kctlname);
			printf("\n");
			
		} else if (which == ALL_XGN_KIND_EVT) {
			which = 0;
			if (first) {
				printf("Active kernel event sockets\n");
				if (Aflag)
					printf("%16.16s ", "pcb");
				printf("%-5.5s %-6.6s %-6.6s ",
				       "Proto", "Recv-Q", "Send-Q");
				printf("%6.6s ", "vendor");
				printf("%6.6s ", "class");
				printf("%6.6s", "subclass");
				if (bflag > 0)
					printf("%10.10s %10.10s ",
					       "rxbytes", "txbytes");
				if (vflag > 0)
					printf("%6.6s %6.6s %6.6s %6.6s",
					       "rhiwat", "shiwat", "pid", "epid");
				printf("\n");
				first = 0;
			}
			if (Aflag)
				printf("%16llx ", kevb->kep_evtpcb);
			printf("%-5.5s %6u %6u ", name,
			       so_rcv->sb_cc,
			       so_snd->sb_cc);
			printf("%6d ", kevb->kep_vendor_code_filter);
			printf("%6d ", kevb->kep_class_filter);
			printf("%6d", kevb->kep_subclass_filter);
			if (bflag > 0) {
				int i;
				u_int64_t rxbytes = 0;
				u_int64_t txbytes = 0;
				
				for (i = 0; i < SO_TC_STATS_MAX; i++) {
					rxbytes += so_stat->xst_tc_stats[i].rxbytes;
					txbytes += so_stat->xst_tc_stats[i].txbytes;
				}
				printf("%10llu %10llu ", rxbytes, txbytes);
			}
			if (vflag > 0) {
				printf("%6u %6u %6u %6u",
				       so_rcv->sb_hiwat,
				       so_snd->sb_hiwat,
				       so->so_last_pid,
				       so->so_e_pid);
			}
			printf("\n");
		}
			
	}
	if (xig != oxig && xig->xg_gen != oxig->xg_gen) {
		if (oxig->xg_count > xig->xg_count) {
			printf("Some %s sockets may have been deleted.\n",
			       name);
		} else if (oxig->xg_count < xig->xg_count) {
			printf("Some %s sockets may have been created.\n",
			       name);
		} else {
			printf("Some %s sockets may have been created or deleted",
			       name);
		}
	}
	free(buf);
}
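
The parsing loop above walks a sysctl buffer of variable-length records, advancing by ROUNDUP64(xgn->xgn_len) so that every record starts on an 8-byte boundary. A stand-alone sketch of that record walk, using a placeholder header in place of the real xgen_n structure:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROUNDUP64(x)  (((x) + 7u) & ~(size_t)7)   /* assumed 8-byte round-up */

struct rec_hdr {
    uint32_t len;     /* total record length, including this header */
    uint32_t kind;
};

static void walk_records(const char *buf, size_t buflen)
{
    size_t off = 0;

    while (off < buflen && buflen - off >= sizeof(struct rec_hdr)) {
        struct rec_hdr hdr;

        memcpy(&hdr, buf + off, sizeof(hdr));   /* avoid unaligned access */
        if (hdr.len < sizeof(hdr) || hdr.len > buflen - off)
            break;                              /* malformed record: stop the walk */
        printf("record kind %u, %u bytes\n", (unsigned)hdr.kind, (unsigned)hdr.len);
        off += ROUNDUP64(hdr.len);              /* next record is 8-byte aligned */
    }
}

int main(void)
{
    /* two records: 12 bytes (padded to 16) followed by 8 bytes */
    char buf[24] = { 0 };
    struct rec_hdr a = { 12, 1 }, b = { 8, 2 };

    memcpy(buf, &a, sizeof(a));
    memcpy(buf + 16, &b, sizeof(b));
    walk_records(buf, sizeof(buf));
    return 0;
}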