Example #1
void endian_adjust_shdrs(Elf32_Shdr *shdr,int nentries,int flip)
{
	int i;

	if (flip==0) return;

	for (i=0;i<nentries;i++) {
		/*  Elf32_Word      sh_name; */
		ENDIAN_SWAP32(&shdr[i].sh_name);
		/*  ELF32_S_TYPE    sh_type; */
		ENDIAN_SWAP32(&shdr[i].sh_type);
		/*  ELF32_S_FLAGS   sh_flags; */
		ENDIAN_SWAP32(&shdr[i].sh_flags);
		/*  Elf32_Addr      sh_addr; */
		ENDIAN_SWAP32(&shdr[i].sh_addr);
		/*  Elf32_Off       sh_offset; */
		ENDIAN_SWAP32(&shdr[i].sh_offset);
		/*  Elf32_Word      sh_size; */
		ENDIAN_SWAP32(&shdr[i].sh_size);
		/*  Elf32_Word      sh_link; */
		ENDIAN_SWAP32(&shdr[i].sh_link);
		/*  Elf32_Word      sh_info; */
		ENDIAN_SWAP32(&shdr[i].sh_info);
		/*  Elf32_Word      sh_addralign */
		ENDIAN_SWAP32(&shdr[i].sh_addralign);
		/*  Elf32_Word      sh_entsize; */
		ENDIAN_SWAP32(&shdr[i].sh_entsize);
	}

	return;
}
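
Example #1 (like several of the later excerpts) uses ENDIAN_SWAP32 as an in-place operation that takes a pointer to the field. The macros themselves are not part of these excerpts; below is a minimal sketch of compatible in-place helpers, assuming plain C and 16-/32-bit unsigned fields (the names and definitions are assumptions, not the original project's macros):

#include <stdint.h>
#include <string.h>

/* Hypothetical in-place byte-swap helpers compatible with the pointer-style
 * usage above (ENDIAN_SWAP32(&field)). A sketch, not the project's macros. */
static void endian_swap16_inplace(void *p)
{
    uint16_t v;
    memcpy(&v, p, sizeof v);
    v = (uint16_t)((v >> 8) | (v << 8));
    memcpy(p, &v, sizeof v);
}

static void endian_swap32_inplace(void *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof v);
    v = (v >> 24) | ((v >> 8) & 0x0000FF00u) |
        ((v << 8) & 0x00FF0000u) | (v << 24);
    memcpy(p, &v, sizeof v);
}

#define ENDIAN_SWAP16(p) endian_swap16_inplace(p)
#define ENDIAN_SWAP32(p) endian_swap32_inplace(p)
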
Example #2
static void		write_in_dat_file(void* data, void *param)
{
	FILE *file = (FILE*)param;
	road_t	*road = (road_t*)data;
	uint16_t	datalen;
	uint32_t	linkid = ENDIAN_SWAP32(road->link_id);
	uint16_t	len;
	uint32_t	last_bytes = 0;

	datalen = (road->name ? road->length + 2 : 0) + 12;
	len = (road->name ? ENDIAN_SWAP16(datalen - 12) : 0);

	datalen = ENDIAN_SWAP16(datalen);

	
	fwrite(&datalen, sizeof(datalen), 1, file);
	fwrite(&linkid, sizeof(linkid), 1, file);
	fwrite(&len, sizeof(len), 1, file);

	if (ENDIAN_SWAP16(datalen) > 12)
	{
		last_bytes = 1;
		last_bytes = last_bytes << 3;
	}
	last_bytes |= road->crossings;
	last_bytes = last_bytes << 4;
	last_bytes |= road->if_class;
	last_bytes = ENDIAN_SWAP32(last_bytes);

	fwrite(&last_bytes, sizeof(last_bytes), 1, file);
	if (road->name && road->length)
		fwrite(road->name, road->length + 2, 1, file);
}	
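
Note that the calling convention here differs from Example #1: ENDIAN_SWAP16 and ENDIAN_SWAP32 take a value and return the swapped result instead of modifying a field through a pointer. A minimal sketch of value-returning helpers consistent with this usage (the inline functions and their names are assumptions, not the project's definitions):

#include <stdint.h>

/* Hypothetical value-returning variants matching x = ENDIAN_SWAP32(v). */
static inline uint16_t endian_swap16_val(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

static inline uint32_t endian_swap32_val(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
           ((v << 8) & 0x00FF0000u) | (v << 24);
}
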
Example #3
void flip_endian_xblk(qnx4fs_xblk_t *xbptr)
{
	int i;

	ENDIAN_SWAP32(&xbptr->xblk_next_xblk);
	ENDIAN_SWAP32(&xbptr->xblk_prev_xblk);
	/* xbptr->xblk_num_xtnts is unsigned char */
	/* xbptr->xblk_spare[3] doesn't need swapping (and isn't dword aligned) */
	ENDIAN_SWAP32(&xbptr->xblk_num_blocks);
	for (i=0;i<QNX4FS_MAX_XTNTS_PER_XBLK;i++)
		flip_endian_xtnt(&xbptr->xblk_xtnts[i]);
	/* xbptr->xblk_signature[8] is char and doesn't need swapping */
	flip_endian_xtnt(&xbptr->xblk_first_xtnt);
}
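
The swaps and comments above imply the on-disk layout of a QNX4 extent block. The following is a rough reconstruction for orientation only, with field widths inferred from the code (32-bit where a swap is applied, bytes where the comments say so); it is not taken from the official QNX4 filesystem headers. The extent structure is the one handled by flip_endian_xtnt() in Example #14 below.

#include <stdint.h>

#ifndef QNX4FS_MAX_XTNTS_PER_XBLK
#define QNX4FS_MAX_XTNTS_PER_XBLK 60   /* assumed value; the project defines its own */
#endif

/* Approximate layouts inferred from the code above, not official headers. */
typedef struct {
    uint32_t xtnt_blk;      /* first block of the extent (swapped in Example #14) */
    uint32_t xtnt_size;     /* number of blocks in the extent */
} qnx4fs_xtnt_t;

typedef struct {
    uint32_t      xblk_next_xblk;     /* swapped */
    uint32_t      xblk_prev_xblk;     /* swapped */
    unsigned char xblk_num_xtnts;     /* single byte, no swap needed */
    char          xblk_spare[3];      /* padding, not dword aligned */
    uint32_t      xblk_num_blocks;    /* swapped */
    qnx4fs_xtnt_t xblk_xtnts[QNX4FS_MAX_XTNTS_PER_XBLK];
    char          xblk_signature[8];  /* text signature, no swap */
    qnx4fs_xtnt_t xblk_first_xtnt;
} qnx4fs_xblk_t;
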
Example #4
int wlTxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;
    int num;

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "initializing %i descriptors", MAX_NUM_TX_DESC);
    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        QUEUE_INIT(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] =0;
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL)
        {
            for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++)
            {
                CURR_TXD(num).Status    = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
                CURR_TXD(num).pNext     = &NEXT_TXD(num);
                CURR_TXD(num).pPhysNext =
                    ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing +
                                  ((currDescr+1)*sizeof(wltxdesc_t)));
                WLDBG_INFO(DBG_LEVEL_12,
                           "txdesc: %i status: 0x%x (%i) vnext: 0x%p pnext: 0x%x",
                           currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                           CURR_TXD(num).pNext, ENDIAN_SWAP32(CURR_TXD(num).pPhysNext));
            }
            LAST_TXD(num).pNext = &FIRST_TXD(num);
            LAST_TXD(num).pPhysNext =
                ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc = &FIRST_TXD(num);
            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc  = &FIRST_TXD(num);

            WLDBG_EXIT_INFO(DBG_LEVEL_12,
                            "last txdesc vnext: 0x%p pnext: 0x%x pstale 0x%x vfirst 0x%x",
                            LAST_TXD(num).pNext, ENDIAN_SWAP32(LAST_TXD(num).pPhysNext),
                            ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pStaleTxDesc, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pNextTxDesc);
        }
        else
        {
            WLDBG_ERROR(DBG_LEVEL_12, "no valid TX mem");
            return FAIL;
        }
    }
    return SUCCESS;
}
Example #5
void endian_adjust_ehdr(Elf32_Ehdr *ehdr,int flip)
{
	if (flip==0) return;

	/*  unsigned char   e_ident[EI_NIDENT] */
	/*  ELF32_E_TYPE    e_type             */
	/*  (elf32-half) ELF32_E_MACHINE e_machine */
	ENDIAN_SWAP16(&ehdr->e_machine);

	/*  ELF32_E_VERSION e_version */
	ENDIAN_SWAP32(&ehdr->e_version);

	/*  Elf32_Addr      e_entry; */
	ENDIAN_SWAP32(&ehdr->e_entry);

	/*  Elf32_Off       e_phoff; */
	ENDIAN_SWAP32(&ehdr->e_phoff);

	/*  Elf32_Off       e_shoff; */
	ENDIAN_SWAP32(&ehdr->e_shoff);

	/*  Elf32_Word      e_flags; */
	ENDIAN_SWAP32(&ehdr->e_flags);

	/*  Elf32_Half      e_ehsize; */
	ENDIAN_SWAP16(&ehdr->e_ehsize);

	/*  Elf32_Half      e_phentsize; */
	ENDIAN_SWAP16(&ehdr->e_phentsize);

	/*  Elf32_Half      e_phnum; */
	ENDIAN_SWAP16(&ehdr->e_phnum);

	/*  Elf32_Half      e_shentsize */
	ENDIAN_SWAP16(&ehdr->e_shentsize);

	/*  Elf32_Half      e_shnum; */
	ENDIAN_SWAP16(&ehdr->e_shnum);

	/*  Elf32_Half      e_shstrndx; */
	ENDIAN_SWAP16(&ehdr->e_shstrndx);

	return;
}
Example #6
void wlTxRingCleanup(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int cleanedTxDescr = 0;
    int currDescr;
    int num;

    WLDBG_ENTER(DBG_LEVEL_12);

    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {
        QUEUE_PURGE(&((struct wlprivate_data *)(wlpptr->wlpd_p))->txQ[num]);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->fwDescCnt[num] =0;
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing != NULL)
        {
            for (currDescr = 0; currDescr < MAX_NUM_TX_DESC; currDescr++)
            {
                if (CURR_TXD(num).pSkBuff != NULL)
                {
                    WLDBG_INFO(DBG_LEVEL_12,
                               "unmapped and free'd txdesc %i vaddr: 0x%p paddr: 0x%x",
                               currDescr, CURR_TXD(num).pSkBuff->data,
                               ENDIAN_SWAP32(CURR_TXD(num).PktPtr));
                    pci_unmap_single(wlpptr->pPciDev,
                                     ENDIAN_SWAP32(CURR_TXD(num).PktPtr),
                                     CURR_TXD(num).pSkBuff->len,
                                     PCI_DMA_TODEVICE);
                    {
                        WL_SKB_FREE(CURR_TXD(num).pSkBuff);
                    }
                    CURR_TXD(num).Status    = ENDIAN_SWAP32(EAGLE_TXD_STATUS_IDLE);
                    CURR_TXD(num).pSkBuff   = NULL;
                    CURR_TXD(num).PktPtr    = 0;
                    CURR_TXD(num).PktLen    = 0;
                    cleanedTxDescr++;
                }
            }
        }
    }
    WLDBG_EXIT_INFO(DBG_LEVEL_12, "cleaned %i TX descr", cleanedTxDescr);
}
Example #7
//Print the fields of a received NTP packet (32-bit fields are byte-swapped from network order for display)
void showReceNtpMsg(ntp_packet * msg)
{
    printf("LiVnMode: %d\n", msg->LiVnMode);
    printf("stratum: %d\n", msg->stratum);
    printf("poll: %d\n", msg->poll);
    printf("precision: %d\n", msg->precision);
    printf("rootDelay: %u\n", ENDIAN_SWAP32(msg->rootDelay));
    printf("rootDispersion: %u\n", ENDIAN_SWAP32(msg->rootDispersion));
    printf("refId: %u.", msg->refId[0]);
    printf("%u", msg->refId[1]);
    printf("%u", msg->refId[2]);
    printf("%u\n", msg->refId[3]);
    printf("refTm_s: %u\n", ENDIAN_SWAP32(msg->refTm_s));
    printf("refTm_f: %u\n", ENDIAN_SWAP32(msg->refTm_f));
    printf("origTm_s: %u\n", ENDIAN_SWAP32(msg->origTm_s));
    printf("origTm_f: %u\n", ENDIAN_SWAP32(msg->origTm_f));
    printf("rxTm_s: %u\n", ENDIAN_SWAP32(msg->rxTm_s));
    printf("rxTm_f: %u\n", ENDIAN_SWAP32(msg->rxTm_f));
    printf("txTm_s: %u\n", ENDIAN_SWAP32(msg->txTm_s));
    printf("txTm_f: %u\n", ENDIAN_SWAP32(msg->txTm_f));
}
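
The dump above touches every field of the 48-byte NTP header; the 32-bit fields arrive in network (big-endian) byte order, which is why they go through ENDIAN_SWAP32 before printing on a little-endian host. A sketch of a structure consistent with these accesses, following the standard NTP v3/v4 header layout (the project's actual typedef may differ in type names):

#include <stdint.h>

/* Sketch of the 48-byte NTP packet implied by showReceNtpMsg(); all
 * multi-byte fields arrive in network byte order. */
typedef struct {
    uint8_t  LiVnMode;        /* leap indicator, version, mode */
    uint8_t  stratum;
    uint8_t  poll;
    int8_t   precision;
    uint32_t rootDelay;
    uint32_t rootDispersion;
    uint8_t  refId[4];        /* printed byte-by-byte above */
    uint32_t refTm_s, refTm_f;
    uint32_t origTm_s, origTm_f;
    uint32_t rxTm_s, rxTm_f;
    uint32_t txTm_s, txTm_f;
} ntp_packet;                 /* 48 bytes total */
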
Example #8
void wlRxRingCleanup(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;

    WLDBG_ENTER(DBG_LEVEL_12);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing != NULL)
    {
        for (currDescr = 0; currDescr < MAX_NUM_RX_DESC; currDescr++)
        {
            if (CURR_RXD.pSkBuff != NULL)
            {
                if (skb_shinfo(CURR_RXD.pSkBuff)->nr_frags)
                {
                    skb_shinfo(CURR_RXD.pSkBuff)->nr_frags = 0;
                }
                if (skb_shinfo(CURR_RXD.pSkBuff)->frag_list)
                {
                    skb_shinfo(CURR_RXD.pSkBuff)->frag_list = NULL;
                }
                pci_unmap_single(wlpptr->pPciDev,
                                 ENDIAN_SWAP32(CURR_RXD.pPhysBuffData),
                                 ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
                                 PCI_DMA_FROMDEVICE);
                WL_SKB_FREE(CURR_RXD.pSkBuff);
                WLDBG_INFO(DBG_LEVEL_12,
                           "unmapped+free'd rxdesc %i vaddr: 0x%p paddr: 0x%x len: %i",
                           currDescr, CURR_RXD.pBuffData,
                           ENDIAN_SWAP32(CURR_RXD.pPhysBuffData),
                           ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
                CURR_RXD.pBuffData = NULL;
                CURR_RXD.pSkBuff = NULL;
            }
        }
    }
    WLDBG_EXIT(DBG_LEVEL_12);
}
Example #9
static void mwl_rx_ring_cleanup(struct mwl_priv *priv)
{
	int curr_desc;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!priv);

	if (priv->desc_data[0].prx_ring != NULL) {
		for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) {
			if (CURR_RXD.psk_buff != NULL) {
				if (skb_shinfo(CURR_RXD.psk_buff)->nr_frags)
					skb_shinfo(CURR_RXD.psk_buff)->nr_frags = 0;

				if (skb_shinfo(CURR_RXD.psk_buff)->frag_list)
					skb_shinfo(CURR_RXD.psk_buff)->frag_list = NULL;

				pci_unmap_single(priv->pdev,
						 ENDIAN_SWAP32(CURR_RXD.pphys_buff_data),
						 priv->desc_data[0].rx_buf_size,
						 PCI_DMA_FROMDEVICE);

				dev_kfree_skb_any(CURR_RXD.psk_buff);

				WLDBG_INFO(DBG_LEVEL_4,
					   "unmapped+free'd rxdesc %i vaddr: 0x%p paddr: 0x%x len: %i",
					   curr_desc, CURR_RXD.pbuff_data,
					   ENDIAN_SWAP32(CURR_RXD.pphys_buff_data),
					   priv->desc_data[0].rx_buf_size);

				CURR_RXD.pbuff_data = NULL;
				CURR_RXD.psk_buff = NULL;
			}
		}
	}

	WLDBG_EXIT(DBG_LEVEL_4);
}
Example #10
static int mwl_rx_refill(struct mwl_priv *priv, struct mwl_rx_desc *pdesc)
{
	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!priv);
	BUG_ON(!pdesc);

	pdesc->psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

	if (pdesc->psk_buff == NULL)
		goto nomem;

	if (skb_linearize(pdesc->psk_buff)) {
		dev_kfree_skb_any(pdesc->psk_buff);
		WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
		goto nomem;
	}

	skb_reserve(pdesc->psk_buff, SYSADPT_MIN_BYTES_HEADROOM);

	pdesc->status = EAGLE_RXD_STATUS_OK;
	pdesc->qos_ctrl = 0x0000;
	pdesc->channel = 0x00;
	pdesc->rssi = 0x00;

	pdesc->pkt_len = priv->desc_data[0].rx_buf_size;
	pdesc->pbuff_data = pdesc->psk_buff->data;
	pdesc->pphys_buff_data =
		ENDIAN_SWAP32(pci_map_single(priv->pdev,
					     pdesc->psk_buff->data,
					     priv->desc_data[0].rx_buf_size,
					     PCI_DMA_BIDIRECTIONAL));

	WLDBG_EXIT(DBG_LEVEL_4);

	return 0;

nomem:

	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory");

	return -ENOMEM;
}
Example #11
//NTP client process: waits for the UDP reply, extracts the transmit timestamp and updates the local time
PROCESS_THREAD(ntp_connectPprocess, ev, data)
{
    SOCKETMSG msg;
    int recvlen;
    uip_ipaddr_t peeraddr;
    U16 peerport;
    unsigned long now;
    
    PROCESS_BEGIN();

    //printf("ntp_connectPprocess begin\n");
    while(1) {
        PROCESS_WAIT_EVENT();
        if( ev == PROCESS_EVENT_EXIT) {
            break;
        }  else if(ev == PROCESS_EVENT_MSG) {
            msg = *(SOCKETMSG *)data;
            //New data has arrived on the socket and is ready to be read.
            if(msg.status == SOCKET_NEWDATA) {
                if(UIP_CONNS <= msg.socket && msg.socket < UIP_CONNS + UIP_UDP_CONNS) {
                    recvlen = udprecvfrom(msg.socket, ntp.buf, MAX_BUFFER, &peeraddr, &peerport);
                    pPacket = (ntp_packet *)ntp.buf;
                    //printf("UDP socked:%d recvdata:%s from %d.%d.%d.%d:%d\n", msg.socket, ntp.buf, peeraddr.u8[0], peeraddr.u8[1], peeraddr.u8[2], peeraddr.u8[3], peerport);
                    //showReceNtpMsg(pPacket);
                    now = (ENDIAN_SWAP32(pPacket->txTm_s) - NTP_TIMESTAMP_DELTA);
                    //printf( "Now Time: %d\n", now);
                    update_ntp_time(now);
                    ntp.exit_process = 1;
                } else {
                    printf("Illegal socket:%d\n", msg.socket);
                } 
            } else {
                printf("unknown message type\n");
            }
        }
    }
    //printf("ntp_connectPprocess end\n");
    PROCESS_END();
}
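
The expression ENDIAN_SWAP32(pPacket->txTm_s) - NTP_TIMESTAMP_DELTA converts the server's transmit timestamp to Unix time: NTP counts seconds from 1900-01-01, Unix time from 1970-01-01, a fixed offset of 2208988800 seconds. A minimal illustration of that conversion (the constant is the standard NTP/Unix epoch offset; the helper name is made up):

#include <stdint.h>
#include <time.h>

#define NTP_TIMESTAMP_DELTA 2208988800UL  /* seconds between 1900-01-01 and 1970-01-01 */

/* Convert an NTP seconds field (already in host byte order) to Unix time. */
static time_t ntp_seconds_to_unix(uint32_t ntp_seconds)
{
    return (time_t)(ntp_seconds - NTP_TIMESTAMP_DELTA);
}
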
Example #12
void flip_endian_ino(qnx4fs_dir_entry_t *deptr)
{
	/* d_inode.name is char */
	ENDIAN_SWAP32(&deptr->d_inode.i_size);
	flip_endian_xtnt(&deptr->d_inode.i_first_xtnt);
	ENDIAN_SWAP32(&deptr->d_inode.i_xblk);
	ENDIAN_SWAP32(&deptr->d_inode.i_ftime);
	ENDIAN_SWAP32(&deptr->d_inode.i_mtime);
	ENDIAN_SWAP32(&deptr->d_inode.i_atime);
	ENDIAN_SWAP32(&deptr->d_inode.i_ctime);
	ENDIAN_SWAP16(&deptr->d_inode.i_num_xtnts);
	ENDIAN_SWAP16(&deptr->d_inode.i_mode);
	ENDIAN_SWAP16(&deptr->d_inode.i_uid);
	ENDIAN_SWAP16(&deptr->d_inode.i_gid);
	ENDIAN_SWAP16(&deptr->d_inode.i_nlink);
	/* d_inode.i_zero is char */
	/* d_inode.i_type is ftype_t which is char */
	/* d_inode.i_status is char */
}
Example #13
int procmgr_wait(resmgr_context_t *ctp, proc_wait_t *msg) {
	PROCESS						*prp, *child;
	struct wait_entry			*wap, **pwap, waitl;
	int							alive;

	if(ctp->info.flags & _NTO_MI_ENDIAN_DIFF) {
		ENDIAN_SWAP16(&msg->i.idtype);
		ENDIAN_SWAP32(&msg->i.options);
		ENDIAN_SWAP32(&msg->i.id);
	}
	if(msg->i.options & ~WOPTMASK) {
		return EINVAL;
	}
	waitl.rcvid = ctp->rcvid;
	waitl.idtype = msg->i.idtype;
	waitl.options = msg->i.options;
	waitl.id = msg->i.id;

	alive = 0;
	if(ND_NODE_CMP(ctp->info.nd, ND_LOCAL_NODE) != 0) {
		struct _client_info info;
		struct _cred_info *src, *dst;
		pid_t	nm_pid;

		if(ConnectClientInfo(ctp->info.scoid, &info, 0) == -1) {
			return errno;
		}

		nm_pid = pathmgr_netmgr_pid();
		if(nm_pid == 0) {
			/* netmgr is gone */
			return EL2HLT;
		}
		if(!(prp = proc_lock_pid(nm_pid))) {
			return EL2HLT;
		}

		src = &info.cred;

		for(child = prp->child; child; child = child->sibling) {
			if(child->pid != waitl.id) {
				continue;
			}
			/* security check */
			dst = &child->cred->info;
			if(!(src->euid == 0  ||
			   src->ruid == dst->ruid  ||
			   src->ruid == dst->suid  ||
			   src->euid == dst->ruid  ||
			   src->euid == dst->suid)) {
				return proc_error(EPERM, prp);
			}
			switch(procmgr_wait_check(child, prp, &waitl, 0)) {
			case 0:
				alive++;
				break;
			case -1:
				break;
			default:	
				return proc_error(_RESMGR_NOREPLY, prp);
			}
			if(alive) {
				break;
			}
		}
		if(alive == 0) {
			return proc_error(ECHILD, prp);
		}
	} else {
		if(!(prp = proc_lock_pid(ctp->info.pid))) {
			return EL2HLT;
		}

		for(child = prp->child; child; child = child->sibling) {
			switch(procmgr_wait_check(child, prp, &waitl, 0)) {
			case 0:
				alive++;
				break;
			case -1:
				break;
			default:	
				return proc_error(_RESMGR_NOREPLY, prp);
			}
		}

		//
		// If we're the guardian process for our parent, we'll pick up his
		// children when he dies so we need to see if there are any children
		// of our parent which might satisfy the wait condition in the
		// future and, if so, pretend like they're our children for the
		// purposes of the wait request.
		//
		if(prp->parent && prp->parent->guardian == prp) {
			if(procmgr_wait_check(prp->parent, prp, &waitl, 0) == 0) {
				alive++;
			}
		}

		if(alive == 0) {
			if(!prp->parent || prp->parent->guardian != prp || procmgr_wait_check(prp->parent, prp, &waitl, 0) != 0) {
				return proc_error(ECHILD, prp);
			}
		}
	}

	if(waitl.options & WNOHANG) {
		memset(&msg->o, 0x00, sizeof msg->o);
		return proc_error(_RESMGR_PTR(ctp, &msg->o, sizeof msg->o), prp);
	}
			
	// Nothing matched yet, so add to the wait queue, kept sorted so that a pid match has higher priority
	if(!(wap = proc_object_alloc(&wait_souls))) {
		return proc_error(ENOMEM, prp);
	}
	for(pwap = &prp->wap; (waitl.next = *pwap); pwap = &waitl.next->next) {
		if(waitl.next->idtype < waitl.idtype) {
			break;
		}
	}
	*wap = waitl;
	*pwap = wap;

	ctp->id = root_id;
	(void)resmgr_open_bind(ctp, wap, &proc_wait_funcs);
	return proc_error(_RESMGR_NOREPLY, prp);
}
Example #14
void flip_endian_xtnt(qnx4fs_xtnt_t *xtptr)
{
	ENDIAN_SWAP32(&xtptr->xtnt_blk);
	ENDIAN_SWAP32(&xtptr->xtnt_size);
}
Example #15
void flip_endian_link(qnx4fs_dir_entry_t *deptr)
{
	/* d_link.l_fname, l_spare, l_status are all char fields; l_inode_blk is the 32-bit field that needs swapping */
	ENDIAN_SWAP32(&deptr->d_link.l_inode_blk);
}
Example #16
char* s_tag_type::GetString(char out[5])
{
    *(unsigned long*)out = ENDIAN_SWAP32(val);
    out[4] = '\0';
    return out;
}
Example #17
s_tag_type::s_tag_type(const char* str)
{
    unsigned long tmp = *(unsigned long*)str;
    val = ENDIAN_SWAP32(tmp);
}
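
Examples #16 and #17 reinterpret a 4-character tag through unsigned long pointers. That works on 32-bit targets, but on LP64 platforms unsigned long is 8 bytes, so GetString() would write past the 5-byte buffer and the constructor would read past the 4-character string; the casts also rely on alignment and violate strict aliasing. A safer equivalent, sketched here as free functions using a fixed-width type and memcpy (illustration only, assuming the same value-returning ENDIAN_SWAP32 used above; this is not the project's code):

#include <stdint.h>
#include <string.h>

/* Hypothetical 4-byte-safe variants of the tag <-> string conversions above. */
static char *tag_to_string(uint32_t val, char out[5])
{
    uint32_t swapped = ENDIAN_SWAP32(val);
    memcpy(out, &swapped, 4);   /* exactly four bytes, no aliasing cast */
    out[4] = '\0';
    return out;
}

static uint32_t tag_from_string(const char *str)
{
    uint32_t tmp;
    memcpy(&tmp, str, 4);       /* read exactly four characters */
    return ENDIAN_SWAP32(tmp);
}
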
Example #18
int wlRxRingInit(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int currDescr;

    WLDBG_ENTER_INFO(DBG_LEVEL_12,  "initializing %i descriptors", MAX_NUM_RX_DESC);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing != NULL)
    {
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize = MAX_AGGR_SIZE;
        for (currDescr = 0; currDescr < MAX_NUM_RX_DESC; currDescr++)
        {
            CURR_RXD.pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
            if(skb_linearize(CURR_RXD.pSkBuff))
            {
                WL_SKB_FREE(CURR_RXD.pSkBuff);
                printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
                return FAIL;
            }
            skb_reserve(CURR_RXD.pSkBuff , MIN_BYTES_HEADROOM);
            CURR_RXD.RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
            CURR_RXD.Status    = EAGLE_RXD_STATUS_OK;
            CURR_RXD.QosCtrl   = 0x0000;
            CURR_RXD.Channel   = 0x00;
            CURR_RXD.RSSI      = 0x00;
            CURR_RXD.SQ2       = 0x00;

            if (CURR_RXD.pSkBuff != NULL)
            {
                CURR_RXD.PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
                CURR_RXD.pBuffData = CURR_RXD.pSkBuff->data;
                CURR_RXD.pPhysBuffData =
                    ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
                                                 CURR_RXD.pSkBuff->data,
                                                 ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
                                                 PCI_DMA_FROMDEVICE));
                CURR_RXD.pNext = &NEXT_RXD;
                CURR_RXD.pPhysNext =
                    ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing +
                                  ((currDescr+1)*sizeof(wlrxdesc_t)));
                WLDBG_INFO(DBG_LEVEL_12,
                           "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
                           currDescr, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
                           ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
                WLDBG_INFO(DBG_LEVEL_12,
                           "rxdesc: %i vnext: 0x%p pnext: 0x%x", currDescr,
                           CURR_RXD.pNext, ENDIAN_SWAP32(CURR_RXD.pPhysNext));
            } else
            {
                WLDBG_ERROR(DBG_LEVEL_12,
                            "rxdesc %i: no skbuff available", currDescr);
                return FAIL;
            }
        }
        LAST_RXD.pPhysNext =
            ENDIAN_SWAP32((u_int32_t) ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
        LAST_RXD.pNext             = &FIRST_RXD;
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = &FIRST_RXD;

        WLDBG_EXIT_INFO(DBG_LEVEL_12,
                        "last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
                        LAST_RXD.pNext, ENDIAN_SWAP32(LAST_RXD.pPhysNext),
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc);
        return SUCCESS;
    }
    WLDBG_ERROR(DBG_LEVEL_12, "no valid RX mem");
    return FAIL;
}
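
One detail worth noting in this ring setup: dev_alloc_skb() may return NULL, yet skb_linearize() and skb_reserve() are called before the NULL check that follows a few lines later (mwl_rx_refill() in Example #10 checks first). A small helper along these lines could factor out the defensive ordering (a sketch of the pattern, not code from the driver):

#include <linux/skbuff.h>

/* Allocate and prepare one RX buffer, checking for allocation failure before
 * touching the skb. */
static struct sk_buff *alloc_rx_skb(unsigned int buf_size, unsigned int headroom)
{
	struct sk_buff *skb = dev_alloc_skb(buf_size);

	if (skb == NULL)
		return NULL;            /* caller reports "no skbuff available" */

	if (skb_linearize(skb)) {       /* only reached with a valid skb */
		dev_kfree_skb_any(skb);
		return NULL;
	}

	skb_reserve(skb, headroom);
	return skb;
}
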
Example #19
int procmgr_msg_daemon(resmgr_context_t *ctp, proc_daemon_t *msg) {
	PROCESS								*prp;
	int									fd;

	if(ctp->info.flags & _NTO_MI_ENDIAN_DIFF) {
		ENDIAN_SWAP16(&msg->i.subtype);
		ENDIAN_SWAP32(&msg->i.status);
		ENDIAN_SWAP32(&msg->i.flags);
	}

	if(!(prp = proc_lock_pid(ctp->info.pid))) {
		return ESRCH;
	}

	// First detach from parent returning a status if necessary
	procmgr_nozombie(prp, msg->i.status);

	// Now detach from session and join proc's session
	// Remember an fd that may need to be closed while not proc_lock()ed
	fd = -1;
	if(prp->flags & _NTO_PF_SLEADER) {
		prp->pgrp = 1; // So we don't drop a SIGHUP on ourselves
		fd = procmgr_sleader_detach(prp);
	}
	if(atomic_sub_value(&prp->session->links, 1) == 1) {
		// This should only happen if the session leader died...
		_ksfree(prp->session, sizeof *prp->session);
	}
	prp->session = sysmgr_prp->session;
	atomic_add(&prp->session->links, 1);
	prp->pgrp = prp->pid;

	// Change directory to root
	if(!(msg->i.flags & PROCMGR_DAEMON_NOCHDIR)) {
		if(prp->cwd != prp->root) {
			NODE	*old = prp->cwd;

			prp->cwd = pathmgr_node_clone(prp->root);
			pathmgr_node_detach(old);
		}
	}

	// Clear the umask
	if(!(msg->i.flags & PROCMGR_DAEMON_KEEPUMASK)) {
		prp->umask = 0;
	}

	// Return the highest used fd so the lib can close them
	ctp->status = prp->fdcons.nentries;
   
	// unlock the process;
	proc_unlock(prp);

	// Free up any fds if necessary
	if(fd != -1) {
		if(proc_thread_pool_reserve() != 0) {
			return EAGAIN;
		}
		close(fd);
		proc_thread_pool_reserve_done();
	}

	return EOK;
}
Example #20
int
io_open(resmgr_context_t *ctp, io_open_t *msg, MQDEV *dev, void *extra) {
	struct mq_attr	 *mqp = extra;
	uint32_t		 *smp = extra;
	iofunc_attr_t	 *attr = &dev->attr;
	MQDEV			**head;
	struct mq_attr	  mq_attr;
	int				  status;
	dev_t			  rdev;

	if(S_ISDIR(dev->attr.mode)) {
		// Open on a new/non-existent queue.
		if((msg->connect.ioflag & O_CREAT) == 0) {
			return ENOENT;
		}

		// It must have a file_type of _FTYPE_MQUEUE or _FTYPE_SEM
		memset(&mq_attr, 0, sizeof(mq_attr));
		switch(msg->connect.file_type) {
		case _FTYPE_MQUEUE:
			rdev = S_INMQ;
			head = &mq_dir_attr.link;
			if(msg->connect.extra_type == _IO_CONNECT_EXTRA_MQUEUE) {
				if (msg->connect.extra_len != sizeof(struct mq_attr))
					return(ENOSYS);
				if (ctp->info.flags & _NTO_MI_ENDIAN_DIFF) {
					ENDIAN_SWAP32(&mqp->mq_maxmsg);
					ENDIAN_SWAP32(&mqp->mq_msgsize);
				}
				if((mq_attr.mq_maxmsg = mqp->mq_maxmsg) <= 0  ||
				   (mq_attr.mq_msgsize = mqp->mq_msgsize) <= 0) {
					return EINVAL;
				}
			} else {
				mq_attr.mq_maxmsg = 1024;
				mq_attr.mq_msgsize = 4096;
			}
			break;

		case _FTYPE_SEM:
			rdev = S_INSEM;
			head = &sem_dir_attr.link;
			mq_attr.mq_maxmsg = _POSIX_SEM_VALUE_MAX;
			mq_attr.mq_flags = MQ_SEMAPHORE;
			if(msg->connect.extra_type == _IO_CONNECT_EXTRA_SEM) {
				if (msg->connect.extra_len != sizeof(uint32_t))
					return(ENOSYS);
				if (ctp->info.flags & _NTO_MI_ENDIAN_DIFF) {
					ENDIAN_SWAP32(smp);
				}
				mq_attr.mq_curmsgs = *smp;
			}
			break;

		default:
			return ENOSYS;
		}

		// Check for O_CREAT race condition (PR-11060)
		if ((dev = check_duplicate(msg->connect.path, *head)) != NULL) {
			// Re-target open to the already created device.
			ctp->id = dev->id;
			goto race;		// In case non-trivial open verification code
		}

		// Get a device entry and the input/output buffers for it.
		if((dev = MemchunkCalloc(memchunk, 1, sizeof(*dev) + msg->connect.path_len - sizeof(char))) == NULL) {
			return ENOSPC;
		}
			
		msg->connect.mode = (msg->connect.mode & ~S_IFMT) | S_IFNAM;
		if((status = iofunc_open(ctp, msg, &dev->attr, attr, 0)) != EOK) {
			MemchunkFree(memchunk, dev);
			return status;
		}

		dev->mq_attr = mq_attr;
		dev->attr.rdev = rdev;
		IOFUNC_NOTIFY_INIT(dev->notify);

		// Add the new queue to the pathname space
		if((dev->id = create_device(msg->connect.path, msg->connect.file_type, dev)) == -1) {
			if ((status = errno) == EMFILE) { //We have created too many connections, this is the system limit
				status = ENFILE;			  //Tell the client the system is full.
			}
			MemchunkFree(memchunk, dev);
			return status;
		}
		strcpy(dev->name, msg->connect.path);
		dev->link = *head, *head = dev;

		// Re-target open to the newly created device.
		ctp->id = dev->id;
	} else {
race:
		// Open on an existing queue.
		if((status = iofunc_open(ctp, msg, &dev->attr, 0, 0)) != EOK) {
			return status;
		}
	}

	// Attach the ocb to the device
	if((status = iofunc_ocb_attach(ctp, msg, NULL, &dev->attr, NULL)) == -1) {
		return status;
	}

	return EOK;
}
Example #21
void wlRecv(struct net_device *netdev)
{
	struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
	int work_done = 0;
	wlrxdesc_t *pCurrent = ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc;
	static Bool_e isFunctionBusy = WL_FALSE;
	int receivedHandled = 0;
	u_int32_t rxRdPtr;
	u_int32_t rxWrPtr;
	struct sk_buff *pRxSkBuff=NULL;
	WL_BUFF *wlb = NULL;
	void *pCurrentData;
	u_int8_t rxRate;
	int rxCount;
	int rssi;
	vmacApInfo_t *vmacSta_p = wlpptr->vmacSta_p;
	u_int32_t status;
    u_int32_t rssi_paths;
	WLDBG_ENTER(DBG_LEVEL_14);

    /* In a corner case the descriptors may be uninitialized and not usable, accessing these may cause a crash */
	if (isFunctionBusy || (pCurrent == NULL))
	{
		return;
	}
	isFunctionBusy = WL_TRUE;

	rxRdPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	rxWrPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescWrite);

	while ((pCurrent->RxControl ==EAGLE_RXD_CTRL_DMA_OWN)
		&& (work_done < vmacSta_p->work_to_do)
		)
	{		
		/* AUTOCHANNEL */
		{
			if(vmacSta_p->StopTraffic)
				goto out;
		} /* AUTOCHANNEL */

		rxCount = ENDIAN_SWAP16(pCurrent->PktLen);
		pRxSkBuff = pCurrent->pSkBuff;
		if (pRxSkBuff == NULL)
		{
			goto out;
		}
		pci_unmap_single(wlpptr->pPciDev, 
			ENDIAN_SWAP32(pCurrent->pPhysBuffData),
			((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
			PCI_DMA_FROMDEVICE);
		pCurrentData = pCurrent->pBuffData;
		rxRate = pCurrent->Rate;
		status = (u_int32_t)pCurrent->Status;
		pRxSkBuff->protocol = 0;
		if(pCurrent->QosCtrl & IEEE_QOS_CTL_AMSDU)
		{
			pRxSkBuff->protocol |= WL_WLAN_TYPE_AMSDU;
		}
		rssi = (int)pCurrent->RSSI + W836X_RSSI_OFFSET;
        rssi_paths = *((u_int32_t *)&pCurrent->HwRssiInfo);
		if (skb_tailroom(pRxSkBuff) >= rxCount)
		{
			skb_put(pRxSkBuff, rxCount ); 
			skb_pull(pRxSkBuff, 2); 
		}
		else
		{
			WLDBG_INFO(DBG_LEVEL_14,"Not enough tail room =%x recvlen=%x, pCurrent=%x, pCurrentData=%x", WL_BUFF_TAILROOM(pRxSkBuff), rxCount,pCurrent, pCurrentData);
			WL_SKB_FREE(pRxSkBuff);
			goto out;
		}

		wlpptr->netDevStats->rx_packets++;
		wlb = WL_BUFF_PTR(pRxSkBuff);
		WL_PREPARE_BUF_INFO(pRxSkBuff);
		if(pCurrent->HtSig2 & 0x8 )
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 8|(pCurrent->QosCtrl&0x7);
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}	
		else
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 0|(pCurrent->QosCtrl&0x7); 
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}

		wlpptr->netDevStats->rx_bytes += pRxSkBuff->len;
		{
			pCurrent->pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
			if (pCurrent->pSkBuff != NULL)
			{
				if(skb_linearize(pCurrent->pSkBuff))
				{
					WL_SKB_FREE(pCurrent->pSkBuff);
					printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
					goto out;
				}
				skb_reserve(pCurrent->pSkBuff , MIN_BYTES_HEADROOM);
				pCurrent->Status    = EAGLE_RXD_STATUS_OK;
				pCurrent->QosCtrl   = 0x0000;
				pCurrent->Channel   = 0x00;
				pCurrent->RSSI      = 0x00;
				pCurrent->SQ2       = 0x00;

				pCurrent->PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
				pCurrent->pBuffData = pCurrent->pSkBuff->data;
				pCurrent->pPhysBuffData =
					ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
					pCurrent->pSkBuff->data,
					((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize/*+sizeof(struct skb_shared_info)*/,
					PCI_DMA_BIDIRECTIONAL));
			}
		}
out:

		receivedHandled++;
		pCurrent->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
		pCurrent->QosCtrl =0;
		rxRdPtr = ENDIAN_SWAP32(pCurrent->pPhysNext);
		pCurrent = pCurrent->pNext;
	}
	writel(rxRdPtr, wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = pCurrent;
	isFunctionBusy = WL_FALSE;
	WLDBG_EXIT(DBG_LEVEL_14);
}
Example #22
/*
 * Return -1
 *     This prp can never match the criteria
 * Return 0
 *     This prp does not currently match the criteria
 * Return WNOHANG 
 *     An error with wap, reply was done (i.e. a thread unblocked)
 * Return non-zero W????? 
 *     This prp matched and a reply was done (i.e. a thread unblocked)
 */
int procmgr_wait_check(PROCESS *prp, PROCESS *parent, struct wait_entry *wap, int match) {
	struct _msg_info		info;

	/*
	 * Check if this prp matched the requested group
	 */
	switch(wap->idtype) {
	case P_ALL:
		break;

	case P_PID:
		if(prp->pid != wap->id) {
			return -1;
		}
		break;

	case P_PGID:
		if(prp->pgrp != wap->id) {
			return -1;
		}
		break;

	default:
		MsgError(wap->rcvid, EINVAL);
		return WNOHANG;
	}

	if(match) {
		/*
		 * Check for match, if parent is ignoring SIGCHLD, are we the last child?
		 */
		match &= wap->options;
		if(wap->idtype == P_ALL && (prp->sibling || parent->child != prp) &&
				sigismember(&parent->sig_ignore, SIGCHLD)) {
			match &= ~WEXITED;
		}
		/*
		 * Have we already responded with exit code?
		 */
		if(!(prp->flags & _NTO_PF_WAITINFO)) {
			match &= ~WEXITED;
		}
	} else {
		/*
		 * Check if the requested prp currently matches any options
		 */
		if((wap->options & WTRAPPED) && (prp->flags & (_NTO_PF_DEBUG_STOPPED | _NTO_PF_PTRACED)) ==
				(_NTO_PF_DEBUG_STOPPED | _NTO_PF_PTRACED)) {
			match = WTRAPPED;
		} else if((wap->options & WEXITED) && (prp->flags & _NTO_PF_WAITINFO)) {
			match = WEXITED;
		} else if((wap->options & WCONTINUED) && (prp->flags & _NTO_PF_CONTINUED)) {
			match = WCONTINUED;
		} else if((wap->options & WUNTRACED) && (prp->flags & _NTO_PF_STOPPED) &&
				prp->siginfo.si_signo != 0) {
			match = WUNTRACED;
		}
	}

	/*
	 * If no options matched, check if it could ever match options
	 */
	if(match == 0) {
		int				options = wap->options;
        
		if(prp->flags & (_NTO_PF_ZOMBIE | _NTO_PF_TERMING)) {
			options &= ~(WUNTRACED|WTRAPPED|WCONTINUED);
		}
		if((prp->flags & (_NTO_PF_ZOMBIE | _NTO_PF_WAITINFO)) == _NTO_PF_ZOMBIE) {
			options &= ~WEXITED;
		}
		if((prp->flags & _NTO_PF_NOZOMBIE) || sigismember(&parent->sig_ignore, SIGCHLD)) {
			options &= ~WEXITED;
		}
		if(prp->flags & _NTO_PF_WAITDONE) {
			options &= ~WEXITED;
		}
		if(!(prp->flags & _NTO_PF_PTRACED)) {
			options &= ~WTRAPPED;
		}
		if((options & (WEXITED|WUNTRACED|WTRAPPED|WCONTINUED)) == 0) {
			return -1;
		}
		return 0;
	}

	/*
	 * Unblock the waiting thread...
	 */
	
	if(MsgInfo(wap->rcvid, &info) != -1) {
		siginfo_t	siginfo;

		// unbind and unblock if rcvid is still around
		if((!parent->wap) || ((parent->wap == wap) && (parent->wap->next == NULL))) {
			(void)_resmgr_unbind(&info);
		}
		siginfo = prp->siginfo;
		if(siginfo.si_signo != SIGCHLD) {
			if(prp->flags & _NTO_PF_COREDUMP) {
				siginfo.si_code = CLD_DUMPED;
			} else {
				siginfo.si_code = CLD_KILLED;
			}
			siginfo.si_status = siginfo.si_signo;
			siginfo.si_pid = prp->pid;
			siginfo.si_signo = SIGCHLD;
		}
		if(info.flags & _NTO_MI_ENDIAN_DIFF) {
			ENDIAN_SWAP32(&siginfo.si_signo);
			ENDIAN_SWAP32(&siginfo.si_code);
			ENDIAN_SWAP32(&siginfo.si_errno);
			ENDIAN_SWAP32(&siginfo.si_pid);
			ENDIAN_SWAP32(&siginfo.si_status);
			ENDIAN_SWAP32(&siginfo.si_utime);
			ENDIAN_SWAP32(&siginfo.si_stime);
		}
		MsgReply(wap->rcvid, 0, &siginfo, sizeof siginfo);
	} else {
		KerextSlogf( _SLOG_SETCODE( _SLOGC_PROC, 0 ), _SLOG_INFO, "proc_wait_check: MsgInfo() failed, errno=%d", errno);
	}

	/*
	 * Clean up prp status if requested so it is not reported again
	 */
	if(wap->options & WNOWAIT) {
		return WNOWAIT;
	}
	switch(match) {
	case WEXITED:
		if(prp->flags & _NTO_PF_WAITINFO) {
			parent->kids_running_time += prp->running_time + prp->kids_running_time;
			parent->kids_system_time += prp->system_time + prp->kids_system_time;
			prp->flags &= ~_NTO_PF_WAITINFO;
			prp->flags |= _NTO_PF_WAITDONE;
		}
		if(prp->flags & _NTO_PF_ZOMBIE) {
			MsgSendPulse(PROCMGR_COID, prp->terming_priority, PROC_CODE_TERM, prp->pid);
		} else {
			match = WNOWAIT;
		}
		break;
	case WUNTRACED: // also WSTOPPED
		prp->siginfo.si_signo = 0;
		break;
	case WTRAPPED:
		break;
	case WCONTINUED:
		prp->flags &= ~_NTO_PF_CONTINUED;
		break;
	default:
		break;
	}
	return match;
}
Example #23
void mwl_rx_recv(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl_priv *priv;
	struct mwl_rx_desc *curr_desc;
	int work_done = 0;
	struct sk_buff *prx_skb = NULL;
	int pkt_len;
	struct ieee80211_rx_status status;
	struct mwl_vif *mwl_vif = NULL;
	struct ieee80211_hdr *wh;
	u32 status_mask;

	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!hw);
	priv = hw->priv;
	BUG_ON(!priv);

	curr_desc = priv->desc_data[0].pnext_rx_desc;

	if (curr_desc == NULL) {
		status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
		writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
		       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

		priv->is_rx_schedule = false;

		WLDBG_EXIT_INFO(DBG_LEVEL_4, "busy or no receiving packets");
		return;
	}

	while ((curr_desc->rx_control == EAGLE_RXD_CTRL_DMA_OWN)
		&& (work_done < priv->recv_limit)) {
		prx_skb = curr_desc->psk_buff;
		if (prx_skb == NULL)
			goto out;
		pci_unmap_single(priv->pdev,
				 ENDIAN_SWAP32(curr_desc->pphys_buff_data),
				 priv->desc_data[0].rx_buf_size,
				 PCI_DMA_FROMDEVICE);
		pkt_len = curr_desc->pkt_len;

		if (skb_tailroom(prx_skb) < pkt_len) {
			WLDBG_PRINT("Critical error: not enough tail room =%x pkt_len=%x, curr_desc=%x, curr_desc_data=%x",
				    skb_tailroom(prx_skb), pkt_len, curr_desc, curr_desc->pbuff_data);
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		if (curr_desc->channel != hw->conf.chandef.chan->hw_value) {
			dev_kfree_skb_any(prx_skb);
			goto out;
		}

		mwl_rx_prepare_status(curr_desc, &status);

		priv->noise = -curr_desc->noise_floor;

		wh = &((struct mwl_dma_data *)prx_skb->data)->wh;

		if (ieee80211_has_protected(wh->frame_control)) {
			/* Check if hw crypto has been enabled for
			 * this bss. If yes, set the status flags
			 * accordingly
			 */
			if (ieee80211_has_tods(wh->frame_control))
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr1);
			else
				mwl_vif = mwl_rx_find_vif_bss(&priv->vif_list,
							      wh->addr2);

			if (mwl_vif != NULL &&
			    mwl_vif->is_hw_crypto_enabled) {
				/*
				 * When MMIC ERROR is encountered
				 * by the firmware, payload is
				 * dropped and only 32 bytes of
				 * mwl8k Firmware header is sent
				 * to the host.
				 *
				 * We need to add four bytes of
				 * key information.  In it
				 * MAC80211 expects keyidx set to
				 * 0 for triggering Counter
				 * Measure of MMIC failure.
				 */
				if (status.flag & RX_FLAG_MMIC_ERROR) {
					struct mwl_dma_data *tr;

					tr = (struct mwl_dma_data *)prx_skb->data;
					memset((void *)&(tr->data), 0, 4);
					pkt_len += 4;
				}

				if (!ieee80211_is_auth(wh->frame_control))
					status.flag |= RX_FLAG_IV_STRIPPED |
						       RX_FLAG_DECRYPTED |
						       RX_FLAG_MMIC_STRIPPED;
			}
		}

		skb_put(prx_skb, pkt_len);
		mwl_rx_remove_dma_header(prx_skb, curr_desc->qos_ctrl);
		memcpy(IEEE80211_SKB_RXCB(prx_skb), &status, sizeof(status));
		ieee80211_rx(hw, prx_skb);
out:
		mwl_rx_refill(priv, curr_desc);
		curr_desc->rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
		curr_desc->qos_ctrl = 0;
		curr_desc = curr_desc->pnext;
		work_done++;
	}

	priv->desc_data[0].pnext_rx_desc = curr_desc;

	status_mask = readl(priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);
	writel(status_mask | MACREG_A2HRIC_BIT_RX_RDY,
	       priv->iobase1 + MACREG_REG_A2H_INTERRUPT_STATUS_MASK);

	priv->is_rx_schedule = false;

	WLDBG_EXIT(DBG_LEVEL_4);
}
Example #24
static int mwl_rx_ring_init(struct mwl_priv *priv)
{
	int curr_desc;

	WLDBG_ENTER_INFO(DBG_LEVEL_4,  "initializing %i descriptors", SYSADPT_MAX_NUM_RX_DESC);

	if (priv->desc_data[0].prx_ring != NULL) {
		priv->desc_data[0].rx_buf_size = SYSADPT_MAX_AGGR_SIZE;

		for (curr_desc = 0; curr_desc < SYSADPT_MAX_NUM_RX_DESC; curr_desc++) {
			CURR_RXD.psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

			if (skb_linearize(CURR_RXD.psk_buff)) {
				dev_kfree_skb_any(CURR_RXD.psk_buff);
				WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no suitable memory");
				return -ENOMEM;
			}

			skb_reserve(CURR_RXD.psk_buff, SYSADPT_MIN_BYTES_HEADROOM);
			CURR_RXD.rx_control = EAGLE_RXD_CTRL_DRIVER_OWN;
			CURR_RXD.status = EAGLE_RXD_STATUS_OK;
			CURR_RXD.qos_ctrl = 0x0000;
			CURR_RXD.channel = 0x00;
			CURR_RXD.rssi = 0x00;

			if (CURR_RXD.psk_buff != NULL) {
				CURR_RXD.pkt_len = SYSADPT_MAX_AGGR_SIZE;
				CURR_RXD.pbuff_data = CURR_RXD.psk_buff->data;
				CURR_RXD.pphys_buff_data =
					ENDIAN_SWAP32(pci_map_single(priv->pdev,
								     CURR_RXD.psk_buff->data,
								     priv->desc_data[0].rx_buf_size,
								     PCI_DMA_FROMDEVICE));
				CURR_RXD.pnext = &NEXT_RXD;
				CURR_RXD.pphys_next =
					ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring +
						      ((curr_desc + 1) * sizeof(struct mwl_rx_desc)));
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i status: 0x%x (%i) len: 0x%x (%i)",
					   curr_desc, EAGLE_TXD_STATUS_IDLE, EAGLE_TXD_STATUS_IDLE,
					   priv->desc_data[0].rx_buf_size, priv->desc_data[0].rx_buf_size);
				WLDBG_INFO(DBG_LEVEL_4,
					   "rxdesc: %i vnext: 0x%p pnext: 0x%x", curr_desc,
					   CURR_RXD.pnext, ENDIAN_SWAP32(CURR_RXD.pphys_next));
			} else {
				WLDBG_ERROR(DBG_LEVEL_4,
					    "rxdesc %i: no skbuff available", curr_desc);
				WLDBG_EXIT_INFO(DBG_LEVEL_4, "no socket buffer");
				return -ENOMEM;
			}
		}
		LAST_RXD.pphys_next =
			ENDIAN_SWAP32((u32)priv->desc_data[0].pphys_rx_ring);
		LAST_RXD.pnext = &FIRST_RXD;
		priv->desc_data[0].pnext_rx_desc = &FIRST_RXD;

		WLDBG_EXIT_INFO(DBG_LEVEL_4,
				"last rxdesc vnext: 0x%p pnext: 0x%x vfirst 0x%x",
				LAST_RXD.pnext, ENDIAN_SWAP32(LAST_RXD.pphys_next),
				priv->desc_data[0].pnext_rx_desc);

		return 0;
	}

	WLDBG_ERROR(DBG_LEVEL_4, "no valid RX mem");
	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no valid RX mem");

	return -ENOMEM;
}