Esempio n. 1
0
*/	DEVICE_CMD Close_DNS(REBREQ *sock)
/*
**		Close a DNS request port and release its resources.
**
**		Note: valid even if not open.
**
***********************************************************************/
{
	// Terminate a pending request:
#ifdef HAS_ASYNC_DNS
	if (GET_FLAG(sock->flags, RRF_PENDING)) {
		CLR_FLAG(sock->flags, RRF_PENDING);
		// Cancel the outstanding Windows async lookup, if one exists.
		if (sock->requestee.handle) WSACancelAsyncRequest(sock->requestee.handle);
	}
#endif
	// Release the host-info buffer from a completed lookup, then clear
	// both the buffer pointer and the async request handle.
	if (sock->special.net.host_info) OS_FREE(sock->special.net.host_info);
	sock->special.net.host_info = 0;
	sock->requestee.handle = 0;
	SET_CLOSED(sock);
	return DR_DONE; // Removes it from device's pending list (if needed)
}
/*
 * User-space implementation of osal_firmware_release().
 *
 * Frees the firmware buffer held by fw_ctxt and clears the buffer
 * pointer and size stored in the context.
 *
 * Returns OSAL_SUCCESS when the buffer was released,
 * OSAL_INVALID_PARAM for a NULL context, and OSAL_ERROR when the
 * context holds no buffer.
 */
osal_result os_firmware_release(os_firmware_t *fw_ctxt)
{
   if (NULL == fw_ctxt) {
      return OSAL_INVALID_PARAM;
   }

   if (NULL == fw_ctxt->fw_address) {
      /* No buffer to release: same OSAL_ERROR result as before. */
      return OSAL_ERROR;
   }

   OS_FREE(fw_ctxt->fw_address); /* Release fw buffer's memory */
   fw_ctxt->fw_address = NULL;
   fw_ctxt->fw_size = 0;

   return OSAL_SUCCESS;
}
Esempio n. 3
0
/*
 * Delete the given frequency/chwidth from the NOL (non-occupancy list).
 *
 * Walks the singly-linked NOL, unlinking, timer-cancelling and freeing
 * every entry that matches both delfreq and delchwidth, and keeps
 * dfs_nol_count in sync.  A NULL dfs is logged and ignored.
 *
 * Fix: removed the dead store "nol = NULL;" that was immediately
 * overwritten by "nol = *prev_next;".
 */
static void
dfs_nol_delete(struct ath_dfs *dfs, u_int16_t delfreq, u_int16_t delchwidth)
{
    struct dfs_nolelem *nol, **prev_next;

    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: sc_dfs is NULL\n", __func__);
        return;
    }

    DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL,
      "%s: remove channel=%d/%d MHz from NOL\n",
      __func__,
      delfreq, delchwidth);
    prev_next = &(dfs->dfs_nol);
    nol = dfs->dfs_nol;
    while (nol != NULL) {
        if (nol->nol_freq == delfreq && nol->nol_chwidth == delchwidth) {
            /* Unlink the node first so the list stays consistent even
             * while the node is being torn down. */
            *prev_next = nol->nol_next;
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL,
              "%s removing channel %d/%dMHz from NOL tstamp=%d\n",
                __func__, nol->nol_freq, nol->nol_chwidth,
                (adf_os_ticks_to_msecs(adf_os_ticks()) / 1000));
            OS_CANCEL_TIMER(&nol->nol_timer);
            OS_FREE(nol);
            /* Resume the walk at the successor of the removed node;
             * prev_next is still the link that now points to it. */
            nol = *prev_next;

            /* Update the NOL counter */
            dfs->dfs_nol_count--;

            /* Be paranoid! */
            if (dfs->dfs_nol_count < 0) {
                DFS_PRINTK("%s: dfs_nol_count < 0; eek!\n", __func__);
                dfs->dfs_nol_count = 0;
            }

        } else {
            prev_next = &(nol->nol_next);
            nol = nol->nol_next;
        }
    }
}
Esempio n. 4
0
/*
 * Request an L2CAP connection parameter update for the given connection.
 * Returns the stack's status, or BLE_ERROR_FAILED when the command
 * could not be executed.
 */
ble_error_t ble_l2cap_conn_param_update(uint16_t conn_idx, const gap_conn_params_t *conn_params)
{
        irb_ble_l2cap_conn_param_update_cmd_t *cmd;
        irb_ble_l2cap_conn_param_update_rsp_t *rsp;
        ble_error_t status;

        /* Build the command message.  Note the parameters are stored by
           pointer — presumably consumed before the caller's buffer goes
           away; TODO confirm against irb_execute() semantics. */
        cmd = alloc_ble_msg(IRB_BLE_L2CAP_CONN_PARAM_UPDATE_CMD, sizeof(*cmd));
        cmd->conn_params = conn_params;
        cmd->conn_idx = conn_idx;

        if (!irb_execute(cmd, (void **) &rsp)) {
                return BLE_ERROR_FAILED;
        }

        status = rsp->status;
        OS_FREE(rsp);
        return status;
}
Esempio n. 5
0
/*
 * Allocate and initialize a log handle.
 *
 * file_name, version and dir are copied into the handle (truncated to
 * LOG_NAME_LEN - 1 characters, always NUL-terminated); mode is stored
 * as-is and every PID slot starts at DEFAULT_LEVEL.
 *
 * Returns the new handle, or NULL on invalid arguments, allocation
 * failure, or a backup_log() error.  The caller releases the handle
 * with log_close().
 */
void *log_open(const char *file_name, const char *version, const char *dir, uint32_t mode)
{
    log_t *log;
    uint32_t pid;

    if ((NULL == file_name) || (NULL == version) || (NULL == dir))
    {
        return NULL;
    }

    log = (log_t *)OS_MALLOC(sizeof(log_t));
    if (NULL == log)
    {
        return NULL;
    }

    memset(log, 0, sizeof(log_t));

    OS_RWLOCK_INIT(&log->rwlock);

    /* Copy each string, forcing NUL termination on truncation. */
    strncpy(log->dir, dir, LOG_NAME_LEN);
    log->dir[LOG_NAME_LEN - 1] = 0;
    strncpy(log->name, file_name, LOG_NAME_LEN);
    log->name[LOG_NAME_LEN - 1] = 0;
    strncpy(log->version, version, LOG_NAME_LEN);
    log->version[LOG_NAME_LEN - 1] = 0;

    log->mode = mode;
    for (pid = 0; pid < PIDS_NUM; pid++)
    {
        log->levels[pid] = DEFAULT_LEVEL;
    }

    if (backup_log(log) < 0)
    {
        OS_FREE(log);
        return NULL;
    }

    return log;
}
Esempio n. 6
0
/*
 * Debug print helper: prefixes "format" with a priority string and the
 * calling function's name, then emits the message via printk().
 *
 * funcname   - name of the calling function (prepended to the message)
 * error      - non-zero selects the KERN_ERR priority prefix
 * error_exit - non-zero additionally logs an "EXIT With Error" trace
 * format     - printf-style format string for the message proper
 *
 * Always returns 0.  In non-debug builds this is a no-op.
 *
 * Fix: the original had "return 0;" only inside #ifdef DEBUG_BUILD_TYPE,
 * so non-debug builds fell off the end of a non-void function
 * (undefined behavior if the result is used).  The return is now
 * unconditional.
 */
int pd_print(const char *funcname, const int error,
	const int error_exit, const char *format, ...)
{
#ifdef DEBUG_BUILD_TYPE
	va_list ap;
	unsigned int *blah;
	char *priority = error ? KERN_ERR : EMGD_DEBUG_MSG_PRIORITY;
	char *fmt = NULL;

	/* Can't directly use the EMGD_DEBUG_S macro (because "format" is a string
	 * variable), so duplicate some of it here:
	 */
	if (!(emgd_debug && emgd_debug-> MODULE_NAME)) {
		return 0;
	}

	va_start(ap, format);
	/* NOTE(review): reinterpreting a va_list as an array of unsigned int
	 * is non-portable; it happens to match the ABIs this driver targets.
	 */
	blah = (unsigned int *)ap;

	if (error_exit) {
		EMGD_DEBUG("EXIT With Error...");
	}

	/* Create a new format string, with all of the correct parts: */
	fmt = OS_ALLOC(strlen(priority) + strlen(funcname) +
		strlen(format) + 2);
	if (fmt == NULL) {
		/* Allocation failed: print without the priority/name prefix. */
		printk(format, blah[0], blah[1], blah[2], blah[3], blah[4], blah[5],
			blah[6], blah[7], blah[8], blah[9]);
	} else {
		sprintf(fmt, "%s%s %s", priority, funcname, format);
		printk(fmt, blah[0], blah[1], blah[2], blah[3], blah[4], blah[5],
			blah[6], blah[7], blah[8], blah[9]);
		OS_FREE(fmt);
	}
	printk("\n");
	va_end(ap);
#else
	/* Parameters are only consumed in debug builds. */
	(void)funcname;
	(void)error;
	(void)error_exit;
	(void)format;
#endif
	return 0;
}
Esempio n. 7
0
/*
 * Send a GATT server write confirmation for the given connection,
 * attribute handle and ATT status.
 */
ble_error_t ble_gatts_write_cfm(uint16_t conn_idx, uint16_t handle, att_error_t status)
{
        irb_ble_gatts_write_cfm_cmd_t *cmd;
        irb_ble_gatts_write_cfm_rsp_t *rsp;
        ble_error_t ret;

        /* Build the write-confirmation command message. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_WRITE_CFM_CMD, sizeof(*cmd));
        cmd->status = status;
        cmd->handle = handle;
        cmd->conn_idx = conn_idx;

        if (!irb_execute(cmd, (void **) &rsp)) {
                return BLE_ERROR_FAILED;
        }

        ret = rsp->status;
        /* Free the response message once its status has been captured. */
        OS_FREE(rsp);
        return ret;
}
Esempio n. 8
0
/*
 * Set the value of a GATT server attribute.  The value bytes are copied
 * inline into the command message, so the caller's buffer may be
 * released as soon as this returns.
 */
ble_error_t ble_gatts_set_value(uint16_t handle, uint16_t length, const void *value)
{
        irb_ble_gatts_set_value_cmd_t *cmd;
        irb_ble_gatts_set_value_rsp_t *rsp;
        ble_error_t result = BLE_ERROR_FAILED;

        /* The command carries the attribute value inline, hence the
           extra "length" bytes in the allocation. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SET_VALUE_CMD, sizeof(*cmd) + length);
        cmd->handle = handle;
        cmd->length = length;
        memcpy(cmd->value, value, length);

        if (irb_execute(cmd, (void **) &rsp)) {
                result = rsp->status;
                OS_FREE(rsp);
        }

        return result;
}
Esempio n. 9
0
/*
 * Add a GATT service of the given type and UUID, reserving num_attrs
 * attribute slots for it.
 */
ble_error_t ble_gatts_add_service(const att_uuid_t *uuid, const gatt_service_t type, uint16_t num_attrs)
{
        irb_ble_gatts_service_add_cmd_t *cmd;
        irb_ble_gatts_service_add_rsp_t *rsp;
        ble_error_t ret = BLE_ERROR_FAILED;

        /* Build the service-add command message. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SERVICE_ADD_CMD, sizeof(*cmd));
        cmd->num_attrs = num_attrs;
        cmd->type = type;
        cmd->uuid = *uuid;

        if (irb_execute(cmd, (void **) &rsp)) {
                ret = rsp->status;
                OS_FREE(rsp);
        }

        return ret;
}
Esempio n. 10
0
/*
 * Disable the GATT service identified by its declaration handle.
 */
ble_error_t ble_gatts_disable_service(uint16_t handle)
{
        irb_ble_gatts_service_disable_cmd_t *cmd;
        irb_ble_gatts_service_disable_rsp_t *rsp;
        ble_error_t status = BLE_ERROR_FAILED;

        /* Build and dispatch the disable command for the given handle. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SERVICE_DISABLE_CMD, sizeof(*cmd));
        cmd->handle = handle;

        if (irb_execute(cmd, (void **) &rsp)) {
                status = rsp->status;

                /* free message */
                OS_FREE(rsp);
        }

        return status;
}
Esempio n. 11
0
/*!
 * This function is exported directly. It will shutdown an instance
 * of the HAL that was initialized with igd_driver_init.
 *
 * Since the symbol is exported as part of the documented API it must
 * always exist, however it becomes an empty function when the init
 * module is not fully included.
 *
 * @param driver_handle handle returned by igd_driver_init; a NULL
 *        handle is asserted on and then ignored (no-op).
 *
 * @return void
 */
void igd_driver_shutdown(igd_driver_h driver_handle)
{
	igd_context_t *context = (igd_context_t *)driver_handle;

	EMGD_TRACE_ENTER;

	EMGD_ASSERT(context, "Null Driver Handle", );

	/* Fix: the original called init_dispatch->shutdown(context) before
	 * the NULL check, while guarding only the free.  Bail out early so
	 * a NULL handle is never passed to the shutdown dispatch.
	 */
	if (!context) {
		EMGD_TRACE_EXIT;
		return;
	}

	/* Shutdown the device context */
	init_dispatch->shutdown(context);

	/* release the driver's context */
	EMGD_DEBUG("Freeing context");
	OS_FREE(context);

	EMGD_TRACE_EXIT;
	return;
}
Esempio n. 12
0
/*
 * Tear down a log handle created by log_open(): write a closing note to
 * the backing file (if one is open), close it, destroy the rwlock and
 * free the handle.  A NULL handle is silently ignored.
 */
void log_close(void *log)
{
    log_t *hnd = (log_t *)log;

    if (NULL == hnd)
    {
        return;
    }

    if (NULL != hnd->disk_hnd)
    {
        char date_time[DATA_TIME_STR_LEN];

        /* Timestamp the close marker before shutting the file. */
        os_get_date_time_string(date_time, DATA_TIME_STR_LEN);
        os_file_printf(hnd->disk_hnd, "%s %s\n", date_time, "NOTE: LOG FILE CLOSE!!!");
        os_file_close(hnd->disk_hnd);
        hnd->disk_hnd = NULL;
    }

    OS_RWLOCK_DESTROY(&hnd->rwlock);
    OS_FREE(hnd);
}
Esempio n. 13
0
/*
 * Send a Service Changed indication to the given connection for the
 * attribute handle range [start_handle, end_handle].
 */
ble_error_t ble_gatts_service_changed_ind(uint16_t conn_idx, uint16_t start_handle,
                                                                        uint16_t end_handle)
{
        irb_ble_gatts_service_changed_ind_cmd_t *cmd;
        irb_ble_gatts_service_changed_ind_rsp_t *rsp;
        ble_error_t ret;

        /* Build the Service Changed indication command. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SERVICE_CHANGED_IND_CMD, sizeof(*cmd));
        cmd->end_handle = end_handle;
        cmd->start_handle = start_handle;
        cmd->conn_idx = conn_idx;

        if (!irb_execute(cmd, (void **) &rsp)) {
                return BLE_ERROR_FAILED;
        }

        ret = rsp->status;
        /* Free the response once its status has been captured. */
        OS_FREE(rsp);
        return ret;
}
/*
 * Create a binary-semaphore based lock.
 *
 * Returns the new lock handle, or NULL if either allocation fails
 * (partial allocations are released).
 */
os_lock_t os_create_lock(void)
{
    os_lock_pvt_t *p_lock = OS_ALLOC(sizeof(os_lock_pvt_t));

    if (NULL == p_lock) {
        OS_ERROR("OS_ALLOC failed\n");
        return NULL;
    }

    p_lock->p_sema = OS_ALLOC(sizeof(os_sema_t));
    if (NULL == p_lock->p_sema) {
        OS_ERROR("OS_ALLOC failed\n");
        /* Undo the outer allocation before bailing out. */
        OS_FREE(p_lock);
        return NULL;
    }

    /* Initial count of 1: the lock starts out available. */
    os_sema_init(p_lock->p_sema, 1);
    return ((os_lock_t)p_lock);
}
Esempio n. 15
0
/*
 * Apply a user-DB change increment for every active connection, using
 * the user id associated with src_conn_idx, and optionally send the
 * increment notification afterwards.
 */
void uds_set_db_increment(ble_service_t *svc, uint16_t src_conn_idx, uint32_t increment, bool notify)
{
        uint8_t num_conn;
        uint16_t *conn_idx;
        uint8_t user_id;
        uint8_t i;

        user_id = uds_get_user_id(svc, src_conn_idx);
        ble_gap_get_connected(&num_conn, &conn_idx);

        /* Walk the connection list from the last entry down, matching
           the original iteration order. */
        for (i = num_conn; i > 0; i--) {
                set_db_increment(svc, conn_idx[i - 1], user_id, increment);
        }

        if (conn_idx) {
                OS_FREE(conn_idx);
        }

        if (notify) {
                notify_db_increment(svc, src_conn_idx, user_id, increment);
        }
}
Esempio n. 16
0
/*
 * Update the properties and permissions of the characteristic
 * identified by the given attribute handle.
 */
ble_error_t ble_gatts_set_characteristic_prop(uint16_t handle, gatt_prop_t prop, att_perm_t perm)
{
        irb_ble_gatts_service_characteristic_set_prop_cmd_t *cmd;
        irb_ble_gatts_service_characteristic_set_prop_rsp_t *rsp;
        ble_error_t result = BLE_ERROR_FAILED;

        /* Build the set-properties command message. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SERVICE_CHARACTERISTIC_SET_PROP_CMD, sizeof(*cmd));
        cmd->perm = perm;
        cmd->prop = prop;
        cmd->handle = handle;

        if (irb_execute(cmd, (void **) &rsp)) {
                result = rsp->status;

                /* free message */
                OS_FREE(rsp);
        }

        return result;
}
Esempio n. 17
0
/*
 * Register the most recently added GATT service with the stack.
 *
 * "handle" (optional) receives the service's base handle; the variadic
 * arguments are a NULL-terminated list of uint16_t* whose contents are
 * each incremented by that base handle.
 */
ble_error_t ble_gatts_register_service(uint16_t *handle, ...)
{
        va_list ap;
        irb_ble_gatts_service_register_cmd_t *cmd;
        irb_ble_gatts_service_register_rsp_t *rsp;
        ble_error_t ret = BLE_ERROR_FAILED;
        uint16_t *h;

        /* Build and dispatch the register command (no payload). */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SERVICE_REGISTER_CMD, sizeof(*cmd));

        if (!irb_execute(cmd, (void **) &rsp)) {
                return ret;
        }

        ret = rsp->status;

        if (handle) {
                *handle = rsp->handle;
        }

        /* Rebase every caller-supplied handle by the service's base
           handle; the list is terminated by a NULL pointer. */
        va_start(ap, handle);
        while ((h = va_arg(ap, uint16_t *)) != NULL) {
                *h += rsp->handle;
        }
        va_end(ap);

        /* free message */
        OS_FREE(rsp);

        return ret;
}
Esempio n. 18
0
/*
 * Drain and free all deferred work queued on the ic: the deferred MGMT
 * buffer queue and the NAWDS learn list.  Both defer flags are reset to
 * DEFER_DONE afterwards.
 */
void _ath_htc_netdeferfn_cleanup(struct ieee80211com *ic)
{
	wbuf_t buf;
	nawds_dentry_t *entry;

	/* Drain the deferred MGMT buffer queue, one buffer per lock hold. */
	for (;;) {
		OS_MGMT_LOCKBH(&ic->ic_mgmt_lock);
		buf = adf_nbuf_queue_remove(&ic->ic_mgmt_nbufqueue);
		OS_MGMT_UNLOCKBH(&ic->ic_mgmt_lock);

		if (!buf)
			break;
		wbuf_free(buf);
	}

	atomic_set(&ic->ic_mgmt_deferflags, DEFER_DONE);

	/* Drain the NAWDS learn list: unlink under the lock, free outside. */
	for (;;) {
		OS_NAWDSDEFER_LOCKBH(&ic->ic_nawdsdefer_lock);
		entry = TAILQ_FIRST(&ic->ic_nawdslearnlist);
		if (entry)
			TAILQ_REMOVE(&ic->ic_nawdslearnlist, entry, nawds_dlist);
		OS_NAWDSDEFER_UNLOCKBH(&ic->ic_nawdsdefer_lock);

		if (!entry)
			break;
		OS_FREE(entry);
	}

	atomic_set(&ic->ic_nawds_deferflags, DEFER_DONE);
}
Esempio n. 19
0
/*
 * Delete the p2p schedule module instance: frees its TSF timer (waiting
 * for any in-flight callbacks), destroys the command message queue and
 * lock, and releases the structure itself.  Always returns 0.
 *
 * NOTE(review): go_scheduler is dereferenced without a NULL check;
 * callers are presumably required to pass a valid handle.
 */
int
ieee80211_p2p_go_schedule_delete(
    ieee80211_p2p_go_schedule_t go_scheduler)
{
    IEEE80211_DPRINTF_IC(go_scheduler->ic, IEEE80211_VERBOSE_FUNCTION, IEEE80211_MSG_P2P_GO_SCH,
        "%s: called.\n", __func__);

    // free the tsf timer. Blocked to wait for any existing callbacks to end.
    if (ieee80211_tsftimer_free(go_scheduler->h_tsftimer, true)) {
       IEEE80211_DPRINTF_IC(go_scheduler->ic, IEEE80211_VERBOSE_SERIOUS, IEEE80211_MSG_P2P_GO_SCH,
             "%s: 0 ieee80211_tsftimer_free returns error.\n", __func__);
    }

    // By this point no scheduler callback should still be running.
    ASSERT(!go_scheduler->callback_active);

    OS_MESGQ_DESTROY(&go_scheduler->cmd_mesg_q);

    IEEE80211_P2P_GOSCHE_LOCK_DESTROY(go_scheduler);

    // free the structure ieee80211_p2p_go_schedule_t
    OS_FREE(go_scheduler);

    return 0;
}
Esempio n. 20
0
/*
 * Send a GATT server event (notification or indication, per "type") for
 * the given attribute handle to the given connection.  The payload is
 * copied inline into the command message.
 */
ble_error_t ble_gatts_send_event(uint16_t conn_idx, uint16_t handle, gatt_event_t type,
                                                                uint16_t length, const void *value)
{
        irb_ble_gatts_send_event_cmd_t *cmd;
        irb_ble_gatts_send_event_rsp_t *rsp;
        ble_error_t result = BLE_ERROR_FAILED;

        /* The event payload travels inline, hence the extra length. */
        cmd = alloc_ble_msg(IRB_BLE_GATTS_SEND_EVENT_CMD, sizeof(*cmd) + length);
        cmd->conn_idx = conn_idx;
        cmd->handle = handle;
        cmd->type = type;
        cmd->length = length;
        memcpy(cmd->value, value, length);

        if (irb_execute(cmd, (void **) &rsp)) {
                result = rsp->status;
                OS_FREE(rsp);
        }

        return result;
}
Esempio n. 21
0
/*
 * Create an instance of the p2p schedule module.
 *
 * Allocates the schedule structure, initializes its lock and command
 * message queue, and allocates a TSF timer.  On any failure every
 * partially-acquired resource is released and NULL is returned.
 */
ieee80211_p2p_go_schedule_t
ieee80211_p2p_go_schedule_create(
    osdev_t os_handle, wlan_if_t vap)
{
    struct ieee80211com         *ic;
    ieee80211_p2p_go_schedule_t h_schedule = NULL;
    bool                        bad_return = false;
    tsftimer_handle_t           h_tsftimer = NULL;
    bool                        msg_queue_init = false;

    ASSERT(vap);
    ic = vap->iv_ic;

    IEEE80211_DPRINTF_IC(ic, IEEE80211_VERBOSE_FUNCTION, IEEE80211_MSG_P2P_GO_SCH,
        "%s: called.\n", __func__);

    /* do/while(FALSE) so any failure can "break" to the common
     * cleanup/return code below. */
    do {
        // alloc the mem for structure of ieee80211_p2p_go_schedule
        h_schedule = (ieee80211_p2p_go_schedule_t)
                        OS_MALLOC(os_handle, sizeof(struct ieee80211_p2p_go_schedule), 0);
        if (h_schedule == NULL) {
            IEEE80211_DPRINTF_IC(ic, IEEE80211_VERBOSE_SERIOUS, IEEE80211_MSG_P2P_GO_SCH,
                "%s: Failed to alloc memory (size=%d).\n", __func__,
                   sizeof(struct ieee80211_p2p_go_schedule));
            bad_return = true;
            break;
        }

        // zero out and then init. the contents
        OS_MEMZERO(h_schedule, sizeof(struct ieee80211_p2p_go_schedule));
        h_schedule->vap = vap;
        h_schedule->ic = ic;
        h_schedule->os_handle = os_handle;
        IEEE80211_P2P_GOSCHE_LOCK_INIT(h_schedule);

        h_schedule->go_present = true;   /* GO start off from being awake */

        if (OS_MESGQ_INIT(os_handle, &h_schedule->cmd_mesg_q,
                          sizeof(ieee80211_p2p_go_schedule_req) * P2P_GO_PS_MAX_NUM_SCHEDULE_REQ,
                          P2P_GO_SCH_MAX_EVENT_QUEUE_DEPTH, 
                          ieee80211_p2p_go_schedule_mesgq_event_handler,
                          h_schedule, 
                          MESGQ_PRIORITY_NORMAL, 
                          MESGQ_ASYNCHRONOUS_EVENT_DELIVERY) != 0)
        {
            IEEE80211_DPRINTF_IC(h_schedule->ic, IEEE80211_VERBOSE_SERIOUS, IEEE80211_MSG_P2P_GO_SCH,
                "%s : OS_MESGQ_INIT  failed.\n", __func__);
            bad_return = true;
            break;
        }
        msg_queue_init = true;

        /* alloc a tsf timer */
        h_tsftimer = ieee80211_tsftimer_alloc(ic->ic_tsf_timer, 0, my_tsftimer_timeout,
                                              P2P_GO_SCH_CB_ID, h_schedule, my_tsftimer_resync);
        if (h_tsftimer == NULL) {
            IEEE80211_DPRINTF_IC(h_schedule->ic, IEEE80211_VERBOSE_SERIOUS, IEEE80211_MSG_P2P_GO_SCH,
                "%s: 0 ieee80211_tsftimer_alloc returns error.\n", __func__);
            bad_return = true;
            break;
        }
        h_schedule->h_tsftimer = h_tsftimer;
        h_schedule->tsftimer_started = false;
        h_schedule->paused = false;

    } while ( FALSE );

    if (!bad_return) {
        return h_schedule;
    }
    else {
        /* Error unwind: release resources in reverse acquisition order. */

        /* Free the TSF Timer */
        if (h_tsftimer) {
            if (ieee80211_tsftimer_free(h_tsftimer, true)) {
               IEEE80211_DPRINTF_IC(h_schedule->ic, IEEE80211_VERBOSE_SERIOUS, IEEE80211_MSG_P2P_GO_SCH,
                "%s: 0 ieee80211_tsftimer_free returns error.\n", __func__);
            }
            h_tsftimer = NULL;
        }

        if (msg_queue_init) {
            OS_MESGQ_DESTROY(&h_schedule->cmd_mesg_q);
            msg_queue_init = false;
        }

        /* Free structure of ieee80211_p2p_go_schedule */
        if (h_schedule) {
            IEEE80211_P2P_GOSCHE_LOCK_DESTROY(h_schedule);
            OS_FREE(h_schedule);
            h_schedule = NULL;
        }

        return NULL;
    }
}
Esempio n. 22
0
/*
 * Allocate and DMA-map the descriptor area for a tx/rx queue and carve
 * it into ath_buf entries linked onto "head".
 *
 * sc           - driver softc
 * dd           - descriptor-DMA bookkeeping to fill in
 * head         - list that receives the carved ath_buf entries
 * name         - queue name ("tx", "rx", ...) used for logs and alloc flavor
 * nbuf/ndesc   - number of buffers and descriptors per buffer
 * is_tx        - non-zero selects the tx descriptor length
 * frag_per_msdu- fragment multiplier for legacy tx
 *
 * Returns 0 on success or a negative errno; on failure all partial
 * allocations are released and *dd is zeroed.
 */
int
ath_descdma_setup(
    struct ath_softc *sc,
    struct ath_descdma *dd, ath_bufhead *head,
    const char *name, int nbuf, int ndesc, int is_tx, int frag_per_msdu)
{
#define    DS2PHYS(_dd, _ds) \
    ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

    u_int8_t *ds;
    struct ath_buf *bf;
    int i, bsize, error, desc_len;

    uint32_t nbuf_left, nbuf_alloc, alloc_size, buf_arr_size;
    int j, k;
    struct ath_buf **bf_arr;

    if (is_tx) {
        desc_len = sc->sc_txdesclen;

        /* legacy tx descriptor needs to be allocated for each fragment */
        if (sc->sc_num_txmaps == 1) {
            nbuf = nbuf * frag_per_msdu; /*ATH_FRAG_PER_MSDU;*/
        }
    } else {
        desc_len = sizeof(struct ath_desc);
    }

    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf %d desc_len\n",
             __func__, name, nbuf, ndesc, desc_len);

    /* ath_desc must be a multiple of DWORDs */
    if ((desc_len % 4) != 0) {
        DPRINTF(sc, ATH_DEBUG_FATAL, "%s: ath_desc not DWORD aligned\n",
                __func__ );
        ASSERT((desc_len % 4) == 0);
        error = -ENOMEM;
        goto fail;
    }

    dd->dd_name = name;
    dd->dd_desc_len = desc_len * nbuf * ndesc;

    /*
     * WAR for bug 30982 (Merlin)
     * Need additional DMA memory because we can't use descriptors that cross the
     * 4K page boundary. Assume one skipped descriptor per 4K page.
     */

    if (!ath_hal_has4kbsplittrans(sc->sc_ah)) {
        int numdescpage = 4096/(desc_len*ndesc);
        dd->dd_desc_len = (nbuf/numdescpage + 1 ) * 4096;
    }
#if ATH_SUPPORT_DESC_CACHABLE
	if ((strcmp(name, "tx") == 0) && sc->sc_enhanceddmasupport) { 
    	dd->dd_desc = (void *)OS_MALLOC_NONCONSISTENT(sc->sc_osdev,
                                       dd->dd_desc_len, &dd->dd_desc_paddr,
                                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext),
                                       sc->sc_reg_parm.shMemAllocRetry);
	}
	else 
#endif
	{
    /* allocate descriptors */
    	dd->dd_desc = (void *)OS_MALLOC_CONSISTENT(sc->sc_osdev,
                                       dd->dd_desc_len, &dd->dd_desc_paddr,
                                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext),
                                       sc->sc_reg_parm.shMemAllocRetry);
	}
    if (dd->dd_desc == NULL) {
        error = -ENOMEM;
        goto fail;
    }
    ds = dd->dd_desc;
    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
            __func__, dd->dd_name, ds, (u_int32_t) dd->dd_desc_len,
            ito64(dd->dd_desc_paddr), /*XXX*/ (u_int32_t) dd->dd_desc_len);

    /* allocate buffers */
    bsize = sizeof(struct ath_buf) * nbuf;

    /* The ath_buf pool is split into MAX_BUF_MEM_ALLOC_SIZE-sized chunks
     * tracked through a NULL-terminated pointer array (bf_arr). */
    buf_arr_size = ((nbuf * sizeof(struct ath_buf))/MAX_BUF_MEM_ALLOC_SIZE + 2) ;   /* one extra element is reserved at end to indicate the end of array */
    bf_arr = (struct ath_buf **)OS_MALLOC(sc->sc_osdev, 
                                        (sizeof(struct ath_buf *) * buf_arr_size), GFP_KERNEL);
    if(!bf_arr)
    {
        error = -ENOMEM;
        goto fail2;
    }
    OS_MEMZERO(bf_arr, (buf_arr_size * sizeof(struct ath_buf *)));

    TAILQ_INIT(head);

    nbuf_left = nbuf;
    nbuf_alloc = (MAX_BUF_MEM_ALLOC_SIZE / (sizeof(struct ath_buf)));

    for(j = 0; (nbuf_left && (j < (buf_arr_size-1))); j++)
    {
        nbuf_alloc = MIN(nbuf_left, nbuf_alloc);
        alloc_size = (nbuf_alloc * sizeof(struct ath_buf));

        bf_arr[j] = (struct ath_buf *)OS_MALLOC(sc->sc_osdev, alloc_size, GFP_KERNEL);
        if(bf_arr[j] == NULL)
        {
            /* Undo every chunk allocated so far, then release the
             * descriptor memory via fail2. */
            for(k = 0; k < j; k++)
            {
                OS_FREE(bf_arr[k]);
                bf_arr[k] = NULL;
            }
            error = -ENOMEM;
            goto fail2;
        }
        else
        {
            OS_MEMZERO(bf_arr[j], alloc_size);
            nbuf_left -= nbuf_alloc;
            bf = bf_arr[j];
            /* Wire each ath_buf to its descriptor slice and queue it. */
            for (i = 0; i < nbuf_alloc; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);
                if (!ath_hal_has4kbsplittrans(sc->sc_ah)) {
                    /*
                    * WAR for bug 30982.
                    * Skip descriptor addresses which can cause 4KB boundary crossing (addr + length)
                    * with a 32 dword descriptor fetch.
                    */
                    if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, desc_len * ndesc)) {
                        ds += 0x1000 - (bf->bf_daddr & 0xFFF);    /* start from the next page */
                        bf->bf_desc = ds;
                        bf->bf_daddr = DS2PHYS(dd, ds);
                    }
                }
                TAILQ_INSERT_TAIL(head, bf, bf_list);
            }
        }
    }

    dd->dd_bufptr = bf_arr;

#ifdef ATH_DEBUG_MEM_LEAK
    dd->dd_num_buf = nbuf;
#endif

    /* 
     * For OS's that need to allocate dma context to be used to 
     * send down to hw, do that here. (BSD is the only one that needs
     * it currently.)
     */ 
    ALLOC_DMA_CONTEXT_POOL(sc->sc_osdev, name, nbuf);

    return 0;
fail2:

#if ATH_SUPPORT_DESC_CACHABLE
	if ((strcmp(name, "tx") == 0) && sc->sc_enhanceddmasupport) { 
    	OS_FREE_NONCONSISTENT(sc->sc_osdev, dd->dd_desc_len,
                       dd->dd_desc, dd->dd_desc_paddr,
                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext));
	}else 
#endif
	{
	    OS_FREE_CONSISTENT(sc->sc_osdev, dd->dd_desc_len,
                       dd->dd_desc, dd->dd_desc_paddr,
                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext));
	}
    if (bf_arr) {
        OS_FREE(bf_arr);
    }
fail:
    OS_MEMZERO(dd, sizeof(*dd));
    return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
Esempio n. 23
0
/* This function initializes the radar filter tables from the supplied
 * radar_info, used when the ath dfs domain is uninitialized or differs
 * from the HAL dfs domain.
 *
 * Returns DFS_STATUS_SUCCESS or DFS_STATUS_FAIL.  An unknown/missing
 * dfs domain disables radar detection but still reports success so that
 * dfs_attach() does not fail.
 */
int dfs_init_radar_filters(struct ieee80211com *ic,
  struct ath_dfs_radar_tab_info *radar_info)
{
    u_int32_t T, Tmax;
    int numpulses,p,n, i;
    int numradars = 0, numb5radars = 0;
    struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
    struct dfs_filtertype *ft = NULL;
    struct dfs_filter *rf=NULL;
    struct dfs_pulse *dfs_radars;
    struct dfs_bin5pulse *b5pulses=NULL;
    int32_t min_rssithresh=DFS_MAX_RSSI_VALUE;
    u_int32_t max_pulsedur=0;

    if (dfs == NULL) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                        "%s[%d]: dfs is NULL", __func__, __LINE__);
        return DFS_STATUS_FAIL;
    }
    /* clear up the dfs domain flag first */
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    dfs->ath_dfs_isdfsregdomain = 0;
#endif

    /*
     * If radar_info is NULL or dfsdomain is NULL, treat
     * the rest of the radar configuration as suspect.
     */
    if (radar_info == NULL || radar_info->dfsdomain == 0) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                        "%s[%d]: Unknown dfs domain %d ",
                        __func__, __LINE__, dfs->dfsdomain);
        /* Disable radar detection since we don't have a radar domain */
        dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
        /* Returning success though we are not completing init. A failure
         * will fail dfs_attach also.
         */
        return DFS_STATUS_SUCCESS;
    }

    VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
                    "%s[%d]:dfsdomain=%d, numradars=%d, numb5radars=%d",
                    __func__, __LINE__, radar_info->dfsdomain,
                    radar_info->numradars, radar_info->numb5radars);
    dfs->dfsdomain = radar_info->dfsdomain;
    dfs_radars = radar_info->dfs_radars;
    numradars = radar_info->numradars;
    b5pulses = radar_info->b5pulses;
    numb5radars = radar_info->numb5radars;

    /* XXX this should be an explicit copy of some sort! */
    dfs->dfs_defaultparams = radar_info->dfs_defaultparams;

#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    dfs->ath_dfs_isdfsregdomain = 1;
#endif

    dfs->dfs_rinfo.rn_numradars = 0;
    /* Clear filter type table */
    for (n = 0; n < 256; n++) {
        for (i=0;i<DFS_MAX_RADAR_OVERLAP; i++)
            (dfs->dfs_radartable[n])[i] = -1;
    }
    /* Now, initialize the radar filters.  Radars with identical duration
     * characteristics share one dfs_filtertype entry; a new entry is
     * created when no existing one matches. */
    for (p=0; p<numradars; p++) {
    ft = NULL;
    for (n=0; n<dfs->dfs_rinfo.rn_numradars; n++) {
        if ((dfs_radars[p].rp_pulsedur == dfs->dfs_radarf[n]->ft_filterdur) &&
           (dfs_radars[p].rp_numpulses == dfs->dfs_radarf[n]->ft_numpulses) &&
           (dfs_radars[p].rp_mindur == dfs->dfs_radarf[n]->ft_mindur) &&
           (dfs_radars[p].rp_maxdur == dfs->dfs_radarf[n]->ft_maxdur)) {
           ft = dfs->dfs_radarf[n];
           break;
        }
    }
    if (ft == NULL) {
        /* No filter of the appropriate dur was found */
        if ((dfs->dfs_rinfo.rn_numradars+1) >DFS_MAX_RADAR_TYPES) {
         DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: Too many filter types",
         __func__);
         goto bad4;
        }
        ft = dfs->dfs_radarf[dfs->dfs_rinfo.rn_numradars];
        ft->ft_numfilters = 0;
        ft->ft_numpulses = dfs_radars[p].rp_numpulses;
        ft->ft_patterntype = dfs_radars[p].rp_patterntype;
        ft->ft_mindur = dfs_radars[p].rp_mindur;
        ft->ft_maxdur = dfs_radars[p].rp_maxdur;
        ft->ft_filterdur = dfs_radars[p].rp_pulsedur;
        ft->ft_rssithresh = dfs_radars[p].rp_rssithresh;
        ft->ft_rssimargin = dfs_radars[p].rp_rssimargin;
        ft->ft_minpri = 1000000;

        /* Track the loosest RSSI threshold / longest pulse over all
         * filter types for the summary values stored in rn_* below. */
        if (ft->ft_rssithresh < min_rssithresh)
        min_rssithresh = ft->ft_rssithresh;
        if (ft->ft_maxdur > max_pulsedur)
        max_pulsedur = ft->ft_maxdur;
        /* Register this filter type in the per-duration lookup table
         * for every duration in [ft_mindur, ft_maxdur]. */
        for (i=ft->ft_mindur; i<=ft->ft_maxdur; i++) {
               u_int32_t stop=0,tableindex=0;
               while ((tableindex < DFS_MAX_RADAR_OVERLAP) && (!stop)) {
                   if ((dfs->dfs_radartable[i])[tableindex] == -1)
                  stop = 1;
                   else
                  tableindex++;
               }
               if (stop) {
                   (dfs->dfs_radartable[i])[tableindex] =
                  (int8_t) (dfs->dfs_rinfo.rn_numradars);
               } else {
                   DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                  "%s: Too many overlapping radar filters",
                  __func__);
                   goto bad4;
               }
        }
        dfs->dfs_rinfo.rn_numradars++;
    }
    /* Populate the per-radar filter inside the (shared) filter type. */
    rf = &(ft->ft_filters[ft->ft_numfilters++]);
    dfs_reset_delayline(&rf->rf_dl);
    numpulses = dfs_radars[p].rp_numpulses;

    rf->rf_numpulses = numpulses;
    rf->rf_patterntype = dfs_radars[p].rp_patterntype;
    rf->rf_pulseid = dfs_radars[p].rp_pulseid;
    rf->rf_mindur = dfs_radars[p].rp_mindur;
    rf->rf_maxdur = dfs_radars[p].rp_maxdur;
    rf->rf_numpulses = dfs_radars[p].rp_numpulses;
    rf->rf_ignore_pri_window = dfs_radars[p].rp_ignore_pri_window;
    /* Derive the PRI window [rf_minpri, rf_maxpri] from the pulse
     * frequency range, widened by the pulse variance. */
    T = (100000000/dfs_radars[p].rp_max_pulsefreq) -
         100*(dfs_radars[p].rp_meanoffset);
            rf->rf_minpri =
            dfs_round((int32_t)T - (100*(dfs_radars[p].rp_pulsevar)));
    Tmax = (100000000/dfs_radars[p].rp_pulsefreq) -
            100*(dfs_radars[p].rp_meanoffset);
      rf->rf_maxpri =
         dfs_round((int32_t)Tmax + (100*(dfs_radars[p].rp_pulsevar)));

        if( rf->rf_minpri < ft->ft_minpri )
        ft->ft_minpri = rf->rf_minpri;

    rf->rf_fixed_pri_radar_pulse = ( dfs_radars[p].rp_max_pulsefreq == dfs_radars[p].rp_pulsefreq ) ? 1 : 0;
    rf->rf_threshold = dfs_radars[p].rp_threshold;
    rf->rf_filterlen = rf->rf_maxpri * rf->rf_numpulses;

    VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO, "%s[%d]: minprf = %d maxprf = %d pulsevar = %d thresh=%d",
            __func__,__LINE__,dfs_radars[p].rp_pulsefreq, dfs_radars[p].rp_max_pulsefreq,
            dfs_radars[p].rp_pulsevar, rf->rf_threshold);
    VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO, "%s[%d]:minpri = %d maxpri = %d filterlen = %d filterID = %d",__func__,__LINE__,
            rf->rf_minpri, rf->rf_maxpri, rf->rf_filterlen, rf->rf_pulseid);

    }

#ifdef DFS_DEBUG
    dfs_print_filters(ic);
#endif
    /* (Re)build the bin5 radar table, replacing any previous one. */
    dfs->dfs_rinfo.rn_numbin5radars  = numb5radars;
    if (dfs->dfs_b5radars != NULL)
        OS_FREE(dfs->dfs_b5radars);

    dfs->dfs_b5radars = (struct dfs_bin5radars *)OS_MALLOC(NULL,
      numb5radars * sizeof(struct dfs_bin5radars), GFP_KERNEL);
    if (dfs->dfs_b5radars == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
        "%s: cannot allocate memory for bin5 radars",
        __func__);
        goto bad4;
    }
    for (n=0; n<numb5radars; n++) {
        dfs->dfs_b5radars[n].br_pulse = b5pulses[n];
        dfs->dfs_b5radars[n].br_pulse.b5_timewindow *= 1000000;
        if (dfs->dfs_b5radars[n].br_pulse.b5_rssithresh < min_rssithresh)
            min_rssithresh = dfs->dfs_b5radars[n].br_pulse.b5_rssithresh;
        if (dfs->dfs_b5radars[n].br_pulse.b5_maxdur > max_pulsedur)
            max_pulsedur = dfs->dfs_b5radars[n].br_pulse.b5_maxdur;
    }
    dfs_reset_alldelaylines(dfs);
    dfs_reset_radarq(dfs);
    dfs->dfs_curchan_radindex = -1;
    dfs->dfs_extchan_radindex = -1;
    dfs->dfs_rinfo.rn_minrssithresh = min_rssithresh;
    /* Convert durations to TSF ticks */
    dfs->dfs_rinfo.rn_maxpulsedur = dfs_round((int32_t)((max_pulsedur*100/80)*100));
    /* relax the max pulse duration a little bit due to inaccuracy caused by chirping. */
    dfs->dfs_rinfo.rn_maxpulsedur = dfs->dfs_rinfo.rn_maxpulsedur +20;
    VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
                    "%s[%d]: DFS min filter rssiThresh = %d",
                    __func__, __LINE__, min_rssithresh);
    VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
                    "%s[%d]:DFS max pulse dur = %d ticks",
                    __func__ ,__LINE__, dfs->dfs_rinfo.rn_maxpulsedur);
    return DFS_STATUS_SUCCESS;

 bad4:
     return DFS_STATUS_FAIL;
}
Esempio n. 24
0
/*
 * Retrieve one raw ADC capture into a caller-supplied buffer.
 *
 * outdata  - caller buffer laid out as a SPECTRAL_ADC_DATA header followed
 *            by SPECTRAL_ADC_SAMPLE entries.
 * outsize  - in:  total size of outdata in bytes
 *            out: bytes actually written (header + captured samples)
 *
 * Returns 0 on success or a negative errno-style code. The device must
 * already be in raw ADC capture mode (sc_raw_adc_capture_enabled set and
 * HAL opmode HAL_M_RAW_ADC_CAPTURE).
 */
int spectral_retrieve_raw_capture(ath_dev_t dev, void* outdata, u_int32_t *outsize)
{
    struct ath_softc *sc = ATH_DEV_TO_SC(dev);
    SPECTRAL_ADC_DATA *rparams = (SPECTRAL_ADC_DATA *)outdata;
    int error = 0;
    u_int32_t max_samples;
    HAL_STATUS status;
    SPECTRAL_ADC_DC_FILTER* dcf = NULL;

    if (!sc->sc_raw_adc_capture_enabled) {
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: called while not in raw capture mode!\n", __func__, __LINE__);
        return -ENOTCONN;
    }

    if (sc->sc_opmode != HAL_M_RAW_ADC_CAPTURE) {
        spectral_exit_raw_capture_mode(dev);
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: called while HAL mode not raw capture!\n", __func__, __LINE__);
        return -ESTALE;
    }

    /* Validate the output pointers before dereferencing *outsize (the
     * original code read *outsize first and only then tested outdata). */
    if (!outdata || !outsize) {
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: NULL output buffer/size pointer\n", __func__, __LINE__);
        return -EINVAL;
    }

    if (*outsize < sizeof(SPECTRAL_ADC_DATA)) {
        /* sizeof yields size_t: cast for %d to avoid a printf
         * format/argument mismatch on 64-bit builds. */
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: too small: expect >= %d, got %d\n", 
                __func__, __LINE__, (int)sizeof(SPECTRAL_ADC_DATA), *outsize);
        return -EINVAL; 
    }

    if (sc->sc_invalid) {
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: called but device is invalid or removed!\n", __func__, __LINE__);
        return -ENXIO;
    }

    if (!(sc->sc_adc_capture_flags & SPECTRAL_ADC_CAPTURE_FLAG_DISABLE_DC_FILTER)) {
        /* GFP_KERNEL: the original passed the misspelled "GPF_KERNEL";
         * every other OS_MALLOC call in this file uses GFP_* flags. */
        dcf = (SPECTRAL_ADC_DC_FILTER *) OS_MALLOC(dev, sizeof(SPECTRAL_ADC_DC_FILTER), GFP_KERNEL);
        if (dcf) {
            OS_MEMZERO(dcf, sizeof(SPECTRAL_ADC_DC_FILTER)); 
        } else {
            /* Non-fatal: capture proceeds without DC filtering. */
            DPRINTF(sc, ATH_DEBUG_ANY, "%s: alloc DC filter failed!\n", __func__);
        }
    }

    DPRINTF(sc, ATH_DEBUG_RECV, "%s[%d]: chain_mask=0x%x capture_flags=0x%x\n", __func__, __LINE__, sc->sc_adc_chain_mask, sc->sc_adc_capture_flags);
    ath_hal_begin_adc_capture(sc->sc_ah, (sc->sc_adc_capture_flags & SPECTRAL_ADC_CAPTURE_FLAG_AGC_AUTO));

    /* Fill in the capture header from the cached channel/chain state. */
    rparams->cap.version = SPECTRAL_ADC_API_VERSION;
    rparams->duration = SPECTRAL_RAW_ADC_SAMPLES_PER_CALL / SPECTRAL_RAW_ADC_SAMPLES_FREQUENCY_MHZ;
    rparams->sample_len = max_samples = (*outsize - sizeof(SPECTRAL_ADC_DATA))/sizeof(SPECTRAL_ADC_SAMPLE);
    rparams->cap.chain_info.num_chains = sc->sc_adc_num_chains;
    rparams->cap.freq = sc->sc_adc_freq;
    rparams->cap.ieee = sc->sc_adc_ieee;
    rparams->cap.chan_flags = sc->sc_adc_chan_flags;
    rparams->cap.chain_info.chain_mask = sc->sc_adc_chain_mask;
    rparams->cap.capture_flags = sc->sc_adc_capture_flags;
    ath_hal_calculate_adc_ref_powers(sc->sc_ah, sc->sc_adc_freq, &rparams->min_sample_val, &rparams->max_sample_val,
                                     rparams->ref_power, sizeof(rparams->ref_power)/sizeof(rparams->ref_power[0]));
    if ((status = ath_hal_retrieve_capture_data(sc->sc_ah, sc->sc_adc_chain_mask,
                                                1, /* disable dc filt here - now we do it after call */
                                                rparams->data, &rparams->sample_len)) != HAL_OK) {
        DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: ath_hal_retrieve_capture_data failed[%d]: cnt passed was %d[%d]\n", 
                __func__, __LINE__, status, max_samples, rparams->sample_len);
        error = (status == HAL_ENOMEM) ? -ENOMEM : -EIO; 
    } else {
        if (!(sc->sc_adc_capture_flags & SPECTRAL_ADC_CAPTURE_FLAG_DISABLE_DC_FILTER) &&
            dcf) 
        {
            DPRINTF(sc, ATH_DEBUG_RECV, "%s: running DC filter [wnd=%d] over %d chains now...\n", 
                    __func__, DC_FILT_WINDOW, sc->sc_adc_num_chains);
            spectral_run_dc_filter(dev, rparams, dcf);
        }
        /* Report the bytes actually produced (header + samples). */
        *outsize = sizeof(SPECTRAL_ADC_DATA) + rparams->sample_len*sizeof(SPECTRAL_ADC_SAMPLE);
    }

    if (dcf) {
        OS_FREE(dcf);
    }

    return error;
}
Esempio n. 25
0
/* Set application defined IEs
 *
 * Stores a copy of buf/buflen as the application IE for frame type ftype on
 * the given vap. A larger per-type buffer is allocated when the new IE
 * exceeds the previously allocated size; otherwise the existing buffer is
 * reused in place. Returns 0 on success, -EINVAL on bad arguments or
 * -ENOMEM on allocation failure.
 */
int wlan_mlme_set_appie(wlan_if_t vaphandle, ieee80211_frame_type ftype, u_int8_t *buf, u_int16_t buflen)
{
    struct ieee80211vap    *vap = vaphandle;
    struct ieee80211com    *ic = vap->iv_ic;
    int                    error = 0;
    u_int8_t               *iebuf = NULL;
    bool                   alloc_iebuf = FALSE;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    ASSERT(ftype < IEEE80211_FRAME_TYPE_MAX);

    if (ftype >= IEEE80211_FRAME_TYPE_MAX) {
        error = -EINVAL;
        goto exit;
    }

#if ATH_SUPPORT_HS20
    buflen = wlan_mlme_parse_appie(vap, ftype, buf, buflen);
#endif

    /*
     * Validate input up front. The original code performed this check only
     * after the old IE had been freed and replaced, so the error path left
     * the vap pointing at an uninitialized buffer and returned without
     * re-enabling app-IE beacon updates.
     */
    ASSERT(buflen == 0 || buf != NULL);
    if (buflen && buf == NULL) {
        error = -EINVAL;
        goto exit;
    }

    if (buflen > vap->iv_app_ie_maxlen[ftype]) {
        /* Allocate ie buffer */
        iebuf = OS_MALLOC(ic->ic_osdev, buflen, 0);

        if (iebuf == NULL) {
            error = -ENOMEM;
            goto exit;
        }

        alloc_iebuf = TRUE;
        vap->iv_app_ie_maxlen[ftype] = buflen;
    } else {
        /* Reuse the existing (large enough) per-type buffer. */
        iebuf = vap->iv_app_ie[ftype].ie;
    }

    IEEE80211_VAP_LOCK(vap);
    /* 
     * Temp: reduce window of race with beacon update in Linux AP.
     * In Linux AP, ieee80211_beacon_update is called in ISR, so
     * iv_lock is not acquired.
     */
    IEEE80211_VAP_APPIE_UPDATE_DISABLE(vap);

    /* Free existing buffer */
    if (alloc_iebuf == TRUE && vap->iv_app_ie[ftype].ie) {
        OS_FREE(vap->iv_app_ie[ftype].ie);
    }

    vap->iv_app_ie[ftype].ie = iebuf;
    vap->iv_app_ie[ftype].length = buflen;

    if (buflen) {
        /* Copy app ie contents and save pointer/length */
        OS_MEMCPY(iebuf, buf, buflen);
    }

    /* Set appropriate flag so that the IE gets updated in the next beacon */
    IEEE80211_VAP_APPIE_UPDATE_ENABLE(vap);
    IEEE80211_VAP_UNLOCK(vap);
    
exit:
    return error;
}
Esempio n. 26
0
/**
 * \brief BLE manager task
 *
 * Main loop of the BLE manager. Blocks on a task notification and then
 * services, in order:
 *  - mainBIT_ADAPTER_EVENT_QUEUE: one event from the BLE adapter's queue,
 *    either forwarded verbatim (passthrough mode) or dispatched to the
 *    GTL wait-queue / event handlers;
 *  - mainBIT_MANAGER_COMMAND_QUEUE: one IRB command from the application;
 *  - mainBIT_COMMIT_STORAGE: acquire/release storage so dirty contents are
 *    flushed to flash.
 * The task registers with the system watchdog and notifies it on every
 * loop iteration; monitoring is suspended only while blocked waiting for
 * a notification.
 */
static void ble_mgr_task(void *pvParameters)
{
        ad_ble_hdr_t *msg_rx;
        uint32_t ulNotifiedValue;
        OS_BASE_TYPE xResult;
        int8_t wdog_id;

        /* Register task to be monitored by watch dog. */
        wdog_id = sys_watchdog_register(false);

        for (;;) {
                /* Notify watch dog on each loop since there's no other trigger for this. */
                sys_watchdog_notify(wdog_id);

                /* Suspend monitoring while task is blocked on OS_TASK_NOTIFY_WAIT(). */
                sys_watchdog_suspend(wdog_id);

                /*
                 * Wait on any of the event group bits, then clear them all.
                 */
                xResult = OS_TASK_NOTIFY_WAIT(0x0, OS_TASK_NOTIFY_ALL_BITS, &ulNotifiedValue,
                                                                            OS_TASK_NOTIFY_FOREVER);
                OS_ASSERT(xResult == OS_OK);

                /* Resume watch dog monitoring. */
                sys_watchdog_notify_and_resume(wdog_id);

                if (ulNotifiedValue & mainBIT_ADAPTER_EVENT_QUEUE) {
                        /* Make sure there are messages waiting on the queue. */
                        if (!uxQueueMessagesWaiting(adapter_if->evt_q)) {
                                goto no_event;
                        }

                        /* Check if there is free space on BLE manager's event queue. */
                        if (uxQueueSpacesAvailable(mgr_if.evt_q)) {
                                /* Get message from queue. */
                                OS_QUEUE_GET(adapter_if->evt_q, &msg_rx, 0);
                                OS_ASSERT(msg_rx->op_code < AD_BLE_OP_CODE_LAST);

#ifdef BLE_STACK_PASSTHROUGH_MODE
                                /*
                                 * Passthrough: wrap the raw message in an IRB and hand
                                 * it straight to the event queue. Ownership of msg_rx
                                 * transfers with the IRB, so it is NOT freed here.
                                 */
                                {
                                        OS_IRB new_irb;

                                        /* Fill-in new IRB fields. */
                                        new_irb.status   = IRB_PENDING;
                                        new_irb.class_id = IRB_BLE;
                                        new_irb.ptr_buf  = msg_rx;

                                        /* Send directly to BLE manager's event queue. */
                                        ble_mgr_event_queue_send(&new_irb, OS_QUEUE_FOREVER);
                                }
#else

                                if (msg_rx->op_code == AD_BLE_OP_CODE_STACK_MSG) {
                                        irb_ble_stack_msg_t *stack_msg =
                                                                      (irb_ble_stack_msg_t*) msg_rx;

                                        /* In non-passthrough we only expect GTL messages. */
                                        OS_ASSERT(stack_msg->msg_type == GTL_MSG);

                                        /*
                                         * during reset we ignore messages other than GAPM_CMP_EVT
                                         * and GAPM_RESET operation
                                         */
                                        if (reset) {
                                                struct gapm_cmp_evt *evt;

                                                if (stack_msg->msg.gtl.msg_id != GAPM_CMP_EVT) {
                                                        goto rx_done;
                                                }

                                                evt = (void *) stack_msg->msg.gtl.param;

                                                if (evt->operation != GAPM_RESET) {
                                                        goto rx_done;
                                                }
                                        }

                                        /*
                                         * Check if someone is waiting for this message.
                                         * if not, try to handle message as an event.
                                         */
                                        if (!ble_gtl_waitqueue_match(&stack_msg->msg.gtl)) {
                                                if (!ble_gtl_handle_event(&stack_msg->msg.gtl)) {
                                                        /* Stack message is not handled by the manager. */
#ifdef DEBUG
                                                        configASSERT(0);
#endif
                                                }
                                        }
                                }
                                else if (msg_rx->op_code == AD_BLE_OP_CODE_ADAPTER_MSG) {
                                        ad_ble_msg_t *ad_msg = (ad_ble_msg_t *) msg_rx;

                                        /* In non-passthrough we only expect GTL messages. */
                                        OS_ASSERT(ad_msg->operation < AD_BLE_OP_LAST);

                                        /* Check if someone is waiting for this message. */
                                        ble_ad_msg_waitqueue_match(ad_msg);

                                }

                                /* In non-passthrough mode the manager owns msg_rx and
                                 * frees it here on every path (including the reset
                                 * short-circuits above). */
rx_done:
                                OS_FREE(msg_rx);
#endif
                                /*
                                 * Check if there are more messages waiting in the BLE adapter's
                                 * event queue.
                                 */
                                if (uxQueueMessagesWaiting(adapter_if->evt_q)) {
                                        OS_TASK_NOTIFY(mgr_if.task,
                                                mainBIT_ADAPTER_EVENT_QUEUE, OS_NOTIFY_SET_BITS);
                                }
                        }
                        else {
                                /* Set blocked flag to true. */
                                ble_mgr_blocked = true;
                        }
                }
/* Reached directly when the adapter queue bit was set but no message was
 * actually waiting; command-queue processing continues regardless. */
no_event:
                if (ulNotifiedValue & mainBIT_MANAGER_COMMAND_QUEUE) {
                        if (uxQueueMessagesWaiting(mgr_if.cmd_q)) {
                                OS_IRB irb_rx;

                                /* Get IRB from the queue. */
                                OS_QUEUE_GET(mgr_if.cmd_q, &irb_rx, 0);

                                if (irb_rx.status == IRB_COMPLETED) {
                                        /* Free message buffer if it was not freed by application. */
                                        irb_ble_free_msg(&irb_rx);
                                }
                                else if (irb_rx.status == IRB_ERROR) {
                                        /* Bounce errored IRBs back on the event queue. */
                                        ble_mgr_event_queue_send(&irb_rx, OS_QUEUE_FOREVER);
                                }
                                else if (irb_rx.status == IRB_PENDING) {
                                        /* New IRB from application. */
                                        if (!ble_irb_handle_msg(&irb_rx)) {
                                                /*
                                                 * No handler found for IRB - free command buffer
                                                 * because nothing else will free it.
                                                 */
                                                OS_FREE(irb_rx.ptr_buf);
                                                irb_rx.ptr_buf = NULL;
                                        }
                                }

                                /* Check if blocked and if there is space on the event queue. */
                                if (ble_mgr_blocked && uxQueueSpacesAvailable(mgr_if.evt_q)) {
                                        /* Set flag to false. */
                                        ble_mgr_blocked = false;

                                        /* Notify task to resume getting BLE adapter events. */
                                        OS_TASK_NOTIFY(mgr_if.task, mainBIT_ADAPTER_EVENT_QUEUE,
                                                OS_NOTIFY_SET_BITS);
                                }

                                /* Check if there are messages waiting in the command queue. */
                                if (uxQueueMessagesWaiting(mgr_if.cmd_q)) {
                                        OS_TASK_NOTIFY(mgr_if.task,
                                                mainBIT_MANAGER_COMMAND_QUEUE, OS_NOTIFY_SET_BITS);
                                }
                        }
                }

                /*
                 * Check this bit as last one since previous commands may also update storage. In
                 * such case changes will be written to flash already and there's no need to execute
                 * this twice in a row.
                 */
                if (ulNotifiedValue & mainBIT_COMMIT_STORAGE) {
                        /*
                         * To commit anything modified in storage it's enough to acquire and then
                         * immediately release lock - if dirty flag was set, contents of storage
                         * will be written to flash automatically.
                         */
                        storage_acquire();
                        storage_release();
                }

                /* Check if BLE adapter is blocked and if there is free space on its event queue. */
                if (ble_mgr_adapter_is_blocked() && uxQueueSpacesAvailable(adapter_if->evt_q)) {
                        /* Notify BLE adapter that there is free space on its event queue. */
                        ad_ble_notify_event_queue_avail();
                }
        }
}
Esempio n. 27
0
/* Interface functions */
/*
 * Allocate an offload-driver node for the station with the given MAC
 * address on this vap.
 *
 * Bumps scn->peer_count under scn_lock and fails when the configured peer
 * limit (wlan_resource_config.num_peers) would be exceeded. For regular
 * nodes (not tmpnode and not a self-peer) it also creates the peer on the
 * target via WMI and attaches a txrx peer; static WEP keys are pushed to
 * the new node when privacy is enabled and the node is not ourselves.
 *
 * Returns a pointer to the embedded ieee80211_node on success, NULL on
 * failure (peer_count is decremented again on every error path).
 *
 * NOTE(review): if ol_txrx_peer_attach() fails after
 * wmi_unified_peer_create_send() succeeded, the peer created on the target
 * is apparently never deleted here — confirm whether a WMI peer-delete is
 * required on that path.
 */
static struct ieee80211_node *
ol_ath_node_alloc(struct ieee80211vap *vap, const u_int8_t *macaddr, bool tmpnode)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ol_ath_vap_net80211 *avn = OL_ATH_VAP_NET80211(vap);
    struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic);
    struct ol_ath_node_net80211 *anode;

    /* Reserve a peer slot; counter is shared, so take scn_lock. */
    adf_os_spin_lock_bh(&scn->scn_lock);
    scn->peer_count++;
    if (scn->peer_count > scn->wlan_resource_config.num_peers) {
        adf_os_spin_unlock_bh(&scn->scn_lock);
        printk("%s: vap (%p) scn (%p) the peer count exceeds the supported number %d \n",
                __func__, vap, scn, scn->wlan_resource_config.num_peers);
        goto err_node_alloc;
    }
    adf_os_spin_unlock_bh(&scn->scn_lock);

    /* GFP_ATOMIC: this can be called from non-sleepable context. */
    anode = (struct ol_ath_node_net80211 *)OS_MALLOC(scn->sc_osdev,
                                                  sizeof(struct ol_ath_node_net80211),
                                                  GFP_ATOMIC);
    if (anode == NULL)
        goto err_node_alloc;

    OS_MEMZERO(anode, sizeof(struct ol_ath_node_net80211));

    anode->an_node.ni_vap = vap;

    /* do not create/delete peer on target for temp nodes and self-peers */
    if (!tmpnode && !is_node_self_peer(vap, macaddr)) {
        if (wmi_unified_peer_create_send(scn->wmi_handle, macaddr,avn->av_if_id)) {
            printk("%s : Unable to create peer in Target \n", __func__);
            OS_FREE(anode);
            goto err_node_alloc;
        }

        adf_os_spin_lock_bh(&scn->scn_lock);
        anode->an_txrx_handle = ol_txrx_peer_attach(scn->pdev_txrx_handle,
                avn->av_txrx_handle, (u_int8_t *) macaddr);

        if (anode->an_txrx_handle == NULL) {
			adf_os_spin_unlock_bh(&scn->scn_lock);
            printk("%s : Unable to attach txrx peer\n", __func__);
            OS_FREE(anode);
            goto err_node_alloc;
        }
        adf_os_spin_unlock_bh(&scn->scn_lock);

        /* static wep keys stored in vap needs to be
         * pushed to all nodes except self node
         */
        if(IEEE80211_VAP_IS_PRIVACY_ENABLED(vap) &&
                (OS_MEMCMP(macaddr,vap->iv_myaddr,IEEE80211_ADDR_LEN) != 0 )) {
            set_node_wep_keys(vap,macaddr);
        }
    }

    return &anode->an_node;

err_node_alloc:
    /* Roll back the peer-slot reservation taken at the top. */
    adf_os_spin_lock_bh(&scn->scn_lock);
    scn->peer_count--;
    adf_os_spin_unlock_bh(&scn->scn_lock);
    return NULL;

}
Esempio n. 28
0
/*!
 * This function is directly exported.
 *
 * Allocates the driver context, detects a supported VGA device, disables
 * legacy VGA decoding on secondary (SDVO) PCI devices so the VGA arbiter
 * does not block DRI, then queries the device-dependent init dispatcher.
 *
 * @param init_info filled in with bus/slot/func and device identification
 *                  data on success
 *
 * @return igd_driver_h opaque driver handle (an igd_context_t *)
 * @return NULL on failure
 */
igd_driver_h igd_driver_init( igd_init_info_t *init_info )
{
	igd_context_t *context;
	os_pci_dev_t pdev = (os_pci_dev_t)NULL;
	os_pci_dev_t vga_disable_dev;
	iegd_pci_t *found_device;
	int ret;
	int i;

	EMGD_TRACE_ENTER;

	/* Allocate a context */
	context = (void *) OS_ALLOC(sizeof(igd_context_t));
	fixme_vbios_context = context;
	if(!context) {
		EMGD_ERROR_EXIT("igd_driver_init failed to create context");
		return NULL;
	}
	OS_MEMSET(context, 0, sizeof(igd_context_t));

	/* Search VGA devices for a supported one */
	ret = detect_device(&found_device, &pdev);
	if(ret) {
		OS_FREE(context);
		/* Don't leave the global alias dangling at the freed context. */
		fixme_vbios_context = NULL;
		return NULL;
	}

	/*
	 * Some platforms (currently only Atom E6xx) use two PCI devices (the
	 * second device being for SDVO) and this causes the VGA arbiter to get
	 * involved.  Legacy VGA decoding must be disabled for all PCI devices
	 * except one, otherwise the VGA arbiter will prevent DRI usage in the
	 * X server.
	 */
	for (i = 0; i < MAX_LEGACY_VGA_DISABLE; i++) {
		vga_disable_dev = os_pci_find_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_SDVO_TNC, 0xFFFF, 0, 0, NULL);
		if (vga_disable_dev) {
			printk(KERN_INFO "VGA arbiter detected; disabling legacy VGA"
					" decoding on SDVO device\n");
			os_pci_disable_legacy_vga_decoding(vga_disable_dev);
			os_pci_free_device(vga_disable_dev);
		}
	}

	context->device_context.did = found_device->device_id;
	init_dispatch = (init_dispatch_t *)dispatch_acquire(context,
		init_dispatch_table);

	if(!init_dispatch) {
		EMGD_ERROR_EXIT("No dispatch found for listed device");
		/* Fix: this error path previously leaked the context allocation. */
		OS_FREE(context);
		fixme_vbios_context = NULL;
		return NULL;
	}

	ret = init_dispatch->query(context, init_dispatch, pdev, &init_info->bus,
		&init_info->slot, &init_info->func);
	if(ret) {
		OS_FREE(context);
		fixme_vbios_context = NULL;
		EMGD_ERROR_EXIT("Device Dependent Query Failed");
		return NULL;
	}

	/* init info */
	init_info->vendor_id = found_device->vendor_id;
	init_info->device_id = found_device->device_id;
	init_info->name = init_dispatch->name;
	init_info->chipset = init_dispatch->chipset;
	init_info->default_pd_list = init_dispatch->default_pd_list;

	/* NOTE(review): pdev obtained from detect_device() is not released on
	 * any path here — presumably ownership passes to the dispatcher/query;
	 * confirm against detect_device()'s contract. */

	EMGD_TRACE_EXIT;

	return (igd_driver_h)context;
}
Esempio n. 29
0
/*
 * Tear down all DFS state attached to the given ieee80211com: cancel every
 * outstanding timer, drain the radar/AR queues, free each dynamically
 * allocated table and finally release the ath_dfs structure itself.
 */
void
dfs_detach(struct ieee80211com *ic)
{
    struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
    int i;
    int arq_empty;

    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n", __func__);
        return;
    }

    /* Bug 29099: make sure all outstanding timers are cancelled first. */
    if (dfs->ath_radar_tasksched) {
        OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
        dfs->ath_radar_tasksched = 0;
    }

    if (dfs->ath_dfstest) {
        OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
        dfs->ath_dfstest = 0;
    }

#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    if (dfs->ic_dfswait) {
        OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
        dfs->ath_dfswait = 0;
    }

    OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
    if (dfs->dfs_nol != NULL) {
        struct dfs_nolelem *nol, *next;

        /* Bug 29099: each NOL element has its own timer; cancel it and
         * free the element. */
        nol = dfs->dfs_nol;
        while (nol != NULL) {
            OS_CANCEL_TIMER(&nol->nol_timer);
            next = nol->nol_next;
            OS_FREE(nol);
            nol = next;
        }
        dfs->dfs_nol = NULL;
    }
#endif
#endif

    /* Return radar events to the free queue and clear the delay lines. */
    dfs_reset_radarq(dfs);
    dfs_reset_alldelaylines(dfs);

    /* Free the pulse log. */
    if (dfs->pulses != NULL) {
        OS_FREE(dfs->pulses);
        dfs->pulses = NULL;
    }

    /* Free each per-type radar filter. */
    for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) {
        if (dfs->dfs_radarf[i] != NULL) {
            OS_FREE(dfs->dfs_radarf[i]);
            dfs->dfs_radarf[i] = NULL;
        }
    }

    /* Free the radar lookup table and its 256 per-index entries. */
    if (dfs->dfs_radartable != NULL) {
        for (i = 0; i < 256; i++) {
            if (dfs->dfs_radartable[i] != NULL) {
                OS_FREE(dfs->dfs_radartable[i]);
                dfs->dfs_radartable[i] = NULL;
            }
        }
        OS_FREE(dfs->dfs_radartable);
        dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        dfs->ath_dfs_isdfsregdomain = 0;
#endif
    }

    if (dfs->dfs_b5radars != NULL) {
        OS_FREE(dfs->dfs_b5radars);
        dfs->dfs_b5radars = NULL;
    }

    dfs_reset_ar(dfs);

    /* Drain the AR queue if anything is still pending. */
    ATH_ARQ_LOCK(dfs);
    arq_empty = STAILQ_EMPTY(&(dfs->dfs_arq));
    ATH_ARQ_UNLOCK(dfs);
    if (!arq_empty) {
        dfs_reset_arq(dfs);
    }

    if (dfs->events != NULL) {
        OS_FREE(dfs->events);
        dfs->events = NULL;
    }

    dfs_nol_timer_cleanup(dfs);
    OS_FREE(dfs);

    ic->ic_dfs = NULL;
}
Esempio n. 30
0
/*
 * create a HSM and return a handle to it.
 *
 * Validates the state_info table (entries must be in order, super-states
 * must name a valid initial sub-state, and the parent hierarchy must be
 * loop-free), then allocates the HSM, initializes its message queue
 * (synchronous or asynchronous delivery per IEEE80211_HSM_SYNCHRONOUS in
 * flags) and copies the name. Returns NULL on any validation or
 * allocation failure.
 */
ieee80211_hsm_t ieee80211_sm_create(osdev_t                       oshandle, 
                                    const char                    *name,
                                    void                          *ctx, 
                                    u_int8_t                      init_state, 
                                    const ieee80211_state_info    *state_info,
                                    u_int8_t                      num_states,
                                    u_int8_t                      max_queued_events, 
                                    u_int16_t                     event_data_len, 
                                    mesgq_priority_t              priority,
                                    u_int32_t                     flags,
                                    void (*ieee80211_debug_print) (void *ctx,const char *fmt,...), 
                                    const char                    **event_names, 
                                    u_int32_t                     num_event_names)
{
    ieee80211_hsm_t              hsm;
    u_int32_t                    i;
    mesgq_event_delivery_type    mq_type;

#ifdef ATH_DISABLE_DBG_PRINT    
#define ieee80211_debug_print(ctx,fmt...)
#endif
    if (num_states > IEEE80211_HSM_MAX_STATES) {
        return NULL;
    }

    /*
     * validate the state_info table.
     * the entries need to be valid and also  
     * need to be in order.
     */
    for (i = 0; i < num_states; ++i) {
        u_int8_t state_visited[IEEE80211_HSM_MAX_STATES] = {0};
        u_int8_t state,next_state;
        /*
         * make sure that the state definitions are in order
         */
        if ((state_info[i].state >= IEEE80211_HSM_MAX_STATES) || (state_info[i].state != i)) {
            if (ieee80211_debug_print)
                ieee80211_debug_print(ctx,  "HSM: %s : entry %d has invalid state %d \n", name, i,state_info[i].state); 

            return NULL;
        }
        if ((state_info[i].has_substates) && (state_info[i].initial_substate == IEEE80211_HSM_STATE_NONE)) {
            if (ieee80211_debug_print)
                ieee80211_debug_print(ctx,  "HSM: %s : entry %d is marked as super state but has no initial sub state \n", name, i); 

            return NULL;
        }
        if ((!state_info[i].has_substates) && (state_info[i].initial_substate != IEEE80211_HSM_STATE_NONE)) {
            if (ieee80211_debug_print)
                ieee80211_debug_print(ctx,  "HSM: %s : entry %d is not a super state but has initial sub state \n", name, i); 

            return NULL;
        }
        if ((state_info[i].has_substates) && (state_info[state_info[i].initial_substate].parent_state != i)) {
            if (ieee80211_debug_print)
                ieee80211_debug_print(ctx,  "HSM: %s : entry %d initial sub state is not a sub state \n", name, i); 

            return NULL;
        }
        /* detect if there is any loop in the hierarichy */
        state = state_info[i].state;
        while(state != IEEE80211_HSM_STATE_NONE) {
            if (state_visited[state]) {
                if (ieee80211_debug_print)
                    ieee80211_debug_print(ctx,  "HSM: %s : detected a loop with entry %d \n", name, i); 

                return NULL;
            }
            
            state_visited[state] = 1;
            next_state = state_info[state].parent_state;

            if (next_state != IEEE80211_HSM_STATE_NONE) {
                if (!state_info[next_state].has_substates) {
                    if (ieee80211_debug_print)
                        ieee80211_debug_print(ctx,  "HSM: %s : state %d is marked as perent of %d but is not a super state \n", 
                                              name,next_state,state); 

                    return NULL;
                }
            }
            state = next_state;
        }               
    }
    
    hsm = (ieee80211_hsm_t) OS_MALLOC(oshandle, sizeof(ieee80211_hsm), 0);
    if (hsm == NULL) {
        if (ieee80211_debug_print)
            ieee80211_debug_print(ctx, "HSM: %s : hsm allocation failed \n", name); 

        return NULL;
    }
    /* Clear hsm structure */
    OS_MEMZERO(hsm, sizeof(ieee80211_hsm));
    if(flags & IEEE80211_HSM_SYNCHRONOUS) {
        mq_type = MESGQ_SYNCHRONOUS_EVENT_DELIVERY;
    } else {
        mq_type = MESGQ_ASYNCHRONOUS_EVENT_DELIVERY;
    }
    if (OS_MESGQ_INIT(oshandle, &hsm->mesg_q, event_data_len,
                      max_queued_events, ieee80211_sm_dispatch_sync_internal, hsm, priority, mq_type) != 0) {
        if (ieee80211_debug_print)
            ieee80211_debug_print(ctx, "HSM: %s : OS_MESGQ_INIT  failed \n", name); 

        OS_FREE(hsm);

        return NULL;
    }

#if ENABLE_HSM_HISTORY
    hsm_history_init(hsm);
#endif    /* ENABLE_HISTORY */

#ifndef ATH_DISABLE_DBG_PRINT
#undef ieee80211_debug_print
#endif
    hsm->cur_state                 = init_state;
    hsm->num_states                = num_states;
    hsm->oshandle                  = oshandle;
    hsm->state_info                = state_info;
    hsm->ctx                       = ctx;
    hsm->last_event                = IEEE80211_HSM_EVENT_NONE;
    hsm->ieee80211_hsm_debug_print = ieee80211_debug_print;
    hsm->in_state_transition       = false;
    hsm->event_names               = event_names;
    hsm->num_event_names           = num_event_names;

    /*
     * Bounded copy of the name. Fix: the original loop copied up to
     * IEEE80211_HSM_MAX_NAME characters and only wrote the terminator when
     * there was room left, so names of length >= IEEE80211_HSM_MAX_NAME
     * left hsm->name unterminated (CERT STR32-C). Copy at most MAX-1
     * characters and always NUL-terminate; the bound is also checked
     * before reading name[i].
     */
    i = 0;
    while ((i < IEEE80211_HSM_MAX_NAME - 1) && (name[i] != '\0')) {
        hsm->name[i] = name[i];
        ++i;
    }
    hsm->name[i] = '\0';
    
    return hsm;    
}