/**
 * wmi_unified_attach() - allocate and initialize a WMI handle
 * @scn_handle: opaque target (SCN) handle stored in the new WMI handle
 * @func: WoW tx-complete callback recorded on the handle
 *
 * Allocates a zeroed struct wmi_unified with GFP_ATOMIC (safe for atomic
 * context), initializes its pending-command / suspend counters, event-queue
 * lock and queue, the rx event work item, and the optional event-logging
 * lock, then records the supplied callback.
 *
 * Return: pointer to the new WMI handle, or NULL on allocation failure.
 */
void *wmi_unified_attach(ol_scn_t scn_handle, wma_wow_tx_complete_cbk func)
{
	struct wmi_unified *wmi_handle;

	wmi_handle = (struct wmi_unified *)OS_MALLOC(NULL,
				sizeof(struct wmi_unified), GFP_ATOMIC);
	if (wmi_handle == NULL) {
		/* Fix: printk without a level logs at KERN_DEFAULT;
		 * an allocation failure is an error condition. */
		printk(KERN_ERR "allocation of wmi handle failed %zu \n",
		       sizeof(struct wmi_unified));
		return NULL;
	}
	OS_MEMZERO(wmi_handle, sizeof(struct wmi_unified));
	wmi_handle->scn_handle = scn_handle;
	adf_os_atomic_init(&wmi_handle->pending_cmds);
	adf_os_atomic_init(&wmi_handle->is_target_suspended);
#ifdef FEATURE_RUNTIME_PM
	adf_os_atomic_init(&wmi_handle->runtime_pm_inprogress);
#endif
	adf_os_spinlock_init(&wmi_handle->eventq_lock);
	adf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
	cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
	INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
#endif
#ifdef WMI_INTERFACE_EVENT_LOGGING
	adf_os_spinlock_init(&wmi_handle->wmi_record_lock);
#endif
	wmi_handle->wma_wow_tx_complete_cbk = func;
	return wmi_handle;
}
/**
 * wmi_unified_attach() - allocate and initialize a WMI handle
 * @scn_handle: opaque target (SCN) handle stored in the new WMI handle
 * @func: firmware-event processing callback recorded on the handle
 *
 * Allocates a zeroed struct wmi_unified with GFP_ATOMIC (safe for atomic
 * context), initializes its pending-command / suspend counters, event-queue
 * lock and queue, the rx event work item, and the optional event-logging
 * lock, then records the supplied callback.
 *
 * Return: pointer to the new WMI handle, or NULL on allocation failure.
 */
void *wmi_unified_attach(ol_scn_t scn_handle,
			 wma_process_fw_event_handler_cbk func)
{
	struct wmi_unified *wmi_handle;

	wmi_handle = (struct wmi_unified *)os_malloc(NULL,
				sizeof(struct wmi_unified), GFP_ATOMIC);
	if (wmi_handle == NULL) {
		/* Fix: printk without a level logs at KERN_DEFAULT;
		 * an allocation failure is an error condition. */
		printk(KERN_ERR "allocation of wmi handle failed %zu \n",
		       sizeof(struct wmi_unified));
		return NULL;
	}
	OS_MEMZERO(wmi_handle, sizeof(struct wmi_unified));
	wmi_handle->scn_handle = scn_handle;
	cdf_atomic_init(&wmi_handle->pending_cmds);
	cdf_atomic_init(&wmi_handle->is_target_suspended);
	cdf_spinlock_init(&wmi_handle->eventq_lock);
	cdf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
	cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
	INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
#endif
#ifdef WMI_INTERFACE_EVENT_LOGGING
	cdf_spinlock_init(&wmi_handle->wmi_record_lock);
#endif
	wmi_handle->wma_process_fw_event_handler_cbk = func;
	return wmi_handle;
}