/* * \param rs resource to validate * \param ref_count_is_zero One of 3 values * > 0 - check ref count is zero * = 0 - check ref count is non-zero * < 0 - ref count could be any value */ void efrm_resource_assert_valid(struct efrm_resource *rs, int ref_count_is_zero, const char *file, int line) { struct efrm_resource_manager *rm; _EFRM_ASSERT(rs, file, line); if (ref_count_is_zero >= 0) { if (!(ref_count_is_zero || rs->rs_ref_count > 0) || !(!ref_count_is_zero || rs->rs_ref_count == 0)) EFRM_WARN("%s: check %szero ref=%d " EFRM_RESOURCE_FMT, __FUNCTION__, ref_count_is_zero == 0 ? "non-" : "", rs->rs_ref_count, EFRM_RESOURCE_PRI_ARG(rs)); _EFRM_ASSERT(!(ref_count_is_zero == 0) || rs->rs_ref_count != 0, file, line); _EFRM_ASSERT(!(ref_count_is_zero > 0) || rs->rs_ref_count == 0, file, line); } rm = efrm_rm_table[rs->rs_type]; efrm_resource_manager_assert_valid(rm, file, line); }
int efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages, int n_pages, struct efx_vi_dma_map_state **dmh_out) { struct efx_vi_state *efx_state = vih; int order = fls(n_pages - 1), rc, i, evq_id; dma_addr_t dma_addr; struct efx_vi_dma_map_state *dm_state; if (n_pages != (1 << order)) { EFRM_WARN("%s: Can only allocate buffers in power of 2 " "sizes (not %d)", __func__, n_pages); return -EINVAL; } dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); if (!dm_state) return -ENOMEM; dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, GFP_KERNEL); if (!dm_state->dma_addrs) { kfree(dm_state); return -ENOMEM; } rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); if (rc < 0) { kfree(dm_state->dma_addrs); kfree(dm_state); return rc; } evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); for (i = 0; i < n_pages; i++) { /* TODO do we need to get_page() here ? */ dma_addr = pci_map_page(linux_efhw_nic(efx_state->nic)-> pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE); efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic, i, dma_addr, evq_id); dm_state->dma_addrs[i] = dma_addr; /* Would be nice to not have to call commit each time, but * comment says there are hardware restrictions on how often * you can go without it, so do this to be safe */ efrm_buffer_table_commit(); } dm_state->n_pages = n_pages; *dmh_out = dm_state; return 0; }
/**
 * Restore the PAT value saved in compat_pat_wc.original_pat, but only if
 * the current PAT still matches the value this driver wrote; otherwise
 * leave it alone and warn that something else modified PAT meanwhile.
 */
static void restore_pat(void)
{
	int mismatch = 0;
	efx_qword_t current_pat;

	/* Keep the read-compare-write sequence free of preemption. */
	preempt_disable();
	EFRM_VERIFY_EQ(read_pat(&current_pat), 0);
	if (current_pat.u64 == compat_pat_wc.modified_pat.u64)
		write_pat_on_cpus(&compat_pat_wc.original_pat);
	else
		mismatch = 1;
	preempt_enable();

	if (!mismatch) {
		EFRM_WARN("%s: PAT restored", __func__);
	} else {
		EFRM_WARN("%s: WARNING: PAT was modified while the driver was running, PAT: "
			  " original %llx, modified %llx, current %llx",
			  __func__,
			  compat_pat_wc.original_pat.u64,
			  compat_pat_wc.modified_pat.u64,
			  current_pat.u64);
	}
}
/**
 * Initialise PAT-based write-combining support (reference counted).
 *
 * \return 0 on success (including when already initialised), -ENOSYS if the
 *         CPU lacks MSR or PAT support, or the error code from setup_pat().
 */
int compat_pat_wc_init(void)
{
	int rc;
	struct cpuinfo_x86* cpu_info = &boot_cpu_data;

	/* Already set up: just bump the reference count. */
	if( compat_pat_wc.inited ) {
		++compat_pat_wc.inited;
		return 0;
	}

	/* PAT programming needs both the MSR instructions and PAT itself. */
	if( !cpu_has(cpu_info, X86_FEATURE_MSR) ||
	    !cpu_has(cpu_info, X86_FEATURE_PAT) ) {
		EFRM_ERR("%s: ERROR: PAT not available on this processor",
			 __func__);
		return -ENOSYS;
	}

	rc = setup_pat();
	switch (rc) {
	case -EIO:
		EFRM_ERR("%s: ERROR: failed accessing PAT register",
			 __func__);
		return rc;
	case -EFAULT:
		EFRM_ERR("%s: ERROR: PAT registers inconsistent across CPUs",
			 __func__);
		return rc;
	case -ENOSPC:
		EFRM_ERR("%s: ERROR: incompatible PAT modification detected %llx",
			 __func__, compat_pat_wc.original_pat.u64);
		return rc;
	case -EALREADY:
		EFRM_WARN("%s: WARNING: compatible PAT modification detected %llx",
			  __func__, compat_pat_wc.original_pat.u64);
		/* fallthrough - a compatible modification is still usable,
		 * so also log the success message below */
	case 0:
		EFRM_WARN( "%s: PAT modified for WC", __func__);
		break;
	default:
		/* NOTE(review): an unknown return code only logs here and
		 * then falls out of the switch, so it is treated as success
		 * (inited is set, 0 returned) - confirm that is intended. */
		EFRM_ERR( "%s: unknown return code", __func__);
	}

	compat_pat_wc.inited = 1;
	return 0;
}
/* Function needed as Xen can't get pages for grants in dom0, but can get dma address */ int efx_vi_dma_map_addrs(struct efx_vi_state *vih, unsigned long long *bus_dev_addrs, int n_pages, struct efx_vi_dma_map_state **dmh_out) { struct efx_vi_state *efx_state = vih; int order = fls(n_pages - 1), rc, i, evq_id; dma_addr_t dma_addr; struct efx_vi_dma_map_state *dm_state; if (n_pages != (1 << order)) { EFRM_WARN("%s: Can only allocate buffers in power of 2 " "sizes (not %d)", __func__, n_pages); return -EINVAL; } dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); if (!dm_state) return -ENOMEM; dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, GFP_KERNEL); if (!dm_state->dma_addrs) { kfree(dm_state); return -ENOMEM; } rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); if (rc < 0) { kfree(dm_state->dma_addrs); kfree(dm_state); return rc; } evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); #if 0 EFRM_WARN("%s: mapping %d pages to evq %d, bt_ids %d-%d\n", __func__, n_pages, evq_id, dm_state->bt_handle.base, dm_state->bt_handle.base + n_pages); #endif for (i = 0; i < n_pages; i++) { dma_addr = (dma_addr_t)bus_dev_addrs[i]; efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic, i, dma_addr, evq_id); dm_state->dma_addrs[i] = dma_addr; /* Would be nice to not have to call commit each time, but * comment says there are hardware restrictions on how often * you can go without it, so do this to be safe */ efrm_buffer_table_commit(); } dm_state->n_pages = n_pages; *dmh_out = dm_state; return 0; }