int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof(long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}
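/*
 * Worked example of the sizing above (illustrative values, not from the
 * driver): with num = 256, reserved_bot = 8 and reserved_top = 16,
 * bitmap->max is 240, the table spans BITS_TO_LONGS(240) = 4 longs on a
 * 64-bit build, and bitmap_set() pre-claims IDs 0..7, leaving 8..239
 * allocatable.
 */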
static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long carved_out_count,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kzalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->free_count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
#ifdef CMA_NO_MIGRATION
	cma->isolated = true;
#endif
	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, carved_out_count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}
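/*
 * The index arithmetic in the compat branch above is the subtle part:
 * i + 1 - ((i % 2) << 1) is simply i ^ 1, swapping each even/odd pair
 * of compat_long_t values. That is what a big-endian 64-bit kernel
 * needs, since each native long there holds its two 32-bit compat
 * halves in the opposite order from what a 32-bit caller expects; in
 * the evdev code this appears to be taken from, this variant of the
 * loop is compiled only on big-endian builds.
 */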
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	for (i = 0; i < page_count; i++) {
		if (page_to_phys(obj->pages[i]) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
	}
}
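/*
 * Why record bit 17 at all: with bit-17-based swizzling, the tiled
 * layout of a page depends on bit 17 of its physical address. A page
 * that is swapped out may come back at a different physical address,
 * so the bitmap saved here lets the matching restore path spot pages
 * whose bit 17 changed and re-swizzle their contents.
 */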
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	spin_lock_init(&htt->tx_lock);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		kfree(htt->used_msdu_ids);
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
		kfree(ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}
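/*
 * Sizing sketch, assuming the usual values from the trees this is taken
 * from (INTR_REMAP_TABLE_ENTRIES == 65536, INTR_REMAP_PAGE_ORDER == 8):
 * the table itself gets 256 pages = 1 MiB, i.e. 16 bytes per remap
 * entry, while the allocation bitmap needs BITS_TO_LONGS(65536) = 1024
 * longs = 8 KiB on a 64-bit build.
 */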
static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
				      u8 *msg, u8 idx, bool is_end)
{
#define HCLGE_MTA_STATUS_MSG_SIZE 13
#define HCLGE_MTA_STATUS_MSG_BITS \
	(HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGE_MTA_STATUS_MSG_END_BITS \
	(HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
	unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_ofs;
	u8 msg_bit;

	tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
			HCLGE_MTA_STATUS_MSG_BITS;

	/* set msg field */
	msg_ofs = 0;
	msg_bit = 0;
	memset(status, 0, sizeof(status));
	for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
		if (msg[msg_ofs] & BIT(msg_bit))
			set_bit(tbl_idx, status);

		msg_bit++;
		if (msg_bit == BITS_PER_BYTE) {
			msg_bit = 0;
			msg_ofs++;
		}
	}

	return hclge_update_mta_status_common(vport, status,
					      idx * HCLGE_MTA_STATUS_MSG_BITS,
					      tbl_cnt, is_end);
}
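/*
 * Worked numbers for the macros above: HCLGE_MTA_STATUS_MSG_SIZE is 13
 * bytes, so each message carries 13 * 8 = 104 table bits and status[]
 * spans BITS_TO_LONGS(104) longs (2 on 64-bit, 4 on 32-bit). Only the
 * final message, flagged by is_end, carries the remainder
 * HCLGE_MTA_TBL_SIZE % 104 bits.
 */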
static struct mlxsw_sp2_kvdl_part *
mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp2_kvdl_part_info *info)
{
	unsigned int indexes_per_usage_bit;
	struct mlxsw_sp2_kvdl_part *part;
	unsigned int index_range;
	unsigned int usage_bit_count;
	size_t usage_size;

	if (!mlxsw_core_res_valid(mlxsw_sp->core,
				  info->usage_bit_count_res_id) ||
	    !mlxsw_core_res_valid(mlxsw_sp->core,
				  info->index_range_res_id))
		return ERR_PTR(-EIO);
	usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
					     info->usage_bit_count_res_id);
	index_range = mlxsw_core_res_get(mlxsw_sp->core,
					 info->index_range_res_id);

	/* For some partitions, one usage bit represents a group of indexes.
	 * That's why we compute the number of indexes per usage bit here,
	 * according to queried resources.
	 */
	indexes_per_usage_bit = index_range / usage_bit_count;

	usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
	part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
	if (!part)
		return ERR_PTR(-ENOMEM);
	part->info = info;
	part->usage_bit_count = usage_bit_count;
	part->indexes_per_usage_bit = indexes_per_usage_bit;
	part->last_allocated_bit = usage_bit_count - 1;

	return part;
}
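/*
 * Example of the grouping described in the comment above (hypothetical
 * resource values): if the device reports index_range = 1024 and
 * usage_bit_count = 256, each usage bit stands for 1024 / 256 = 4
 * consecutive KVDL indexes, and the usage bitmap appended to the struct
 * occupies BITS_TO_LONGS(256) longs.
 */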
int mlx5_mpfs_init(struct mlx5_core_dev *dev)
{
	int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	struct mlx5_mpfs *mpfs;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
	if (!mpfs)
		return -ENOMEM;

	mutex_init(&mpfs->lock);
	mpfs->size = l2table_size;
	mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size),
			       sizeof(uintptr_t), GFP_KERNEL);
	if (!mpfs->bitmap) {
		kfree(mpfs);
		return -ENOMEM;
	}

	dev->priv.mpfs = mpfs;
	return 0;
}
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)
		set_bit(i, bitmap->table);

	return 0;
}
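/*
 * The reservation loop above is the open-coded equivalent of the
 * bitmap_set(bitmap->table, 0, reserved_bot) call used by the mlx4
 * variant of this function: both mark IDs 0..reserved_bot-1 as taken
 * before any allocation happens.
 */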
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
	struct iio_dev *indio_dev = buffer->indio_dev;
	unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		kfree(trialmask);
		return -EINVAL;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
	buffer->scan_count++;

	kfree(trialmask);

	return 0;
}
int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
		     struct device_node *of_node)
{
	int size;

	if (!irq_count)
		return -EINVAL;

	size = BITS_TO_LONGS(irq_count) * sizeof(long);
	pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);

	bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (!bmp->bitmap) {
		pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
		return -ENOMEM;
	}

	/* We zalloc'ed the bitmap, so all irqs are free by default */
	spin_lock_init(&bmp->lock);
	bmp->of_node = of_node_get(of_node);
	bmp->irq_count = irq_count;

	return 0;
}
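/*
 * BITS_TO_LONGS() rounds up, so the bitmap always covers at least
 * irq_count bits: e.g. irq_count = 130 yields 3 longs (192 bits) on a
 * 64-bit build, and the pr_debug above would report 0x18 bytes.
 */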
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
				      sizeof(long), DRM_I915_GEM, M_WAITOK);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	/* XXXKIB: review locking, atomics might be not needed there */
	for (i = 0; i < page_count; i++) {
		if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
			set_bit(i, obj->bit_17);
		else
			clear_bit(i, obj->bit_17);
	}
}
/*
 * EV_ABS events which should not be cached are listed here.
 */
static unsigned int input_abs_bypass_init_data[] __initdata = {
	ABS_MT_TOUCH_MAJOR,
	ABS_MT_TOUCH_MINOR,
	ABS_MT_WIDTH_MAJOR,
	ABS_MT_WIDTH_MINOR,
	ABS_MT_ORIENTATION,
	ABS_MT_POSITION_X,
	ABS_MT_POSITION_Y,
	ABS_MT_TOOL_TYPE,
	ABS_MT_BLOB_ID,
	ABS_MT_PRESSURE,
	0
};
static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * be mutually exclusive which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static struct input_handler *input_table[8];

static inline int is_event_supported(unsigned int code,
int au_si_alloc(struct super_block *sb)
{
	int err;
	struct au_sbinfo *sbinfo;
	static struct lock_class_key aufs_si;

	err = -ENOMEM;
	sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
	if (unlikely(!sbinfo))
		goto out;

	BUILD_BUG_ON(sizeof(unsigned long) !=
		     sizeof(*sbinfo->au_si_pid.bitmap));
	sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
					   sizeof(*sbinfo->au_si_pid.bitmap),
					   GFP_NOFS);
	if (unlikely(!sbinfo->au_si_pid.bitmap))
		goto out_sbinfo;

	/* will be reallocated separately */
	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
	if (unlikely(!sbinfo->si_branch))
		goto out_pidmap;

	err = sysaufs_si_init(sbinfo);
	if (unlikely(err))
		goto out_br;

	au_nwt_init(&sbinfo->si_nowait);
	au_rw_init_wlock(&sbinfo->si_rwsem);
	au_rw_class(&sbinfo->si_rwsem, &aufs_si);
	spin_lock_init(&sbinfo->au_si_pid.tree_lock);
	INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);

	atomic_long_set(&sbinfo->si_ninodes, 0);
	atomic_long_set(&sbinfo->si_nfiles, 0);

	sbinfo->si_bend = -1;

	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
	sbinfo->si_wbr_create = AuWbrCreate_Def;
	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
	sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;

	sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);

	mutex_init(&sbinfo->si_xib_mtx);
	sbinfo->si_xino_brid = -1;
	/* leave si_xib_last_pindex and si_xib_next_bit */

	sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
	sbinfo->si_dirwh = AUFS_DIRWH_DEF;

	au_spl_init(&sbinfo->si_plink);
	init_waitqueue_head(&sbinfo->si_plink_wq);
	spin_lock_init(&sbinfo->si_plink_maint_lock);

	/* leave other members for sysaufs and si_mnt. */
	sbinfo->si_sb = sb;
	sb->s_fs_info = sbinfo;
	si_pid_set(sb);
	au_debug_sbinfo_init(sbinfo);
	return 0; /* success */

out_br:
	kfree(sbinfo->si_branch);
out_pidmap:
	kfree(sbinfo->au_si_pid.bitmap);
out_sbinfo:
	kfree(sbinfo);
out:
	return err;
}
 */

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/workqueue.h>

struct keyreset_state {
	struct input_handler input_handler;
	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
	spinlock_t lock;
	int key_down_target;
	int key_down;
	int key_up;
	int restart_disabled;
	int restart_requested;
	int (*reset_fn)(void);
	int down_time_ms;
	struct delayed_work restart_work;
};

static void deferred_restart(struct work_struct *work)
{
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"
#include "masklog.h"

static DECLARE_RWSEM(o2hb_callback_sem);

static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

#define O2HB_DEBUG_DIR		"o2hb"
#define O2HB_DEBUG_LIVENODES	"livenodes"
static struct dentry *o2hb_debug_dir;
static struct dentry *o2hb_debug_livenodes;

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
	struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
#include <hv/hypervisor.h>

#define TILE_MAX_COUNTERS	4

#define PERF_COUNT_0_IDX	0
#define PERF_COUNT_1_IDX	1
#define AUX_PERF_COUNT_0_IDX	2
#define AUX_PERF_COUNT_1_IDX	3

struct cpu_hw_events {
	int			n_events;
	struct perf_event	*events[TILE_MAX_COUNTERS]; /* counter order */
	struct perf_event	*event_list[TILE_MAX_COUNTERS]; /* enabled order */
	int			assign[TILE_MAX_COUNTERS];
	unsigned long		active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)];
	unsigned long		used_mask;
};

/* TILE arch specific performance monitor unit */
struct tile_pmu {
	const char	*name;
	int		version;
	const int	*hw_events;	/* generic hw events table */
	/* generic hw cache events table */
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX];
	int	(*map_hw_event)(u64);	 /* method used to map hw events */
	int	(*map_cache_event)(u64); /* method used to map
	bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
}

static void bfin_pfmon_disable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
}

static void bfin_pfmon_enable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
}

struct cpu_hw_events {
	struct perf_event *events[MAX_HWEVENTS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
static void ocfs2_node_map_init(struct ocfs2_node_map *map)
{
	map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
	memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) *
	       sizeof(unsigned long));
}
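/*
 * The memset() above is the open-coded form of
 * bitmap_zero(map->map, OCFS2_NODE_MAP_MAX_NODES).
 */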
#ifdef CONFIG_INPUT

/* Simple translation table for the SysRq keys */
static const unsigned char sysrq_xlate[KEY_CNT] =
	"\000\0331234567890-=\177\t"			/* 0x00 - 0x0f */
	"qwertyuiop[]\r\000as"				/* 0x10 - 0x1f */
	"dfghjkl;'`\000\\zxcv"				/* 0x20 - 0x2f */
	"bnm,./\000*\000 \000\201\202\203\204\205"	/* 0x30 - 0x3f */
	"\206\207\210\211\212\000\000789-456+1"		/* 0x40 - 0x4f */
	"230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
	"\r\000/";					/* 0x60 - 0x6f */

struct sysrq_state {
	struct input_handle handle;
	struct work_struct reinject_work;
	unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];
	unsigned int alt;
	unsigned int alt_use;
	bool active;
	bool need_reinject;
	bool reinjecting;
};

static void sysrq_reinject_alt_sysrq(struct work_struct *work)
{
	struct sysrq_state *sysrq =
			container_of(work, struct sysrq_state, reinject_work);
	struct input_handle *handle = &sysrq->handle;
	unsigned int alt_code = sysrq->alt_use;

	if (sysrq->need_reinject) {
/* Init Flow Ring specific data structures */
int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
	uint32 idx;
	uint32 flow_ring_table_sz;
	uint32 if_flow_lkup_sz;
	void *flowid_allocator;
	flow_ring_table_t *flow_ring_table;
	if_flow_lkup_t *if_flow_lkup = NULL;
#ifdef PCIE_TX_DEFERRAL
	uint32 count;
#endif
	void *lock = NULL;
	unsigned long flags;

	DHD_INFO(("%s\n", __FUNCTION__));

	/* Construct a 16bit flowid allocator */
	flowid_allocator = id16_map_init(dhdp->osh,
		num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
	if (flowid_allocator == NULL) {
		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Allocate a flow ring table, comprising of requested number of rings */
	flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
	flow_ring_table = (flow_ring_table_t *)MALLOC(dhdp->osh, flow_ring_table_sz);
	if (flow_ring_table == NULL) {
		DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize flow ring table state */
	bzero((uchar *)flow_ring_table, flow_ring_table_sz);
	for (idx = 0; idx < num_flow_rings; idx++) {
		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
		flow_ring_table[idx].flowid = (uint16)idx;
		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
		if (flow_ring_table[idx].lock == NULL) {
			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
			goto fail;
		}
		dll_init(&flow_ring_table[idx].list);

		/* Initialize the per flow ring backup queue */
		dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
			FLOW_RING_QUEUE_THRESHOLD);
	}

	/* Allocate per interface hash table */
	if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
	if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
		DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
	if (if_flow_lkup == NULL) {
		DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize per interface hash table */
	bzero((uchar *)if_flow_lkup, if_flow_lkup_sz);
	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
		int hash_ix;
		if_flow_lkup[idx].status = 0;
		if_flow_lkup[idx].role = 0;
		for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
			if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
	}

#ifdef PCIE_TX_DEFERRAL
	/* BITS_TO_LONGS() counts longs, not bytes, so scale by sizeof(long) */
	count = BITS_TO_LONGS(num_flow_rings);
	dhdp->bus->delete_flow_map = kzalloc(count * sizeof(unsigned long),
		GFP_ATOMIC);
	if (!dhdp->bus->delete_flow_map) {
		DHD_ERROR(("%s: delete_flow_map alloc failure\n", __FUNCTION__));
		goto fail;
	}
#endif

	lock = dhd_os_spin_lock_init(dhdp->osh);
	if (lock == NULL)
		goto fail;

	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

	/* Now populate into dhd pub */
	DHD_FLOWID_LOCK(lock, flags);
	dhdp->num_flow_rings = num_flow_rings;
	dhdp->flowid_allocator = (void *)flowid_allocator;
	dhdp->flow_ring_table = (void *)flow_ring_table;
	dhdp->if_flow_lkup = (void *)if_flow_lkup;
	dhdp->flowid_lock = lock;
	DHD_FLOWID_UNLOCK(lock, flags);

	DHD_INFO(("%s done\n", __FUNCTION__));
	return BCME_OK;

fail:
#ifdef PCIE_TX_DEFERRAL
	if (dhdp->bus->delete_flow_map)
		kfree(dhdp->bus->delete_flow_map);
#endif
	/* Destruct the per interface flow lkup table */
	if (if_flow_lkup != NULL)
		DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
	if (flow_ring_table != NULL) {
		for (idx = 0; idx < num_flow_rings; idx++) {
			if (flow_ring_table[idx].lock != NULL)
				dhd_os_spin_lock_deinit(dhdp->osh,
					flow_ring_table[idx].lock);
		}
		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
	}
	id16_map_fini(dhdp->osh, flowid_allocator);

	return BCME_NOMEM;
}
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/bitops.h>

/* Parameters for PFN/MADDR compression. */
unsigned long __read_mostly max_pdx;
unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
unsigned long __read_mostly pfn_top_mask = 0;
unsigned long __read_mostly ma_top_mask = 0;
unsigned long __read_mostly pfn_hole_mask = 0;
unsigned int __read_mostly pfn_pdx_hole_shift = 0;

unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };

int __mfn_valid(unsigned long mfn)
{
    return likely(mfn < max_page) &&
           likely(!(mfn & pfn_hole_mask)) &&
           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
                           pdx_group_valid));
}

/* Sets all bits from the most-significant 1-bit down to the LSB */
static u64 __init fill_mask(u64 mask)
{
    while (mask & (mask + 1))
        mask |= mask + 1;

    return mask;
}
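/*
 * Worked example for fill_mask(): starting from 0x40, successive
 * iterations OR in mask + 1, giving 0x41, 0x43, 0x47, 0x4f, 0x5f and
 * finally 0x7f, at which point mask & (mask + 1) is zero and the loop
 * stops with every bit below the top set bit filled in.
 */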
	.sig = 0,
};

/*
 * Variables exported for vt.c
 */

int shift_state = 0;

/*
 * Internal Data.
 */

static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];	/* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT];		/* shift state counters.. */
static bool dead_key_next;
static int npadch = -1;					/* -1 or number assembled on pad */
static unsigned int diacr;
static char rep;					/* flag telling character repeat */

static unsigned char ledstate = 0xff;			/* undefined */
static unsigned char ledioctl;

static struct ledptr {
	unsigned int *addr;
	unsigned int mask;
	unsigned char valid:1;
} ledptrs[3];
static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	struct test_obj *obj;
	int err;
	unsigned int i, insert_retries = 0;
	s64 start, end;

	/*
	 * Insertion Test:
	 * Insert entries into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", entries);
	start = ktime_get_ns();
	for (i = 0; i < entries; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(ht, obj, test_rht_params);
		if (err > 0)
			insert_retries += err;
		else if (err)
			return err;
	}

	if (insert_retries)
		pr_info("  %u insertions retried due to memory pressure\n",
			insert_retries);

	test_bucket_stats(ht, entries);
	rcu_read_lock();
	test_rht_lookup(ht, array, entries);
	rcu_read_unlock();

	test_bucket_stats(ht, entries);

	pr_info("  Deleting %d keys\n", entries);
	for (i = 0; i < entries; i++) {
		struct test_obj_val key = {
			.id = i * 2,
		};

		if (array[i].value.id != TEST_INSERT_FAIL) {
			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
			BUG_ON(!obj);

			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
		}

		cond_resched();
	}

	end = ktime_get_ns();
	pr_info("  Duration of test: %lld ns\n", end - start);

	return end - start;
}

static struct rhashtable ht;
static struct rhltable rhlt;

static int __init test_rhltable(unsigned int entries)
{
	struct test_obj_rhl *rhl_test_objects;
	unsigned long *obj_in_table;
	unsigned int i, j, k;
	int ret, err;

	if (entries == 0)
		entries = 1;

	rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries);
	if (!rhl_test_objects)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long));
	if (!obj_in_table)
		goto out_free;

	/* nulls_base not supported in rhlist interface */
	test_rht_params.nulls_base = 0;
	err = rhltable_init(&rhlt, &test_rht_params);
	if (WARN_ON(err))
		goto out_free;

	k = prandom_u32();
	ret = 0;
	for (i = 0; i < entries; i++) {
		rhl_test_objects[i].value.id = k;
		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	if (err)
		ret = err;

	pr_info("test %d add/delete pairs into rhlist\n", entries);
	for (i = 0; i < entries; i++) {
		struct rhlist_head *h, *pos;
		struct test_obj_rhl *obj;
		struct test_obj_val key = {
			.id = k,
		};
		bool found;

		rcu_read_lock();
		h = rhltable_lookup(&rhlt, &key, test_rht_params);
		if (WARN(!h, "key not found during iteration %d of %d", i,
			 entries)) {
			rcu_read_unlock();
			break;
		}

		if (i) {
			j = i - 1;
			rhl_for_each_entry_rcu(obj, pos, h, list_node) {
				if (WARN(pos == &rhl_test_objects[j].list_node,
					 "old element found, should be gone"))
					break;
			}
		}

		cond_resched_rcu();

		found = false;

		rhl_for_each_entry_rcu(obj, pos, h, list_node) {
			if (pos == &rhl_test_objects[i].list_node) {
				found = true;
				break;
			}
		}

		rcu_read_unlock();

		if (WARN(!found, "element %d not found", i))
			break;

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
		if (err == 0)
			clear_bit(i, obj_in_table);
	}

	if (ret == 0 && err)
		ret = err;

	for (i = 0; i < entries; i++) {
		WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	pr_info("test %d random rhlist add/delete operations\n", entries);
	for (j = 0; j < entries; j++) {
		u32 i = prandom_u32_max(entries);
		u32 prand = prandom_u32();

		cond_resched();

		if (prand == 0)
			prand = prandom_u32();

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (test_bit(i, obj_in_table)) {
			clear_bit(i, obj_in_table);
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
				 i, err, -ENOENT))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (err == 0) {
			if (WARN(test_and_set_bit(i, obj_in_table),
				 "succeeded to insert same object %d", i))
				continue;
		} else {
			if (WARN(!test_bit(i, obj_in_table),
				 "failed to insert object %d", i))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}
 */

/*
 * Based on (mostly copied from) plat-orion based Linux 2.6 kernel driver.
 * Removed orion_gpiochip struct and kernel level irq handling.
 *
 * Dieter Kiermaier [email protected]
 */

#include <common.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/arch/kirkwood.h>
#include <asm/arch/gpio.h>

static unsigned long gpio_valid_input[BITS_TO_LONGS(GPIO_MAX)];
static unsigned long gpio_valid_output[BITS_TO_LONGS(GPIO_MAX)];

void __set_direction(unsigned pin, int input)
{
	u32 u;

	u = readl(GPIO_IO_CONF(pin));
	if (input)
		u |= 1 << (pin & 31);
	else
		u &= ~(1 << (pin & 31));
	writel(u, GPIO_IO_CONF(pin));

	u = readl(GPIO_IO_CONF(pin));
}
int main(int argc, char* argv[])
{
    bcm_host_init();

    // get an EGL display connection
    display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (!display)
    {
        printf("Failed to get display\n");
        return 1;
    }

    // initialize the EGL display connection
    if (!eglInitialize(display, NULL, NULL))
    {
        printf("Failed to initialize EGL\n");
        return 1;
    }

    // get an appropriate EGL frame buffer configuration
    static const EGLint attributeList[] =
    {
        EGL_RED_SIZE, 8,
        EGL_GREEN_SIZE, 8,
        EGL_BLUE_SIZE, 8,
        EGL_ALPHA_SIZE, 8,
        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
        EGL_NONE
    };
    EGLConfig config;
    EGLint numConfig;
    if (!eglChooseConfig(display, attributeList, &config, 1, &numConfig))
    {
        printf("Failed to choose EGL config\n");
        return 1;
    }

    if (!eglBindAPI(EGL_OPENGL_ES_API))
    {
        printf("Failed to bind OpenGL ES API\n");
        return 1;
    }

    // create an EGL rendering context
    static const EGLint contextAttributes[] =
    {
        EGL_CONTEXT_CLIENT_VERSION, 2,
        EGL_NONE
    };
    context = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttributes);
    if (context == EGL_NO_CONTEXT)
    {
        printf("Failed to create EGL context\n");
        return 1;
    }

    // create an EGL window surface
    uint32_t screenWidth;
    uint32_t screenHeight;
    int32_t success = graphics_get_display_size(0, &screenWidth, &screenHeight);
    if (success == -1)
    {
        printf("Failed to get display size\n");
        return 1;
    }

    VC_RECT_T dstRect;
    dstRect.x = 0;
    dstRect.y = 0;
    dstRect.width = screenWidth;
    dstRect.height = screenHeight;

    VC_RECT_T srcRect;
    srcRect.x = 0;
    srcRect.y = 0;
    srcRect.width = screenWidth;
    srcRect.height = screenHeight;

    DISPMANX_DISPLAY_HANDLE_T dispmanDisplay = vc_dispmanx_display_open(0);
    DISPMANX_UPDATE_HANDLE_T dispmanUpdate = vc_dispmanx_update_start(0);

    DISPMANX_ELEMENT_HANDLE_T dispmanElement =
        vc_dispmanx_element_add(dispmanUpdate, dispmanDisplay, 0, &dstRect, 0,
                                &srcRect, DISPMANX_PROTECTION_NONE, 0, 0,
                                DISPMANX_NO_ROTATE);

    static EGL_DISPMANX_WINDOW_T nativewindow;
    nativewindow.element = dispmanElement;
    nativewindow.width = screenWidth;
    nativewindow.height = screenHeight;

    vc_dispmanx_update_submit_sync(dispmanUpdate);

    surface = eglCreateWindowSurface(display, config, &nativewindow, NULL);
    if (surface == EGL_NO_SURFACE)
    {
        printf("Failed to create EGL window surface\n");
        return 1;
    }

    // connect the context to the surface
    if (!eglMakeCurrent(display, surface, surface, context))
    {
        printf("Failed to set current EGL context\n");
        return 1;
    }

    // input
    struct InputDeviceRPI
    {
        enum DeviceClass
        {
            CLASS_KEYBOARD = 1,
            CLASS_MOUSE = 2,
            CLASS_TOUCHPAD = 4,
            CLASS_GAMEPAD = 8
        };

        uint32_t deviceClass = 0;
        int fd = 0;
    };

    int maxFd = 0;
    std::vector<InputDeviceRPI> inputDevices;
    uint32_t mouseX = 0;
    uint32_t mouseY = 0;
    char TEMP[256];

    glob_t g;
    int result = glob("/dev/input/event*", GLOB_NOSORT, NULL, &g);
    if (result == GLOB_NOMATCH)
    {
        printf("No event devices found\n");
        return 1;
    }
    else if (result)
    {
        printf("Could not read /dev/input/event*\n");
        return 1;
    }

    for (size_t i = 0; i < g.gl_pathc; i++)
    {
        InputDeviceRPI inputDevice;
        inputDevice.fd = open(g.gl_pathv[i], O_RDONLY);
        if (inputDevice.fd == -1)
        {
            printf("Failed to open device file descriptor\n");
            continue;
        }

        if (ioctl(inputDevice.fd, EVIOCGRAB, (void*)1) == -1)
        {
            printf("Failed to grab device\n");
        }

        memset(TEMP, 0, sizeof(TEMP));
        if (ioctl(inputDevice.fd, EVIOCGNAME(sizeof(TEMP) - 1), TEMP) == -1)
        {
            printf("Failed to get device name\n");
        }
        else
        {
            printf("Got device: %s\n", TEMP);
        }

        unsigned long eventBits[BITS_TO_LONGS(EV_CNT)];
        unsigned long absBits[BITS_TO_LONGS(ABS_CNT)];
        unsigned long relBits[BITS_TO_LONGS(REL_CNT)];
        unsigned long keyBits[BITS_TO_LONGS(KEY_CNT)];

        if (ioctl(inputDevice.fd, EVIOCGBIT(0, sizeof(eventBits)), eventBits) == -1 ||
            ioctl(inputDevice.fd, EVIOCGBIT(EV_ABS, sizeof(absBits)), absBits) == -1 ||
            ioctl(inputDevice.fd, EVIOCGBIT(EV_REL, sizeof(relBits)), relBits) == -1 ||
            ioctl(inputDevice.fd, EVIOCGBIT(EV_KEY, sizeof(keyBits)), keyBits) == -1)
        {
            printf("Failed to get device event bits\n");
        }

        if (bit_is_set(eventBits, EV_KEY) && (
                bit_is_set(keyBits, KEY_1) || bit_is_set(keyBits, KEY_2) ||
                bit_is_set(keyBits, KEY_3) || bit_is_set(keyBits, KEY_4) ||
                bit_is_set(keyBits, KEY_5) || bit_is_set(keyBits, KEY_6) ||
                bit_is_set(keyBits, KEY_7) || bit_is_set(keyBits, KEY_8) ||
                bit_is_set(keyBits, KEY_9) || bit_is_set(keyBits, KEY_0)))
        {
            printf("Keyboard\n");
            inputDevice.deviceClass = InputDeviceRPI::CLASS_KEYBOARD;
        }

        if (bit_is_set(eventBits, EV_ABS) &&
            bit_is_set(absBits, ABS_X) && bit_is_set(absBits, ABS_Y))
        {
            if (bit_is_set(keyBits, BTN_STYLUS) || bit_is_set(keyBits, BTN_TOOL_PEN))
            {
                printf("Tablet\n");
                inputDevice.deviceClass |= InputDeviceRPI::CLASS_TOUCHPAD;
            }
            else if (bit_is_set(keyBits, BTN_TOOL_FINGER) && !bit_is_set(keyBits, BTN_TOOL_PEN))
            {
                printf("Touchpad\n");
                inputDevice.deviceClass |= InputDeviceRPI::CLASS_TOUCHPAD;
            }
            else if (bit_is_set(keyBits, BTN_MOUSE))
            {
                printf("Mouse\n");
                inputDevice.deviceClass |= InputDeviceRPI::CLASS_MOUSE;
            }
            else if (bit_is_set(keyBits, BTN_TOUCH))
            {
                printf("Touchscreen\n");
                inputDevice.deviceClass |= InputDeviceRPI::CLASS_TOUCHPAD;
            }
        }
        else if (bit_is_set(eventBits, EV_REL) &&
                 bit_is_set(relBits, REL_X) && bit_is_set(relBits, REL_Y))
        {
            if (bit_is_set(keyBits, BTN_MOUSE))
            {
                printf("Mouse\n");
                inputDevice.deviceClass |= InputDeviceRPI::CLASS_MOUSE;
            }
        }

        if (bit_is_set(keyBits, BTN_JOYSTICK))
        {
            printf("Joystick\n");
            inputDevice.deviceClass = InputDeviceRPI::CLASS_GAMEPAD;
        }

        if (bit_is_set(keyBits, BTN_GAMEPAD))
        {
            printf("Gamepad\n");
            inputDevice.deviceClass = InputDeviceRPI::CLASS_GAMEPAD;
        }

        if (inputDevice.fd > maxFd)
        {
            maxFd = inputDevice.fd;
        }

        inputDevices.push_back(inputDevice);
    }

    globfree(&g);

    fd_set rfds;
    struct timeval tv;

    for (;;)
    {
        FD_ZERO(&rfds);
        for (const InputDeviceRPI& inputDevice : inputDevices)
        {
            FD_SET(inputDevice.fd, &rfds);
        }

        tv.tv_sec = 0;
        tv.tv_usec = 0;

        int retval = select(maxFd + 1, &rfds, NULL, NULL, &tv);
        if (retval == -1)
        {
            printf("Select failed\n");
            return 1;
        }
        else if (retval > 0)
        {
            for (const InputDeviceRPI& inputDevice : inputDevices)
            {
                if (FD_ISSET(inputDevice.fd, &rfds))
                {
                    ssize_t bytesRead = read(inputDevice.fd, TEMP, sizeof(TEMP));
                    if (bytesRead == -1)
                    {
                        printf("Failed to read input\n");
                    }

                    printf("Got input, read %zd bytes\n", bytesRead);

                    for (ssize_t i = 0;
                         i < bytesRead - static_cast<ssize_t>(sizeof(input_event)) + 1;
                         i += sizeof(input_event))
                    {
                        input_event* event = reinterpret_cast<input_event*>(TEMP + i);

                        if (inputDevice.deviceClass & InputDeviceRPI::CLASS_KEYBOARD)
                        {
                            printf("Timestamp: %u.%u, type: %d",
                                   (uint32_t)event->time.tv_sec,
                                   (uint32_t)event->time.tv_usec,
                                   event->type);

                            switch (event->type)
                            {
                                case EV_SYN: printf(", EV_SYN"); break;
                                case EV_KEY: printf(", EV_KEY"); break;
                                case EV_MSC: printf(", EV_MSC"); break;
                                case EV_REP: printf(", EV_REP"); break;
                            }

                            printf(", value: %d, key: %d\n", event->value, event->code);

                            if (event->type == EV_KEY && event->code == KEY_ESC)
                            {
                                return 0;
                            }
                        }

                        if (inputDevice.deviceClass & InputDeviceRPI::CLASS_MOUSE)
                        {
                            printf("Timestamp: %u.%u, type: %d",
                                   (uint32_t)event->time.tv_sec,
                                   (uint32_t)event->time.tv_usec,
                                   event->type);

                            switch (event->type)
                            {
                                case EV_SYN: printf(", EV_SYN"); break;
                                case EV_KEY: printf(", EV_KEY"); break;
                                case EV_MSC: printf(", EV_MSC"); break;
                                case EV_REL: printf(", EV_REL"); break;
                            }

                            printf(", value: %d, key: %d\n", event->value, event->code);
                        }
                    }
                }
            }
        }

        glClearColor(1.0, 0.0, 0.0, 1.0);
        glClear(GL_COLOR_BUFFER_BIT);
        glFlush();

        eglSwapBuffers(display, surface);
    }

    for (const InputDeviceRPI& inputDevice : inputDevices)
    {
        if (ioctl(inputDevice.fd, EVIOCGRAB, (void*)0) == -1)
        {
            printf("Failed to release device\n");
        }

        if (close(inputDevice.fd) == -1)
        {
            printf("Failed to close file descriptor\n");
        }
    }

    if (!eglDestroySurface(display, surface))
    {
        printf("Failed to destroy EGL surface\n");
    }

    if (!eglDestroyContext(display, context))
    {
        printf("Failed to destroy EGL context\n");
    }

    if (!eglTerminate(display))
    {
        printf("Failed to terminate EGL\n");
    }

    bcm_host_deinit();

    return 0;
}
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
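/*
 * A sketch of the matching release path (assumed here; the in-tree
 * helper should be equivalent): the atomic test_and_set_bit() claim in
 * the loop above is undone with a single clear_bit(), after which the
 * LPID can be found again by find_first_zero_bit().
 */
void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}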