/* Init the state common to every queue flavor: the main irqsave lock, the
 * reader/writer qlocks, and the read/write rendezvous points. */
static void qinit_common(struct queue *q)
{
	spinlock_init_irqsave(&q->lock);
	qlock_init(&q->rlock);
	qlock_init(&q->wlock);
	rendez_init(&q->rr);
	rendez_init(&q->wr);
}
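/* For context, a minimal sketch of how an allocator might use qinit_common().
 * This is an illustrative caller, not the actual qopen() from qio.c; treat
 * the `limit' field as an assumption about struct queue. */
static struct queue *qalloc_sketch(int limit)
{
	struct queue *q;

	q = kzmalloc(sizeof(struct queue), MEM_WAIT);
	if (!q)
		return NULL;
	/* Set up the locks and rendezes shared by every queue flavor */
	qinit_common(q);
	q->limit = limit;	/* assumed field */
	return q;
}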
/* Init the lock in every core's copy of the per-cpu perfmon context. */
static void perfmon_counters_env_init(void)
{
	for (int i = 0; i < num_cores; i++) {
		struct perfmon_cpu_context *cctx = _PERCPU_VARPTR(counters_env, i);

		spinlock_init_irqsave(&cctx->lock);
	}
}
/* Arch-independent per-cpu initialization.  This will call the arch dependent
 * init first. */
void smp_percpu_init(void)
{
	uint32_t coreid = core_id();
	struct per_cpu_info *pcpui = &per_cpu_info[coreid];
	void *trace_buf;
	struct kthread *kthread;
	/* Don't initialize __ctx_depth here, since it is already 1 (at least on
	 * x86), since this runs in irq context. */

	/* Do this first */
	__arch_pcpu_init(coreid);
	/* init our kthread (tracks our currently running context) */
	kthread = __kthread_zalloc();
	kthread->stacktop = get_stack_top();	/* assumes we're on the 1st page */
	pcpui->cur_kthread = kthread;
	/* Treat the startup threads as ktasks.  This will last until smp_idle when
	 * they clear it, either in anticipation of being a user-backing kthread or
	 * to handle an RKM. */
	kthread->flags = KTH_KTASK_FLAGS;
	per_cpu_info[coreid].spare = 0;
	/* Init relevant lists */
	spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
	spinlock_init_irqsave(&per_cpu_info[coreid].routine_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
	/* Initialize the per-core timer chain */
	init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
#ifdef CONFIG_KTHREAD_POISON
	*kstack_bottom_addr(kthread->stacktop) = 0xdeadbeef;
#endif /* CONFIG_KTHREAD_POISON */
	/* Init generic tracing ring */
	trace_buf = kpage_alloc_addr();
	assert(trace_buf);
	trace_ring_init(&pcpui->traces, trace_buf, PGSIZE,
	                sizeof(struct pcpu_trace_event));
	for (int i = 0; i < NR_CPU_STATES; i++)
		pcpui->state_ticks[i] = 0;
	pcpui->last_tick_cnt = read_tsc();
	/* Core 0 is in the KERNEL state, called from smp_boot.  The other cores
	 * are too, at least on x86, where we were called from asm (woken by
	 * POKE). */
	pcpui->cpu_state = CPU_STATE_KERNEL;
	/* Enable full lock debugging, after all pcpui work is done */
	pcpui->__lock_checking_enabled = 1;
}
int percpu_counter_init(struct percpu_counter *fbc, int64_t amount, gfp_t gfp)
{
	unsigned long flags __maybe_unused;

	spinlock_init_irqsave(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(int32_t, gfp);
	if (!fbc->counters)
		return -ENOMEM;
	return 0;
}
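/* For context, a minimal sketch of the matching fast-path update, in the
 * style of Linux's __percpu_counter_add().  Each core accumulates into its
 * private int32_t slot and only folds into the shared fbc->count (under the
 * irqsave lock) once the local delta crosses a batch threshold.  The
 * this_cpu_ptr() accessor and the `batch' parameter are assumptions here;
 * the actual port may spell these differently, and a real version would
 * also pin the caller to a core for the read-modify-write. */
void __percpu_counter_add_sketch(struct percpu_counter *fbc, int64_t amount,
                                 int32_t batch)
{
	int32_t *pcount = this_cpu_ptr(fbc->counters);	/* assumed accessor */
	int32_t count = *pcount + amount;

	if (count >= batch || count <= -batch) {
		/* Local delta is big enough: fold it into the global count */
		spin_lock_irqsave(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock_irqsave(&fbc->lock);
	} else {
		*pcount = count;
	}
}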
struct u16_pool *create_u16_pool(unsigned int size)
{
	struct u16_pool *id;

	/* We could have size be a u16, but this might catch bugs where users
	 * tried to ask for more than 2^16 and had it succeed. */
	if (size > MAX_U16_POOL_SZ)
		return NULL;
	/* ids and check are alloced and aligned right after the id struct */
	id = kmalloc(sizeof(*id) + sizeof(uint16_t) * size + size, MEM_WAIT);
	spinlock_init_irqsave(&id->lock);
	id->size = size;
	id->ids = (void *)&id[1];
	id->check = (void *)&id->ids[id->size];
	for (int i = 0; i < id->size; i++) {
		id->ids[i] = i;
		id->check[i] = 0xfe;	// fe rhymes with "free"
	}
	id->tos = 0;
	return id;
}
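/* For context, a minimal sketch of a matching get/put pair for the pool
 * above, assuming a LIFO stack discipline over ids[] with tos as the stack
 * pointer (tos counts allocated ids; the pool is empty when tos == size).
 * 0xfe marks a free slot, as set at init above; 0x5a is an assumed in-use
 * marker used to catch double-frees.  Function names are illustrative. */
int get_u16_sketch(struct u16_pool *id)
{
	int v;

	spin_lock_irqsave(&id->lock);
	if (id->tos == id->size) {
		/* Pool exhausted */
		spin_unlock_irqsave(&id->lock);
		return -1;
	}
	v = id->ids[id->tos++];
	spin_unlock_irqsave(&id->lock);
	/* v is ours now; flip its check byte from free to in-use */
	assert(id->check[v] == 0xfe);
	id->check[v] = 0x5a;
	return v;
}

void put_u16_sketch(struct u16_pool *id, int v)
{
	/* Catch double-frees and frees of never-allocated ids */
	assert(id->check[v] == 0x5a);
	id->check[v] = 0xfe;
	spin_lock_irqsave(&id->lock);
	id->ids[--id->tos] = v;
	spin_unlock_irqsave(&id->lock);
}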
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzmalloc(sizeof(*mdev), KMALLOC_WAIT);
	if (!mdev)
		goto err_free_res;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spinlock_init_irqsave(&mdev->uar_lock);

	mdev->dev = dev;
#if 0 // AKAROS_PORT
	mdev->dma_device = &dev->persist->pdev->dev;
#else
	mdev->dma_device = 0;
#endif
	mdev->pdev = dev->persist->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
			  0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	if (mlx4_en_get_profile(mdev)) {
		mlx4_err(mdev, "Bad module parameters, aborting\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

#if 0 // AKAROS_PORT
	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);
#endif

	/* Set default number of RX rings */
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 *       by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue)
		goto err_mr;

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	qlock_init(&mdev->state_lock);
	mdev->device_up = true;

	/* Setup ports */

#if 0 // AKAROS_PORT
	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}