/* * Report the number of pinned/un-pinned breakpoints we have in * a given cpu (cpu > -1) or in all of them (cpu = -1). */ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp) { int cpu = bp->cpu; struct task_struct *tsk = bp->ctx->task; if (cpu >= 0) { slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu); if (!tsk) slots->pinned += max_task_bp_pinned(cpu); else slots->pinned += task_bp_pinned(tsk); slots->flexible = per_cpu(nr_bp_flexible, cpu); return; } for_each_online_cpu(cpu) { unsigned int nr; nr = per_cpu(nr_cpu_bp_pinned, cpu); if (!tsk) nr += max_task_bp_pinned(cpu); else nr += task_bp_pinned(tsk); if (nr > slots->pinned) slots->pinned = nr; nr = per_cpu(nr_bp_flexible, cpu); if (nr > slots->flexible) slots->flexible = nr; } }
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	int count = task_bp_pinned(tsk);
	int delta = enable ? 1 : -1;

	/*
	 * Move the task one histogram bucket up (enable) or down
	 * (disable): bump its new bucket and, unless it previously
	 * pinned no breakpoints, drop it from the old one.
	 */
	tsk_pinned[count] += delta;
	if (count > 0)
		tsk_pinned[count - 1] -= delta;
}
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	/*
	 * tsk_pinned[] appears to be a histogram: slot n counts the tasks
	 * currently pinning n+1 breakpoints of this type on this cpu
	 * (so a task with old_count pinned bps lives in slot old_count-1).
	 * NOTE(review): inferred from the bucket moves below — confirm
	 * against the definition of nr_task_bp_pinned.
	 */
	old_count = task_bp_pinned(tsk, type);
	old_idx = old_count - 1;
	/* After gaining/losing 'weight' bps the task lands in this slot. */
	idx = old_idx + weight;

	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		/* Leave the old bucket only if the task was in one. */
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}