/* * fixup_init is called when: * - an active object is initialized */ static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) { struct rcu_head *head = addr; switch (state) { case ODEBUG_STATE_ACTIVE: /* * Ensure that queued callbacks are all executed. * If we detect that we are nested in a RCU read-side critical * section, we should simply fail, otherwise we would deadlock. <<<<<<< HEAD * In !PREEMPT configurations, there is no way to tell if we are * in a RCU read-side critical section or not, so we never * attempt any fixup and just print a warning. */ #ifndef CONFIG_PREEMPT WARN_ON_ONCE(1); return 0; #endif if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON_ONCE(1); ======= */ if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON(1); >>>>>>> 296c66da8a02d52243f45b80521febece5ed498a return 0;
/*
 * NOTE(review): a stale duplicate definition of rcuhead_fixup_free() was
 * removed here during merge-conflict cleanup.  The newer version of the
 * function (using WARN_ON_ONCE and the !PREEMPT warning comment) already
 * exists later in this file, and two definitions of the same static
 * function do not compile.
 */
/* * fixup_activate is called when: * - an active object is activated * - an unknown object is activated (might be a statically initialized object) * Activation is performed internally by call_rcu(). */ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) { struct rcu_head *head = addr; switch (state) { case ODEBUG_STATE_NOTAVAILABLE: /* * This is not really a fixup. We just make sure that it is * tracked in the object tracker. */ debug_object_init(head, &rcuhead_debug_descr); debug_object_activate(head, &rcuhead_debug_descr); return 0; case ODEBUG_STATE_ACTIVE: /* * Ensure that queued callbacks are all executed. * If we detect that we are nested in a RCU read-side critical * section, we should simply fail, otherwise we would deadlock. */ if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON(1); return 0; } rcu_barrier(); rcu_barrier_sched(); rcu_barrier_bh(); debug_object_activate(head, &rcuhead_debug_descr); return 1; default: return 0; } }
/*
 * fixup_free is called when:
 * - an active object is freed
 *
 * Returns 1 if the fixup succeeded (object may now be freed),
 * 0 if the caller should just warn and give up.
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		/*
		 * Wait for callbacks of all RCU flavors to drain, so the
		 * rcu_head is guaranteed idle before it is released back
		 * to the debug-objects tracker.
		 */
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}