/*
  ==============================
  L_CollideMapForEachInSpace
  ==============================
*/
void L_CollideMapForEachInSpace( ati_t *owner, move_volume_t *self,
				 void (*action_func)(ati_t *caller, ati_t *collide_with) )
{
	int		i;
	vec3d_t		min, max;
	vec3d_t		min1, max1;
	vec3d_t		min2, max2;

	move_volume_t	*other_move;
	ati_t		*other_owner;

	/* grow this volume's 'from' bound box by its 'to' bound box,
	   so min1/max1 covers the whole move */
	L_MoveVolumeGetFromBoundBox( self, min1, max1 );
	L_MoveVolumeGetToBoundBox( self, min, max );
	Vec3dAddToBB( min1, max1, min );
	Vec3dAddToBB( min1, max1, max );

	for ( i = 0; i < l_collidemap_object_num; i++ )
	{
		other_owner = l_collidemap_owners[i];
		other_move = l_collidemap_moves[i];

		if ( other_move == self )
			continue;

		/* same swept bound box for the other volume */
		L_MoveVolumeGetFromBoundBox( other_move, min2, max2 );
		L_MoveVolumeGetToBoundBox( other_move, min, max );
		Vec3dAddToBB( min2, max2, min );
		Vec3dAddToBB( min2, max2, max );

		if ( Vec3dBBDoIntersect( min1, max1, min2, max2 ) )
		{
			action_func( owner, other_owner );
		}
	}
}
void
smp_rendezvous_cpus(unsigned long map,
    void (*action_func)(void *), void *arg)
{
	unsigned int cpumask = 1 << cpu_number();

	if (ncpus == 1) {
		if (action_func != NULL)
			action_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_enter(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_map = map;
	smp_rv_action_func = action_func;
	smp_rv_func_arg = arg;

	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* signal other processors, which will enter the IPI with interrupts off */
	mips64_multicast_ipi(map & ~cpumask, MIPS64_IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (map & cpumask)
		smp_rendezvous_action();

	while (smp_rv_waiters[1] != smp_rv_map)
		;

	/* release lock */
	mtx_leave(&smp_ipi_mtx);
}
/* Print array elements using a function pointer. */
void array_iterator(int *array, int size, void (*action_func)(int))
{
	int i;

	i = 0;
	while (i < size)
	{
		action_func(array[i]);
		i++;
	}
}
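/*
 * A minimal usage sketch for array_iterator() above, assuming only the C
 * standard library; print_int is a hypothetical callback added here for
 * illustration.
 */
#include <stdio.h>

static void print_int(int n)
{
	printf("%d\n", n);
}

int main(void)
{
	int values[] = { 3, 1, 4, 1, 5 };

	/* calls print_int on each of the five elements, in order */
	array_iterator(values, 5, print_int);
	return 0;
}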
/*
  ==============================
  L_SwitchMapSelectSwitchAbleWhere_switchid
  ==============================
*/
void L_SwitchMapSelectSwitchAbleWhere_switchid( ati_t *self, unique_t switch_id,
						void (*action_func)(ati_t *caller, ati_t *switch_able) )
{
	int i;

	for ( i = 0; i < L_SWITCHMAP_MAX_OBJECTS; i++ )
	{
		if ( l_switchmap_switchid[i] == switch_id && l_switchmap_owners[i] )
		{
			action_func( self, l_switchmap_owners[i] );
		}
	}
}
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (((1 << i) & map) != 0)
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}
void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/* See the comments in the smp_rendezvous_cpus() case. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
static int
umem_size_process(const umem_env_item_t *item, const char *item_arg)
{
	const char *name = item->item_name;
	void (*action_func)(size_t);

	size_t result;
	int ret;

	if (strcmp(name, "size_clear") == 0) {
		if (item_arg != NULL) {
			log_message("%s: %s: does not take a value. ignored\n",
			    CURRENT, name);
			return (ARG_BAD);
		}
		umem_alloc_sizes_clear();
		return (ARG_SUCCESS);
	} else if (strcmp(name, "size_add") == 0) {
		action_func = umem_alloc_sizes_add;
	} else if (strcmp(name, "size_remove") == 0) {
		action_func = umem_alloc_sizes_remove;
	} else {
		log_message("%s: %s: internally unrecognized\n",
		    CURRENT, name);
		return (ARG_BAD);
	}

	if (item_arg == NULL) {
		log_message("%s: %s: requires a value. ignored\n",
		    CURRENT, name);
		return (ARG_BAD);
	}

	ret = item_size_process(item, item_arg);
	if (ret != ARG_SUCCESS)
		return (ret);

	result = *item->item_size_target;
	action_func(result);
	return (ARG_SUCCESS);
}
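/*
 * A self-contained sketch of the dispatch pattern above: select an action
 * function by name, then call it with the parsed value. All names here are
 * illustrative, not part of libumem.
 */
#include <stdio.h>
#include <string.h>

static void demo_size_add(size_t n)    { printf("add %zu\n", n); }
static void demo_size_remove(size_t n) { printf("remove %zu\n", n); }

static int
demo_dispatch(const char *name, size_t value)
{
	void (*action_func)(size_t);

	if (strcmp(name, "size_add") == 0)
		action_func = demo_size_add;
	else if (strcmp(name, "size_remove") == 0)
		action_func = demo_size_remove;
	else
		return (-1);		/* internally unrecognized */

	action_func(value);
	return (0);
}

int
main(void)
{
	return (demo_dispatch("size_add", 64));
}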
void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
/*
  ==============================
  ShapeDB_SelectShapeWhere_selfid_CB
  ==============================
*/
void ShapeDB_SelectShapeWhere_selfid_CB( db_shape_t *db, unique_t self_id,
					 void (*action_func)(shape_t *obj) )
{
	shape_t tmp;
	shape_t *shp;

	if ( self_id == UNIQUE_ALL )
	{
		/* run the action on every shape in the map */
		U_MapForEach( db->by_selfid_map, (void(*)(void*)) action_func );
	}
	else
	{
		tmp.self_id = self_id;
		shp = (shape_t *)U_MapSearch( db->by_selfid_map, &tmp );
		if ( !shp )
			return;
		action_func( shp );
	}
}
/**
**  Call the supplied user action function on the highest priority
**  events.
**
**  @return integer
**
**  @retval -1 action function failed on an event
**  @retval  0 no events logged
**  @retval  1 events logged
*/
int sfeventq_action(int (*action_func)(void *, void *), void *user)
{
	SF_EVENTQ_NODE *node;
	int logged = 0;

	if (!action_func)
		return -1;

	if (!(s_eventq.head))
		return 0;

	for (node = s_eventq.head; node; node = node->next)
	{
		if (logged >= s_eventq.log_nodes)
			return 1;

		if (action_func(node->event, user))
			return -1;

		logged++;
	}

	return 1;
}
/**
**  Call the supplied user action function on the highest priority
**  events.
**
**  @return integer
**
**  @retval -1 action function failed on an event
**  @retval  0 no events logged
**  @retval  1 events logged
*/
int sfeventq_action(SF_EVENTQ *eq, int (*action_func)(void *, void *), void *user)
{
	SF_EVENTQ_NODE *node;
	int logged = 0;

	if (action_func == NULL)
		return -1;

	if (eq->head == NULL)
		return 0;

	for (node = eq->head; node != NULL; node = node->next)
	{
		if (logged >= eq->log_nodes)
			return 1;

		if (action_func(node->event, user))
			return -1;

		logged++;
	}

	return 1;
}
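/*
 * A minimal usage sketch for sfeventq_action() above. The struct layouts and
 * the log_event callback are hypothetical stand-ins, consistent only with the
 * fields the function actually touches (head, log_nodes, event, next).
 */
#include <stdio.h>

typedef struct _SF_EVENTQ_NODE {
	void *event;
	struct _SF_EVENTQ_NODE *next;
} SF_EVENTQ_NODE;

typedef struct {
	SF_EVENTQ_NODE *head;
	int log_nodes;
} SF_EVENTQ;

/* Action callback: log one event; a nonzero return aborts the walk. */
static int log_event(void *event, void *user)
{
	fprintf((FILE *)user, "event with priority %d\n", *(int *)event);
	return 0;
}

int main(void)
{
	int prio0 = 1, prio1 = 2;
	SF_EVENTQ_NODE second = { &prio1, NULL };
	SF_EVENTQ_NODE first = { &prio0, &second };
	SF_EVENTQ eq = { &first, 2 };

	/* expect 1: both queued events get logged */
	return sfeventq_action(&eq, log_event, stdout) == 1 ? 0 : 1;
}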
void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
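/*
 * A hedged, self-contained userland analog of the rendezvous pattern above,
 * using POSIX threads and barriers in place of IPIs and the smp_rv_waiters[]
 * counters. Every name below is hypothetical; this illustrates only the
 * setup/action/teardown protocol, not the FreeBSD kernel API.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t rv_barrier;
static void (*rv_setup_func)(void *);
static void (*rv_action_func)(void *);
static void (*rv_teardown_func)(void *);
static void *rv_arg;

/* Each thread mirrors smp_rendezvous_action(): setup, barrier, action,
   barrier, teardown. */
static void *
rendezvous_thread(void *unused)
{
	(void)unused;
	if (rv_setup_func != NULL)
		rv_setup_func(rv_arg);
	pthread_barrier_wait(&rv_barrier);	/* analog of a waiters count */
	if (rv_action_func != NULL)
		rv_action_func(rv_arg);
	pthread_barrier_wait(&rv_barrier);	/* all actions done before teardown */
	if (rv_teardown_func != NULL)
		rv_teardown_func(rv_arg);
	return NULL;
}

static void
count_action(void *arg)
{
	__sync_fetch_and_add((int *)arg, 1);	/* one increment per thread */
}

int
main(void)
{
	pthread_t tid[NTHREADS];
	int counter = 0, i;

	rv_action_func = count_action;
	rv_arg = &counter;
	pthread_barrier_init(&rv_barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, rendezvous_thread, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	pthread_barrier_destroy(&rv_barrier);
	printf("actions run: %d\n", counter);	/* expect NTHREADS */
	return 0;
}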