static int test_one(bool client_negotiate_unix_fds, bool server_negotiate_unix_fds,
                    bool client_anonymous_auth, bool server_anonymous_auth) {

        struct context c;
        pthread_t s;
        void *p;
        int r, q;

        zero(c);
        assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, c.fds) >= 0);

        c.client_negotiate_unix_fds = client_negotiate_unix_fds;
        c.server_negotiate_unix_fds = server_negotiate_unix_fds;
        c.client_anonymous_auth = client_anonymous_auth;
        c.server_anonymous_auth = server_anonymous_auth;

        r = pthread_create(&s, NULL, server, &c);
        if (r != 0)
                return -r;

        r = client(&c);
        q = pthread_join(s, &p);

        if (q != 0)
                return -q;
        if (r < 0)
                return r;
        if (PTR_TO_INT(p) < 0)
                return PTR_TO_INT(p);

        return 0;
}
int main(int argc, char *argv[]) {
        struct context c = {};
        pthread_t s;
        void *p;
        int r, q;

        zero(c);
        c.automatic_integer_property = 4711;
        assert_se(c.automatic_string_property = strdup("dudeldu"));

        assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, c.fds) >= 0);

        r = pthread_create(&s, NULL, server, &c);
        if (r != 0)
                return -r;

        r = client(&c);
        q = pthread_join(s, &p);

        if (q != 0)
                return -q;
        if (r < 0)
                return r;
        if (PTR_TO_INT(p) < 0)
                return PTR_TO_INT(p);

        free(c.something);
        free(c.automatic_string_property);

        return EXIT_SUCCESS;
}
void __turbulence_loop_discard_broken (TurbulenceCtx * ctx, TurbulenceLoop * loop)
{
	int  loop_descriptor;
	int  result = -1;
	char bytes[4];

	/* reset cursor */
	axl_list_cursor_first (loop->cursor);
	while (axl_list_cursor_has_item (loop->cursor)) {

		/* get loop descriptor */
		loop_descriptor = PTR_TO_INT (axl_list_cursor_get (loop->cursor));

		/* peek on the descriptor to check whether it is still valid */
		result = recv (loop_descriptor, bytes, 1, MSG_PEEK);
		if (result == -1 && errno == EBADF) {
			/* broken descriptor: report it and drop it from the wait list */
			error ("Discarding descriptor %d because it is broken/invalid (EBADF/%d)",
			       loop_descriptor, errno);

			/* remove item at the current cursor position */
			axl_list_cursor_remove (loop->cursor);
			continue;
		} /* end if */

		/* get the next item */
		axl_list_cursor_next (loop->cursor);
	} /* end while */

	return;
}
void TkAlarmRxThread(void *cookie)
{
    int16       MsgLen = 0;
    TkMsgBuff  *pMsgBuf;
    uint32      pathId = PTR_TO_INT(cookie);

    pMsgBuf = (TkMsgBuff *) TkOamMemGet(pathId);

    while (1) {
        if ((MsgLen = sal_msg_rcv(gTagLinkCfg[pathId].almMsgQid,
                                  (char *) pMsgBuf,
                                  TK_MAX_RX_TX_DATA_LENGTH,
                                  WAIT_FOREVER)) < 0) {
            sal_usleep(1);
            continue;
        } else {
            if (TkDbgLevelIsSet(TkDbgMsgEnable | TkDbgAlmEnable)) {
                TkDbgPrintf(("\r\nAlmTask received a msg\n"));
                TkDbgDataDump((uint8 *) pMsgBuf, MsgLen, 16);
            }
            if (ieeeAlmProcessFn)
                ieeeAlmProcessFn(pathId, pMsgBuf->buff, MsgLen);
        }
        sal_usleep(1);
    }

    /* not reached: the receive loop above never terminates */
    TkOamMemPut(pathId, (void *) pMsgBuf);
}
static bool match_app_by_id(const void *data, const void *user_data)
{
	const struct health_app *app = data;
	uint16_t app_id = PTR_TO_INT(user_data);

	return app->id == app_id;
}
STATIC int
_bcm_gport_show_bandwidth(int unit, bcm_gport_t port, int numq, uint32 flags,
                          bcm_gport_t sched_gport, void *user_data)
{
    bcm_cos_queue_t cosq;
    bcm_port_t      user_port = PTR_TO_INT(user_data);
    bcm_port_t      local_port = 0;
    uint32          kbits_sec_min, kbits_sec_max, bw_flags;

    BCM_IF_ERROR_RETURN(bcm_port_local_get(unit, port, &local_port));

    if (user_port != local_port) {
        return BCM_E_NONE;
    }

    for (cosq = 0; cosq < 16; cosq++) {
        if (bcm_cosq_gport_bandwidth_get(unit, sched_gport, cosq,
                                         &kbits_sec_min, &kbits_sec_max,
                                         &bw_flags) == 0) {
            printk(" U %4s | %d | %8d | %8d | %6d\n",
                   BCM_PORT_NAME(unit, local_port), cosq,
                   kbits_sec_min, kbits_sec_max, bw_flags);
        }
    }

    return BCM_E_NONE;
}
static int device_monitor_handler(sd_device_monitor *monitor, sd_device *device, void *userdata) {
        const char *action = NULL, *devpath = NULL, *subsystem = NULL;
        MonitorNetlinkGroup group = PTR_TO_INT(userdata);
        struct timespec ts;

        assert(device);
        assert(IN_SET(group, MONITOR_GROUP_UDEV, MONITOR_GROUP_KERNEL));

        (void) sd_device_get_property_value(device, "ACTION", &action);
        (void) sd_device_get_devpath(device, &devpath);
        (void) sd_device_get_subsystem(device, &subsystem);

        assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

        printf("%-6s[%"PRI_TIME".%06"PRI_NSEC"] %-8s %s (%s)\n",
               group == MONITOR_GROUP_UDEV ? "UDEV" : "KERNEL",
               ts.tv_sec, (nsec_t) ts.tv_nsec / 1000,
               action, devpath, subsystem);

        if (arg_show_property) {
                const char *key, *value;

                FOREACH_DEVICE_PROPERTY(device, key, value)
                        printf("%s=%s\n", key, value);

                printf("\n");
        }

        return 0;
}
static int exit_handler(sd_event_source *s, void *userdata) {
        log_info("got quit handler on %c", PTR_TO_INT(userdata));

        got_exit = true;

        return 3;
}
static int signal_handler(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        sd_event_source *p = NULL;
        sigset_t ss;
        pid_t pid;

        assert_se(s);
        assert_se(si);

        log_info("got signal on %c", PTR_TO_INT(userdata));

        assert_se(userdata == INT_TO_PTR('e'));

        assert_se(sigemptyset(&ss) >= 0);
        assert_se(sigaddset(&ss, SIGCHLD) >= 0);
        assert_se(sigprocmask(SIG_BLOCK, &ss, NULL) >= 0);

        pid = fork();
        assert_se(pid >= 0);

        if (pid == 0)
                _exit(0);

        assert_se(sd_event_add_child(sd_event_source_get_event(s), &p, pid, WEXITED, child_handler, INT_TO_PTR('f')) >= 0);
        assert_se(sd_event_source_set_enabled(p, SD_EVENT_ONESHOT) >= 0);

        sd_event_source_unref(s);

        return 1;
}
/*
 * Function:
 *      soc_ipoll_thread
 * Description:
 *      Thread context for interrupt handlers in polled IRQ mode
 * Parameters:
 *      data - poll delay in usecs (passed as pointer)
 * Returns:
 *      Nothing
 */
STATIC void
soc_ipoll_thread(void *data)
{
    int dev, spl, udelay;

    udelay = PTR_TO_INT(data);

    while (_ihandlers) {
        spl = sal_splhi();
        for (dev = 0; dev < SOC_MAX_NUM_DEVICES; dev++) {
            if (_ictrl[dev].handler != NULL && !_ictrl[dev].paused) {
                if (soc_feature(dev, soc_feature_cmicm)) {
                    soc_cmicm_ipoll_handler(dev);
                } else {
                    soc_cmic_ipoll_handler(dev);
                }
            }
        }
        sal_spl(spl);

        if (udelay) {
            sal_usleep(udelay);
        } else {
            sal_thread_yield();
        }
    }

    sal_thread_exit(0);
}
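/*
 * Spawn-side sketch for context: the poll interval travels through the
 * opaque thread argument via INT_TO_PTR and is recovered above with
 * PTR_TO_INT. The function name soc_ipoll_start, the exact
 * sal_thread_create() invocation, and the stack-size/priority values
 * below are assumptions for illustration, not taken from the SDK source.
 */
static int
soc_ipoll_start(int usec)
{
    sal_thread_t t;

    t = sal_thread_create("bcmPOLL",
                          SAL_THREAD_STKSZ,   /* assumed stack size constant */
                          50,                 /* assumed priority */
                          soc_ipoll_thread,
                          INT_TO_PTR(usec));  /* delay passed as a pointer */

    return (t == SAL_THREAD_ERROR) ? -1 : 0;
}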
static bool match_mdep_by_id(const void *data, const void *user_data)
{
	const struct mdep_cfg *mdep = data;
	uint16_t mdep_id = PTR_TO_INT(user_data);

	return mdep->id == mdep_id;
}
void
cputrans_rx_pkt_free(bcm_pkt_t *pkt)
{
    int idx;

    if (pkt == NULL) {
        LOG_INFO(BSL_LS_TKS_CTPKT,
                 (BSL_META("CT free: Packet NULL\n")));
        return;
    }

    if (!_rx_setup_done) {
        LOG_INFO(BSL_LS_TKS_CTPKT,
                 (BSL_META("CT free: Not initialized\n")));
        return;
    }

    idx = PTR_TO_INT(pkt->cookie2);
    if (idx >= _rx_lists) {
        LOG_INFO(BSL_LS_TKS_CTPKT,
                 (BSL_META("CT free: bad CT index: %d > %d\n"),
                  idx, _rx_lists));
        return;
    }

    CPUTRANS_LOCK;
    pkt->next = _rx_free_lists[idx];
    _rx_free_lists[idx] = pkt;
    CPUTRANS_UNLOCK;
}
void *
sal_dma_alloc(size_t sz, char *s)
{
    uint32 *p;
    size_t orig_sz = sz;   /* remember the requested size for memory logging */

    /*
     * Round up size to accommodate corruption detection sentinels.
     * Place sentinels at the beginning and end of the data area to
     * detect memory corruption. These are verified on free.
     */
    sz = (sz + 3) & ~3;

    if ((p = malloc(sz + 12)) == 0) {
        return p;
    }

    assert(INT_TO_PTR(PTR_TO_INT(p)) == p);

    p[0] = sz / 4;
    p[1] = 0xaaaaaaaa;
    p[2 + sz / 4] = 0xbbbbbbbb;

#ifdef BROADCOM_DEBUG
#ifdef INCLUDE_BCM_SAL_PROFILE
    SAL_DMA_ALLOC_RESOURCE_USAGE_INCR(_sal_dma_alloc_curr,
                                      _sal_dma_alloc_max,
                                      (sz), ilock);
#endif
#endif /* BROADCOM_DEBUG */

    MEMLOG_ALLOC("sal_dma_alloc", &p[0], orig_sz, s);

    return (void *) &p[2];
}
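/*
 * The sentinel layout above (size word at p[0], leading 0xaaaaaaaa at
 * p[1], trailing 0xbbbbbbbb after the data) implies a check like the
 * following on the free path. This is a minimal hypothetical sketch,
 * not the SDK's actual sal_dma_free(); its name and assert-based
 * error handling are assumptions.
 */
void
sal_dma_free_sketch(void *addr)
{
    uint32 *p = (uint32 *) addr - 2;    /* step back over size word + sentinel */
    uint32 words = p[0];                /* payload size in 32-bit words */

    assert(p[1] == 0xaaaaaaaa);         /* leading sentinel intact */
    assert(p[2 + words] == 0xbbbbbbbb); /* trailing sentinel intact */

    free(p);
}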
static bool match_mdep_by_role(const void *data, const void *user_data)
{
	const struct mdep_cfg *mdep = data;
	uint16_t role = PTR_TO_INT(user_data);

	return mdep->role == role;
}
static bool match_channel_by_mdep_id(const void *data, const void *user_data)
{
	const struct health_channel *channel = data;
	uint16_t mdep_id = PTR_TO_INT(user_data);

	return channel->mdep_id == mdep_id;
}
void __myqttd_conn_mgr_proxy_on_close (MyQttConn * conn, axlPointer _loop)
{
	MyQttdLoop * loop = _loop;
	/* MyQttdCtx  * ctx  = myqttd_loop_ctx (loop); */

	/* get socket associated */
	int _socket = PTR_TO_INT (myqtt_conn_get_data (conn, "myqttd:proxy:fd"));

	/* msg ("PROXY: closing connection-id=%d, refs=%d, socket=%d",
	   myqtt_conn_get_id (conn), myqtt_conn_ref_count (conn), _socket); */

	/* unregister socket from loop watcher */
	/* msg ("PROXY: calling to unwatch descriptor from loop _socket=%d (finished watching=%d)",
	   _socket, myqttd_loop_watching (loop)); */
	myqttd_loop_unwatch_descriptor (loop, _socket, axl_true);
	/* msg ("PROXY: calling to unwatch descriptor from loop _socket=%d (finished watching=%d)",
	   _socket, myqttd_loop_watching (loop)); */

	/* close socket */
	myqtt_close_socket (_socket);

	/* release and shutdown */
	conn->preread_handler   = NULL;
	conn->preread_user_data = NULL;

	/* reduce reference counting but do it outside the close handler */
	myqtt_thread_pool_new_task (CONN_CTX (conn), __myqttd_conn_mgr_release_proxy_conn, conn);

	return;
}
INLINE HCHAR *NameNH(memchr_search)(void *data, HCHAR *haystack,
				    ptrdiff_t haystacklen)
{
  return NameNH(MEMCHR)(haystack,
			DO_NOT_WARN((NCHAR)(ptrdiff_t) PTR_TO_INT(data)),
			haystacklen);
}
long get_irg_graph_nr(const ir_graph *irg)
{
#ifdef DEBUG_libfirm
	return irg->graph_nr;
#else
	return PTR_TO_INT(irg);
#endif
}
static dbus_bool_t add_watch(DBusWatch *watch, void *data) {
        EpollData _cleanup_free_ *e = NULL;
        struct epoll_event ev;

        assert(watch);

        e = new0(EpollData, 1);
        if (!e)
                return FALSE;

        e->fd = dbus_watch_get_unix_fd(watch);
        e->object = watch;
        e->is_timeout = false;

        zero(ev);
        ev.events = bus_flags_to_events(watch);
        ev.data.ptr = e;

        if (epoll_ctl(PTR_TO_INT(data), EPOLL_CTL_ADD, e->fd, &ev) < 0) {

                if (errno != EEXIST)
                        return FALSE;

                /* Hmm, bloody D-Bus creates multiple watches on the
                 * same fd. epoll() does not like that. As a dirty
                 * hack we simply dup() the fd and hence get a second
                 * one we can safely add to the epoll(). */

                e->fd = dup(e->fd);
                if (e->fd < 0)
                        return FALSE;

                if (epoll_ctl(PTR_TO_INT(data), EPOLL_CTL_ADD, e->fd, &ev) < 0) {
                        close_nointr_nofail(e->fd);
                        return FALSE;
                }

                e->fd_is_dupped = true;
        }

        dbus_watch_set_data(watch, e, NULL);
        e = NULL; /* prevent freeing */

        return TRUE;
}
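/*
 * The dup() workaround above relies on a documented epoll property: the
 * same epoll instance cannot register one file descriptor twice (the
 * second EPOLL_CTL_ADD fails with EEXIST), but a dup()'ed descriptor
 * counts as a distinct registration. A small standalone illustration of
 * that behavior, independent of the D-Bus code here:
 */
#include <assert.h>
#include <errno.h>
#include <sys/epoll.h>
#include <unistd.h>

int epoll_dup_demo(void) {
        int ep = epoll_create1(0);
        int fds[2];
        struct epoll_event ev = { .events = EPOLLIN };

        assert(ep >= 0);
        assert(pipe(fds) == 0);

        ev.data.fd = fds[0];
        assert(epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &ev) == 0);

        /* second registration of the very same fd is rejected */
        assert(epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &ev) < 0 && errno == EEXIST);

        /* a dup()'ed fd refers to the same pipe but is a distinct epoll key */
        int dup_fd = dup(fds[0]);
        assert(dup_fd >= 0);
        assert(epoll_ctl(ep, EPOLL_CTL_ADD, dup_fd, &ev) == 0);

        close(dup_fd);
        close(fds[0]);
        close(fds[1]);
        close(ep);
        return 0;
}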
long get_entity_nr(const ir_entity *ent)
{
	assert(ent->kind == k_entity);
#ifdef DEBUG_libfirm
	return ent->nr;
#else
	return (long)PTR_TO_INT(ent);
#endif
}
long get_type_nr(const ir_type *tp)
{
	assert(tp);
#ifdef DEBUG_libfirm
	return tp->nr;
#else
	return (long)PTR_TO_INT(tp);
#endif
}
/**
 * @brief Allows to install a new async event represented by the
 * event handler provided. This async event represents a handler
 * called at the interval defined by microseconds, refreshing that
 * period as long as the event handler returns axl_false.
 *
 * The event handler is called after the provided microseconds have
 * expired. If the handler returns axl_true (remove), the event is
 * cleared and called no more.
 *
 * Note that events installed by this function must be tasks that are
 * neither loops nor long-running operations. This is because the
 * thread pool assigns one thread to check and execute pending
 * events, so if one of those events delays, the rest won't be
 * executed until the former finishes. If you want to install a loop
 * handler or a handler that executes long-running code, use \ref
 * valvula_thread_pool_new_task instead.
 *
 * @param ctx The ValvulaCtx context where the event will be
 * installed. This is provided because event handlers are handled by
 * the valvula thread pool. This parameter can't be NULL.
 *
 * @param microseconds The amount of time to wait before calling the
 * event handler. This value must be > 0.
 *
 * @param event_handler The handler to be called after the
 * microseconds value has expired. This parameter can't be NULL.
 *
 * @param user_data User defined pointer to data to be passed to the
 * event handler.
 *
 * @param user_data2 Second user defined pointer to data to be passed
 * to the event handler.
 *
 * @return The method returns the event identifier. This identifier
 * can be used to remove the event by using
 * valvula_thread_pool_remove_event. The function returns -1 in case
 * of failure.
 */
int  valvula_thread_pool_new_event          (ValvulaCtx              * ctx,
					     long                      microseconds,
					     ValvulaThreadAsyncEvent   event_handler,
					     axlPointer                user_data,
					     axlPointer                user_data2)
{
	/* get current context */
	ValvulaThreadPoolEvent * event;

	/* check parameters */
	if (event_handler == NULL || ctx == NULL || ctx->thread_pool == NULL || ctx->thread_pool_being_stopped)
		return -1;

	/* lock the thread pool */
	valvula_mutex_lock (&(ctx->thread_pool->mutex));

	/* create the event data */
	event = axl_new (ValvulaThreadPoolEvent, 1);

	/* check alloc result */
	if (event) {
		event->func      = event_handler;
		event->ref_count = 1;
		event->data      = user_data;
		event->data2     = user_data2;
		event->delay     = microseconds;
		gettimeofday (&event->next_step, NULL);

		/* update next step to the appropriate value */
		__valvula_thread_pool_increase_stamp (event);

		/* add the event to the event list */
		axl_list_add (ctx->thread_pool->events, event);
	} /* end if */

	/* (un)lock the thread pool */
	valvula_mutex_unlock (&(ctx->thread_pool->mutex));

	/* in case of failure */
	if (event == NULL)
		return PTR_TO_INT (-1);

	return PTR_TO_INT (event);
}
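/*
 * Usage sketch based on the documentation above. The
 * ValvulaThreadAsyncEvent signature shown here (context plus the two
 * user pointers, returning axl_true to remove the event) is assumed
 * from the parameter descriptions, not taken from the valvula headers.
 */
axl_bool my_periodic_check (ValvulaCtx * ctx, axlPointer user_data, axlPointer user_data2)
{
	int * count = user_data;

	(*count)++;

	/* returning axl_true removes the event; axl_false keeps it periodic */
	return (*count >= 10) ? axl_true : axl_false;
}

void install_periodic_check (ValvulaCtx * ctx, int * counter)
{
	/* fire roughly every second, remove after ten invocations */
	int event_id = valvula_thread_pool_new_event (ctx, 1000000, my_periodic_check, counter, NULL);
	if (event_id == -1) {
		/* failed to install the event */
	}
}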
static void disconn_handler(void *data, void *user_data)
{
	struct att_disconn *disconn = data;
	int err = PTR_TO_INT(user_data);

	if (disconn->removed)
		return;

	if (disconn->callback)
		disconn->callback(err, disconn->user_data);
}
int main(int argc, char *argv[]) {
        pthread_t c1, c2;
        sd_bus *bus;
        void *p;
        int q, r;

        r = server_init(&bus);
        if (r < 0) {
                log_info("Failed to connect to bus, skipping tests.");
                return EXIT_TEST_SKIP;
        }

        log_info("Initialized...");

        r = pthread_create(&c1, NULL, client1, bus);
        if (r != 0)
                return EXIT_FAILURE;

        r = pthread_create(&c2, NULL, client2, bus);
        if (r != 0)
                return EXIT_FAILURE;

        r = server(bus);

        q = pthread_join(c1, &p);
        if (q != 0)
                return EXIT_FAILURE;
        if (PTR_TO_INT(p) < 0)
                return EXIT_FAILURE;

        q = pthread_join(c2, &p);
        if (q != 0)
                return EXIT_FAILURE;
        if (PTR_TO_INT(p) < 0)
                return EXIT_FAILURE;

        if (r < 0)
                return EXIT_FAILURE;

        return EXIT_SUCCESS;
}
static void remove_timeout(DBusTimeout *timeout, void *data) {
        EpollData _cleanup_free_ *e = NULL;

        assert(timeout);

        e = dbus_timeout_get_data(timeout);
        if (!e)
                return;

        assert_se(epoll_ctl(PTR_TO_INT(data), EPOLL_CTL_DEL, e->fd, NULL) >= 0);
        close_nointr_nofail(e->fd);
}
/*
 * Send a TK extension OAM message to get IGMP group info from the ONU.
 */
int
TkExtOamGetIgmpGroupInfo(uint8 pathId, uint8 LinkId,
                         OamIgmpGroupConfig * pIgmpGroupInfo)
{
    uint32 DataLen;
    uint8 *rxTmpBuf = (uint8 *) TkGetApiBuf(pathId);

    if ((LinkId > 7) || (NULL == pIgmpGroupInfo)) {
        TkDbgTrace(TkDbgErrorEnable);
        return ERROR;
    }

    if (OK == TkExtOamGetMulti(pathId, LinkId, OamBranchAction,
                               OamExtActGetIgmpGroupInfo,
                               (uint8 *) rxTmpBuf, &DataLen)) {
        OamVarContainer *var;
        uint8 *numGroups;
        OamIgmpGroupConfig *info = NULL;
        OamIgmpGroupInfo *pGgroup;

        numGroups = (uint8 *) pIgmpGroupInfo;
        pGgroup = (OamIgmpGroupInfo *)
            INT_TO_PTR(PTR_TO_INT(pIgmpGroupInfo) + sizeof(uint8));
        *numGroups = 0;

        var = (OamVarContainer *) rxTmpBuf;
        while (var->branch != OamBranchTermination) {
            if ((var->branch == OamBranchAction)
                && (var->leaf == soc_ntohs(OamExtActGetIgmpGroupInfo))) {
                info = (OamIgmpGroupConfig *) (var->value);
                *numGroups += info->numGroups;
                bcopy((uint8 *) info->group, (uint8 *) pGgroup,
                      info->numGroups * sizeof(OamIgmpGroupInfo));
            }

            /* info may be NULL if no matching container has been seen yet */
            if (NULL != info) {
                pGgroup += info->numGroups;
                var = NextCont(var);
            } else {
                break;
            }
        }
        return (OK);
    } else {
        TkDbgTrace(TkDbgErrorEnable);
        return ERROR;
    }
}
static void toggle_powered(const void *test_data)
{
	struct test_data *data = tester_get_data();
	bool power = PTR_TO_INT(test_data);
	unsigned char param[1];

	param[0] = power ? 0x01 : 0x00;

	tester_print("Powering %s controller", power ? "on" : "off");

	mgmt_send(data->mgmt, MGMT_OP_SET_POWERED, data->mgmt_index,
			sizeof(param), param, toggle_powered_client_callback,
			INT_TO_PTR(power), NULL);
}
static void remove_watch(DBusWatch *watch, void *data) {
        EpollData _cleanup_free_ *e = NULL;

        assert(watch);

        e = dbus_watch_get_data(watch);
        if (!e)
                return;

        assert_se(epoll_ctl(PTR_TO_INT(data), EPOLL_CTL_DEL, e->fd, NULL) >= 0);

        if (e->fd_is_dupped)
                close_nointr_nofail(e->fd);
}
static int child_handler(sd_event_source *s, const siginfo_t *si, void *userdata) {
        assert_se(s);
        assert_se(si);

        log_info("got child on %c", PTR_TO_INT(userdata));

        assert_se(userdata == INT_TO_PTR('f'));

        assert_se(sd_event_exit(sd_event_source_get_event(s), 0) >= 0);
        sd_event_source_unref(s);

        return 1;
}
static void test_mnt_id(void) {
        _cleanup_fclose_ FILE *f = NULL;
        Hashmap *h;
        Iterator i;
        char *p;
        void *k;
        int r;

        log_info("/* %s */", __func__);

        assert_se(f = fopen("/proc/self/mountinfo", "re"));
        assert_se(h = hashmap_new(&trivial_hash_ops));

        for (;;) {
                _cleanup_free_ char *line = NULL, *path = NULL;
                int mnt_id;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r == 0)
                        break;
                assert_se(r > 0);

                assert_se(sscanf(line, "%i %*s %*s %*s %ms", &mnt_id, &path) == 2);
                log_debug("mountinfo: %s → %i", path, mnt_id);

                assert_se(hashmap_put(h, INT_TO_PTR(mnt_id), path) >= 0);
                path = NULL;
        }

        HASHMAP_FOREACH_KEY(p, k, h, i) {
                int mnt_id = PTR_TO_INT(k), mnt_id2;

                r = path_get_mnt_id(p, &mnt_id2);
                if (r < 0) {
                        log_debug_errno(r, "Failed to get the mnt id of %s: %m\n", p);
                        continue;
                }

                log_debug("mnt ids of %s are %i, %i\n", p, mnt_id, mnt_id2);

                if (mnt_id == mnt_id2)
                        continue;

                /* The ids don't match? If so, then there are two mounts on the same path,
                 * let's check if that's really the case */
                char *t = hashmap_get(h, INT_TO_PTR(mnt_id2));
                log_debug("the other path for mnt id %i is %s\n", mnt_id2, t);
                assert_se(path_equal(p, t));
        }