/**
 * PEERINFO calls this function to let us know about a possible peer
 * that we might want to connect to.
 *
 * @param cls closure (not used)
 * @param peer potential peer to connect to
 * @param hello HELLO for this peer (or NULL)
 * @param err_msg NULL if successful, otherwise contains error message
 */
static void
process_peer (void *cls, const struct GNUNET_PeerIdentity *peer,
              const struct GNUNET_HELLO_Message *hello, const char *err_msg)
{
  struct Peer *pos;

  if (err_msg != NULL)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                _("Error in communication with PEERINFO service: %s\n"),
                err_msg);
    GNUNET_PEERINFO_notify_cancel (peerinfo_notify);
    peerinfo_notify = GNUNET_PEERINFO_notify (cfg, &process_peer, NULL);
    return;
  }
  GNUNET_assert (peer != NULL);
  if (0 == memcmp (&my_identity, peer, sizeof (struct GNUNET_PeerIdentity)))
    return;                     /* that's me! */
  if (hello == NULL)
  {
    /* free existing HELLO, if any */
    pos = GNUNET_CONTAINER_multihashmap_get (peers, &peer->hashPubKey);
    if (NULL != pos)
    {
      GNUNET_free_non_null (pos->hello);
      pos->hello = NULL;
      if (pos->filter != NULL)
      {
        GNUNET_CONTAINER_bloomfilter_free (pos->filter);
        pos->filter = NULL;
      }
      if ((GNUNET_NO == pos->is_connected) && (GNUNET_NO == pos->is_friend) &&
          (0 ==
           GNUNET_TIME_absolute_get_remaining (pos->greylisted_until).rel_value))
        free_peer (NULL, &pos->pid.hashPubKey, pos);
    }
    return;
  }
  consider_for_advertising (hello);
  pos = GNUNET_CONTAINER_multihashmap_get (peers, &peer->hashPubKey);
  if (pos == NULL)
    pos = make_peer (peer, hello, GNUNET_NO);
  GNUNET_assert (NULL != pos);
  if (GNUNET_YES == pos->is_connected)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Already connected to peer `%s'\n",
                GNUNET_i2s (peer));
    return;
  }
  if (GNUNET_TIME_absolute_get_remaining (pos->greylisted_until).rel_value > 0)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Already tried peer `%s' recently\n",
                GNUNET_i2s (peer));
    return;                     /* peer still greylisted */
  }
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Considering connecting to peer `%s'\n",
              GNUNET_i2s (peer));
  schedule_attempt_connect (pos);
}
/**
 * Free `struct PreferencePeer` entry in map.
 *
 * @param cls the `struct PreferenceClient` with the map
 * @param key the peer the entry is for
 * @param value the `struct PreferencePeer` entry to free
 * @return #GNUNET_OK (continue to iterate)
 */
static int
free_preference (void *cls, const struct GNUNET_PeerIdentity *key, void *value)
{
  struct PreferenceClient *pc = cls;
  struct PreferencePeer *p = value;
  struct PeerRelative *pr;

  GNUNET_assert (GNUNET_OK ==
                 GNUNET_CONTAINER_multipeermap_remove (pc->peer2pref, key, p));
  GNUNET_free (p);
  pr = GNUNET_CONTAINER_multipeermap_get (preference_peers, key);
  GNUNET_assert (NULL != pr);
  GNUNET_assert (pr->num_clients > 0);
  pr->num_clients--;
  if (0 == pr->num_clients)
  {
    free_peer (NULL, key, pr);
  }
  return GNUNET_OK;
}
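/*
 * Usage sketch (not part of the original source): free_preference matches the
 * GNUNET_CONTAINER_PeerMapIterator signature, so a client's whole preference
 * map can be drained with GNUNET_CONTAINER_multipeermap_iterate() and the map
 * then destroyed.  The helper name is hypothetical; the peer2pref field is
 * taken from the callback above.
 */
static void
free_client_preferences (struct PreferenceClient *pc)
{
  GNUNET_CONTAINER_multipeermap_iterate (pc->peer2pref,
                                         &free_preference,
                                         pc);
  GNUNET_CONTAINER_multipeermap_destroy (pc->peer2pref);
  pc->peer2pref = NULL;
}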
/* Free an array of peers: release every peer entry, then the array itself. */
static void free_all_peers(struct peer** p, int count)
{
    int i;
    for (i = 0; i < count; i++)
        free_peer(p[i]);
    if (count > 0)
        free(p);
}
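/*
 * Usage sketch (illustrative, not from the original source): free_all_peers()
 * releases every entry and then the array itself, so a caller owning a
 * malloc()'d array of struct peer pointers can tear it down in one call.  The
 * function and variable names below are hypothetical.
 */
static void example_teardown(struct peer** clients, int clients_count)
{
    free_all_peers(clients, clients_count);
    /* the clients array is now freed; the caller must drop its pointer */
}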
/**
 * Timer function for peer management.
 * This is registered as a timer by peer_manager_init() and gets called every
 * #PEER_MANAGER_TIMER seconds. It then checks what has changed and triggers events.
 * @param now - time of call
 * @param ptr - generic pointer for timers - not used
 */
void peer_timer(time_t now, void *ptr)
{
    peer *p, *n;
    LOG(L_DBG, "DBG:peer_timer(): taking care of peers...\n");
    lock_get(peer_list_lock);
    p = peer_list->head;
    while (p) {
        lock_get(p->lock);
        n = p->next;
        if (p->activity + config->tc <= now) {
            LOG(L_INFO, "DBG:peer_timer(): Peer %.*s \tState %d \n",
                p->fqdn.len, p->fqdn.s, p->state);
            switch (p->state) {
                /* initiating connection */
                case Closed:
                    if (p->is_dynamic && config->drop_unknown_peers) {
                        remove_peer(p);
                        free_peer(p, 1);
                        break;
                    }
                    touch_peer(p);
                    sm_process(p, Start, 0, 1, 0);
                    break;
                /* timeouts */
                case Wait_Conn_Ack:
                case Wait_I_CEA:
                case Closing:
                case Wait_Returns:
                    touch_peer(p);
                    sm_process(p, Timeout, 0, 1, 0);
                    break;
                /* inactivity detected */
                case I_Open:
                case R_Open:
                    if (p->waitingDWA) {
                        p->waitingDWA = 0;
                        if (p->state == I_Open)
                            sm_process(p, I_Peer_Disc, 0, 1, p->I_sock);
                        if (p->state == R_Open)
                            sm_process(p, R_Peer_Disc, 0, 1, p->R_sock);
                    } else {
                        p->waitingDWA = 1;
                        Snd_DWR(p);
                        touch_peer(p);
                    }
                    break;
                /* ignored states */
                /* unknown states */
                default:
                    LOG(L_ERR, "ERROR:peer_timer(): Peer %.*s inactive in state %d\n",
                        p->fqdn.len, p->fqdn.s, p->state);
            }
        }
        lock_release(p->lock);
        p = n;
    }
    lock_release(peer_list_lock);
    log_peer_list(L_INFO);
}
/**
 * PEERINFO calls this function to let us know about a possible peer
 * that we might want to connect to.
 *
 * @param cls closure (not used)
 * @param peer potential peer to connect to
 * @param hello HELLO for this peer (or NULL)
 * @param err_msg NULL if successful, otherwise contains error message
 */
static void
process_peer (void *cls,
              const struct GNUNET_PeerIdentity *peer,
              const struct GNUNET_HELLO_Message *hello,
              const char *err_msg)
{
  struct Peer *pos;

  if (NULL != err_msg)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                _("Error in communication with PEERINFO service: %s\n"),
                err_msg);
    GNUNET_PEERINFO_notify_cancel (peerinfo_notify);
    peerinfo_notify = GNUNET_PEERINFO_notify (cfg,
                                              GNUNET_NO,
                                              &process_peer,
                                              NULL);
    return;
  }
  GNUNET_assert (NULL != peer);
  if (0 == memcmp (&my_identity, peer, sizeof (struct GNUNET_PeerIdentity)))
    return;                     /* that's me! */
  if (NULL == hello)
  {
    /* free existing HELLO, if any */
    pos = GNUNET_CONTAINER_multipeermap_get (peers, peer);
    if (NULL != pos)
    {
      GNUNET_free_non_null (pos->hello);
      pos->hello = NULL;
      if (NULL != pos->filter)
      {
        GNUNET_CONTAINER_bloomfilter_free (pos->filter);
        pos->filter = NULL;
      }
      if ( (NULL == pos->mq) &&
           (GNUNET_NO == pos->is_friend) )
        free_peer (NULL, &pos->pid, pos);
    }
    return;
  }
  consider_for_advertising (hello);
  pos = GNUNET_CONTAINER_multipeermap_get (peers, peer);
  if (NULL == pos)
    pos = make_peer (peer, hello, GNUNET_NO);
  attempt_connect (pos);
}
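/*
 * Sketch (not part of the original file): how process_peer is hooked up to
 * the PEERINFO service.  GNUNET_PEERINFO_notify() and
 * GNUNET_PEERINFO_notify_cancel() are taken from the error branch above; the
 * helper names and the assumption that cfg and peerinfo_notify are file
 * globals are mine.
 */
static void
start_peerinfo_processing (void)
{
  peerinfo_notify = GNUNET_PEERINFO_notify (cfg,
                                            GNUNET_NO,
                                            &process_peer,
                                            NULL);
}


static void
stop_peerinfo_processing (void)
{
  if (NULL != peerinfo_notify)
  {
    GNUNET_PEERINFO_notify_cancel (peerinfo_notify);
    peerinfo_notify = NULL;
  }
}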
static void
on_client_event(struct bufferevent* bev, short ev, void *arg)
{
    struct peer* p = (struct peer*)arg;
    if (ev & BEV_EVENT_EOF || ev & BEV_EVENT_ERROR) {
        int i;
        struct peer** clients = p->peers->clients;
        /* Remove the disconnected client: shift the remaining entries
           down and renumber their ids. */
        for (i = p->id; i < p->peers->clients_count - 1; ++i) {
            clients[i] = clients[i+1];
            clients[i]->id = i;
        }
        p->peers->clients_count--;
        /* Shrink the clients array to its new size. */
        p->peers->clients = realloc(p->peers->clients,
            sizeof(struct peer*) * (p->peers->clients_count));
        free_peer(p);
    } else {
        paxos_log_error("Event %d not handled", ev);
    }
}
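/*
 * Sketch (illustrative, not from the original source): registering
 * on_client_event as the libevent bufferevent event callback for an accepted
 * client connection.  bufferevent_setcb() and bufferevent_enable() are
 * standard libevent 2 API; the helper name and the NULL read/write callbacks
 * are assumptions.  The includes would normally sit at the top of the file.
 */
#include <event2/event.h>
#include <event2/bufferevent.h>

static void
register_client_events(struct bufferevent* bev, struct peer* p)
{
    /* no read/write callbacks in this sketch; only connection events */
    bufferevent_setcb(bev, NULL, NULL, on_client_event, p);
    bufferevent_enable(bev, EV_READ | EV_WRITE);
}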
/*
 * unpeer - remove peer structure from hash table and free structure
 */
void
unpeer(
    struct peer *peer
    )
{
    mprintf_event(PEVNT_DEMOBIL, peer, "assoc %u", peer->associd);
    restrict_source(&peer->srcadr, 1, 0);
    set_peerdstadr(peer, NULL);
    peer_demobilizations++;
    peer_associations--;
    if (FLAG_PREEMPT & peer->flags)
        peer_preempt--;
#ifdef REFCLOCK
    /*
     * If this peer is actually a clock, shut it down first
     */
    if (FLAG_REFCLOCK & peer->flags)
        refclock_unpeer(peer);
#endif
    free_peer(peer, TRUE);
}
/**
 * Destroys the Peer Manager and disconnects all peer sockets.
 */
void peer_manager_destroy()
{
    peer *foo, *bar;
    lock_get(peer_list_lock);
    foo = peer_list->head;
    while (foo) {
        if (foo->I_sock > 0) close(foo->I_sock);
        if (foo->R_sock > 0) close(foo->R_sock);
        bar = foo->next;
        free_peer(foo, 1);
        foo = bar;
    }
    /* lock_get(msg_id_lock); */
    shm_free(hopbyhop_id);
    shm_free(endtoend_id);
    lock_destroy(msg_id_lock);
    lock_dealloc((void*)msg_id_lock);
    shm_free(peer_list);
    lock_destroy(peer_list_lock);
    lock_dealloc((void*)peer_list_lock);
    LOG(L_DBG, "DBG:peer_manager_destroy(): ...Peer Manager destroyed\n");
}
/**
 * Discard peer entries for greylisted peers
 * where the greylisting has expired.
 *
 * @param cls the greylisted `struct Peer` to check
 * @param tc scheduler context
 */
static void
remove_from_greylist (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
{
  struct Peer *pos = cls;
  struct GNUNET_TIME_Relative rem;

  pos->greylist_clean_task = GNUNET_SCHEDULER_NO_TASK;
  rem = GNUNET_TIME_absolute_get_remaining (pos->greylisted_until);
  if (rem.rel_value == 0)
  {
    schedule_attempt_connect (pos);
  }
  else
  {
    pos->greylist_clean_task =
        GNUNET_SCHEDULER_add_delayed (rem, &remove_from_greylist, pos);
  }
  if ((GNUNET_NO == pos->is_friend) && (GNUNET_NO == pos->is_connected) &&
      (NULL == pos->hello))
  {
    free_peer (NULL, &pos->pid.hashPubKey, pos);
    return;
  }
}
/**
 * Timer function for peer management.
 * This is registered as a timer by peer_manager_init() and gets called every
 * #PEER_MANAGER_TIMER seconds. It then checks what has changed and triggers events.
 * @param now - time of call
 * @param ptr - generic pointer for timers - not used
 */
int peer_timer(time_t now, void *ptr)
{
    peer *p, *n;
    int i;
    LM_DBG("peer_timer(): taking care of peers...\n");
    lock_get(peer_list_lock);
    p = peer_list->head;
    while (p) {
        lock_get(p->lock);
        n = p->next;
        if (p->disabled && (p->state != Closed && p->state != Closing)) {
            LM_DBG("Peer [%.*s] has been disabled - shutting down\n",
                p->fqdn.len, p->fqdn.s);
            if (p->state == I_Open) sm_process(p, Stop, 0, 1, p->I_sock);
            if (p->state == R_Open) sm_process(p, Stop, 0, 1, p->R_sock);
            lock_release(p->lock);
            p = n;
            continue;
        }
        if (p->activity + config->tc <= now) {
            LM_DBG("peer_timer(): Peer %.*s State %d \n",
                p->fqdn.len, p->fqdn.s, p->state);
            switch (p->state) {
                /* initiating connection */
                case Closed:
                    if (p->is_dynamic && config->drop_unknown_peers) {
                        remove_peer(p);
                        free_peer(p, 1);
                        break;
                    }
                    if (!p->disabled) {
                        touch_peer(p);
                        sm_process(p, Start, 0, 1, 0);
                    }
                    break;
                /* timeouts */
                case Wait_Conn_Ack:
                case Wait_I_CEA:
                case Closing:
                case Wait_Returns:
                case Wait_Conn_Ack_Elect:
                    touch_peer(p);
                    sm_process(p, Timeout, 0, 1, 0);
                    break;
                /* inactivity detected */
                case I_Open:
                case R_Open:
                    if (p->waitingDWA) {
                        p->waitingDWA = 0;
                        if (p->state == I_Open)
                            sm_process(p, I_Peer_Disc, 0, 1, p->I_sock);
                        if (p->state == R_Open)
                            sm_process(p, R_Peer_Disc, 0, 1, p->R_sock);
                        LM_WARN("Inactivity on peer [%.*s] and no DWA, Closing peer...\n",
                            p->fqdn.len, p->fqdn.s);
                    } else {
                        p->waitingDWA = 1;
                        Snd_DWR(p);
                        touch_peer(p);
                        if (debug_heavy) {
                            LM_DBG("Inactivity on peer [%.*s], sending DWR... - "
                                "if we don't get a reply, the peer will be closed\n",
                                p->fqdn.len, p->fqdn.s);
                        }
                    }
                    break;
                /* ignored states */
                /* unknown states */
                default:
                    LM_ERR("peer_timer(): Peer %.*s inactive in state %d\n",
                        p->fqdn.len, p->fqdn.s, p->state);
            }
        }
        lock_release(p->lock);
        p = n;
    }
    lock_release(peer_list_lock);
    log_peer_list();
    /* re-arm: return the number of seconds until the next invocation */
    i = config->tc / 5;
    if (i <= 0) i = 1;
    return i;
}
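/*
 * Sketch (illustrative only): registering peer_timer() from
 * peer_manager_init(), as described in the comment above.  register_timer()
 * and its signature are a hypothetical stand-in for the module's actual timer
 * API; only peer_timer() and PEER_MANAGER_TIMER come from the surrounding
 * code.
 */
int peer_manager_start_timer(void)
{
    /* hypothetical timer API: call peer_timer every PEER_MANAGER_TIMER
       seconds, passing no callback data */
    if (register_timer(PEER_MANAGER_TIMER, peer_timer, 0) < 0) {
        LM_ERR("peer_manager_start_timer(): could not register peer_timer\n");
        return -1;
    }
    return 0;
}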