/* fsm driver routine-1, this procedure might be called from arbitrary context */ LOCAL void peer_event(int code, void * e1, int e2,int e3,int e4) { int c; ke_toggle_flag(FKERN_LED_BUSB); switch(code){ case HSE_PEER_COLLISION: c=peer_lock(); set_state(hsp_s_collision); peer_unlock(c); break; case HSE_PEER_RX: g_kernel.peer->lastMsgTime = ke_get_time(); if(e2 < sizeof(struct peer_cmd_t)){ return; } peer_rx_event((struct peer_cmd_t*)e1,e2); break; case HSE_PEER_STATUS_BROADCAST: c=peer_lock(); /*reset watchdog*/ g_kernel.peer->lastMsgTime=ke_get_time(); #ifdef __VXWORKS__ memcpy(g_kernel.peer->peerAddress,((struct ether_header*)e1)->ether_shost,6); #endif g_kernel.peer_status = *((struct kstatus_t*)e2); if(kernelState == KERN_S_RUNNING && peerState==KERN_S_RUNNING){ ki_log(&g_kernel, F8_IOBUS_COLLISION,0,0,0); set_state(hsp_s_collision); } peer_unlock(c); break; } }
/* void peer_arbitrate()
 * setup the initial kernel state by monitoring hot-standby port and IO bus
 * activities.
 *
 * Probes the peer up to 3 times: each round broadcasts a Pr_query_status
 * command, sleeps HSP_ARBITRATION_TIMEOUT, then samples the FSM state to see
 * whether the peer answered (hsp_s_complete). Based on the outcome and the
 * peer's reported mode, switches this kernel to RUNNING or STANDBY.
 */
LOCAL void peer_arbitrate()
{
    int cookie;
    f8_u8 st;
    int i;
    struct peer_cmd_t tp;

    for(i=0;i<3;i++){
        /* Send a status query with the FSM armed in the arbitrate state. */
        cookie = peer_lock();
        set_state(hsp_s_arbitrate);
        tp.cmd = Pr_query_status;
        tp.code = F8_SUCCESS;
        tp.time = ke_get_time();
        //peer_flush();
        peer_write(&tp, sizeof tp);
        peer_unlock(cookie);

        /* Give the peer a full arbitration window to respond. */
        miliSleep(HSP_ARBITRATION_TIMEOUT);

        /* is there an answer? Sample and reset the FSM atomically. */
        cookie = peer_lock();
        st = get_state();
        set_state(hsp_s_idle);
        peer_unlock(cookie);
        if(st==hsp_s_complete) break;
    }

    // mode_show();
    // logMsg("Peer is %s, state=%s\n", ke_get_mode_name(peerState, state_name(st)));

    if(st == hsp_s_complete){
        /* Peer answered: decide our role from the peer's reported mode.
           (peerState is presumably updated by the RX path while we slept
           — TODO confirm.) */
        switch(peerState){
        case KERN_S_RUNNING:
            /* Peer is already primary: we become standby. */
            ki_switch_to(KERN_S_STANDBY);
            return;
        case KERN_S_ARBITRATING:
            /* Both sides arbitrating: break the tie by priority. */
            if(check_priority())
                ki_switch_to(KERN_S_RUNNING);
            else
                ki_switch_to(KERN_S_STANDBY);
            break;
        default:
            ki_switch_to(KERN_S_RUNNING);
            break;
        }
    }else{
        /* No answer after 3 rounds: assume we are alone and take primary. */
        ki_switch_to(KERN_S_RUNNING);
    }
    hsp_log(("arbitration finished, mode is:%d\n", kernelState));
    // ki_switch_to(KERN_S_HALTED);
}
/* Free route table.
 *
 * Iteratively tears down the radix tree bottom-up (post-order) without
 * recursion: descend to a leaf, free it, detach it from its parent, and
 * continue from the parent. Finally releases the table's owner reference
 * and the table itself.
 */
static void
bgp_table_free (struct bgp_table *rt)
{
  struct bgp_node *tmp_node;
  struct bgp_node *node;

  if (rt == NULL)
    return;

  node = rt->top;

  /* Bulk deletion of nodes remaining in this table.  This function is not
     called until workers have completed their dependency on this table.
     A final bgp_unlock_node() will not be called for these nodes. */
  while (node)
    {
      /* Walk down to the left-most remaining leaf. */
      if (node->l_left)
        {
          node = node->l_left;
          continue;
        }

      if (node->l_right)
        {
          node = node->l_right;
          continue;
        }

      /* node is a leaf: free it and resume from its parent. */
      tmp_node = node;
      node = node->parent;

      tmp_node->table->count--;
      tmp_node->lock = 0;  /* to cause assert if unlocked after this */
      bgp_node_free (tmp_node);

      if (node != NULL)
        {
          /* Detach the freed child so the parent is not revisited through
             a dangling pointer on the next iteration. */
          if (node->l_left == tmp_node)
            node->l_left = NULL;
          else
            node->l_right = NULL;
        }
      else
        {
          /* Freed node was the root: the tree is empty. */
          break;
        }
    }

  assert (rt->count == 0);

  if (rt->owner)
    {
      peer_unlock (rt->owner);
      rt->owner = NULL;
    }

  XFREE (MTYPE_BGP_TABLE, rt);
  return;
}
/* Remove an adj-in entry from a BGP node: drop its interned attribute,
   unlink it from the node's adj_in list, release the peer reference it
   held, and free the structure itself. */
void
bgp_adj_in_remove (struct bgp_node *rn, struct bgp_adj_in *bai)
{
  struct peer *adj_peer = bai->peer;

  bgp_attr_unintern (bai->attr);
  BGP_ADJ_IN_DEL (rn, bai);
  peer_unlock (adj_peer); /* adj_in peer reference */
  XFREE (MTYPE_BGP_ADJ_IN, bai);
}
/* Drop one reference on a BGP table; the last reference tears the table
   down: its underlying route table, its owner reference, and the table
   structure itself. */
void
bgp_table_unlock (struct bgp_table *rt)
{
  assert (rt->lock > 0);

  /* Still referenced elsewhere — nothing more to do. */
  if (--rt->lock > 0)
    return;

  route_table_finish (rt->route_table);
  rt->route_table = NULL;

  if (rt->owner)
    {
      peer_unlock (rt->owner);
      rt->owner = NULL;
    }

  XFREE (MTYPE_BGP_TABLE, rt);
}
/* BGP adjacency keeps minimal advertisement information.
   Free one adj_out entry: release the peer reference it held, then the
   structure itself. */
static void
bgp_adj_out_free (struct bgp_adj_out *adj)
{
  struct peer *owner = adj->peer;

  peer_unlock (owner); /* adj_out peer reference */
  XFREE (MTYPE_BGP_ADJ_OUT, adj);
}
/* standby_sync() - standby phase of the BPC.
 *
 * Runs one step of the standby-side sync automaton under the peer lock:
 *   hsp_s_complete - a full snapshot arrived: validate section sizes,
 *                    check it fits the volatile area, inflate it and load
 *                    it into the kernel, then rearm for the next session.
 *   hsp_s_idle     - nothing to do.
 *   hsp_s_active   - transfer in progress; abort it if we are not marked
 *                    synchronized.
 *   default        - unexpected state: count it and reset to idle.
 * After releasing the lock, ages the SYNCHRONIZED flag and performs
 * primary-failure detection / takeover.
 *
 * Returns F8_SUCCESS on a loaded snapshot, F8_BUSY when idle, or a
 * specific error code on validation failure.
 */
LOCAL f8_status standby_sync()
{
    int cookie;
    ktime_t t;
    f8_uint i, size;
    long size2;
    f8_status ret = F8_BUSY;
    f8_u8 st;
    static ktime_t syncTime; /* time of the last successfully started sync */

    t = ke_get_time();

    /* check automata */
    cookie = peer_lock();
    st = get_state();
    switch(st){
    case hsp_s_complete:
        size = 0;
        ke_toggle_flag(FKERN_LED_DBG1);
        syncTime=t;
        ke_set_flag(FKERN_LED_SYNCHRONIZED,1);
        /* Every memory section must match our own layout exactly. */
        for(i=0; i<KERN_NUM_SECTIONS; i++){
            if(peerHdr.x_mem_sizes[i] != g_kernel.x_mem_sizes[i]){
                ret = F8_VERSION_MISMATCH;
                set_state(hsp_s_idle);
                goto __done;
            }
            size += peerHdr.x_mem_sizes[i];
        }
        /* Total = sections + marshalled timers + marshalled events +
           internal memory image. */
        size += sizeof(struct marshalled_timer_t) * peerHdr.timer_q_len
              + sizeof(struct marshalled_event_t) * peerHdr.event_q_len
              + peerHdr.i_mem_size;
        if(size + sizeof(struct kpeer_hdr_t) > F8_VOLATILE_MEM_SIZE){
            ret = F8_LOW_MEMORY;
            set_state(hsp_s_idle);
            goto __done;
        }
        /* Inflate the received snapshot (zlib); size2 is in/out. */
        size2=sizeof(peerData);
        if(uncompress(peerData, &size2, peerDataZipped, peerHdr.zipped_data_len) != Z_OK){
            ret=F8_INVALID_DATA;
            set_state(hsp_s_idle);
            break; /* falls through to __done below */
        }
        /* indicate how much memory we can use */
        peerGuardian = peerData + size2;
        ki_load_volatile(&g_kernel);
        /* start another session */
        //peer_flush();
        set_state(hsp_s_idle);
        ret = F8_SUCCESS;
        peerCounters[1]++; /* successful sync counter */
        break;
    case hsp_s_idle:
        break;
    case hsp_s_active:
        /* Transfer in progress; if we are not synchronized, abort it. */
        if(!ke_get_flag(FKERN_LED_SYNCHRONIZED)){
            //peer_flush();
            set_state(hsp_s_idle);
        }
        break;
    default:
        /* last error */
        peerCounters[5]++;
        peerCounters[3] = st;
        //peer_flush();
        set_state(hsp_s_idle);
        break;
    }
__done:
    peer_unlock(cookie);

    /* No fresh sync for 5 primary lifetimes: drop the SYNCHRONIZED flag. */
    if(t>syncTime+ki_get_primary_life()*5)
        ke_set_flag(FKERN_LED_SYNCHRONIZED,0);

    if(!ke_get_flag(FKERN_LED_SOFT_STOP)){
        /* Primary failure detection: watchdog expired, or peer left the
           RUNNING state without a soft lock. */
        if(t > g_kernel.peer->lastMsgTime + ki_get_primary_life()||
           (peerState!=KERN_S_RUNNING && !ke_get_peer_flag(FKERN_LED_SOFT_LOCK)) ){
            /* primary failure detected */
            /* try switch to primary state */
            if(g_kernel.peer_status.prog_id == g_kernel.status.prog_id &&
               ke_get_flag(FKERN_LED_SYNCHRONIZED)){
                /* adjust kernel clock */
                kern_time_bias += hspTimeOffset;
                ki_log(&g_kernel, F8_PRIMARY_FAILURE,0,0,0);
                ki_switch_to(KERN_S_ARBITRATING);
                set_state(hsp_s_idle);
            }
            /* reset peer to unknown state */
            memset(&g_kernel.peer_status, 0, sizeof g_kernel.peer_status);
        }
    }
    return ret;
}
/* primary_sync() - primary phase of the BPC.
 *
 * Compresses the saved volatile image, offers it to the standby peer
 * (Pr_connect + status + header), then polls the FSM while the transfer
 * runs. Returns F8_SUCCESS when the standby acknowledged a complete
 * transfer, F8_CONNECTION_LOST / F8_LOW_MEMORY on setup failure, or
 * F8_TIMEOUT otherwise (also handling a detected collision).
 */
LOCAL f8_status primary_sync()
{
    struct peer_cmd_t tp;
    int i, cookie;
    enum hsp_state_t st;
    long size2;

    st = get_state();
    if(st == hsp_s_collision){
        /* Collision: the lower-priority side yields and becomes standby. */
        if(!check_priority()){
            ki_switch_to(KERN_S_STANDBY);
            return F8_SUCCESS;
        }
    }

    ki_save_volatile(&g_kernel);

    cookie = peer_lock();
    /* prepare volatile data (zlib deflate; size2 is in/out) */
    size2=sizeof(peerDataZipped);
    if(compress(peerDataZipped, &size2, peerData, peerPointer-peerData) == Z_OK){
        peerHdr.zipped_data_len=size2;
    }else{
        /* BUGFIX: this early return leaked the peer lock taken above,
           deadlocking every later peer_lock() caller. Release it first. */
        peer_unlock(cookie);
        /* might continue with un-compressed data */
        return F8_LOW_MEMORY;
    }
    /* start automata — state is assigned directly rather than through
       set_state(); the commented-out call suggests this is deliberate
       (presumably to skip its side effects while the lock is held —
       TODO confirm). */
    // set_state(hsp_s_connecting);
    _state = hsp_s_connecting;
    memset(&tp, 0, sizeof(tp));
    tp.cmd = Pr_connect;
    tp.code = F8_SUCCESS;
    tp.time = ke_get_time();
    //peer_flush();
    if(peer_write3(&tp, sizeof(tp),
                   &g_kernel.status, sizeof(g_kernel.status),
                   &peerHdr, sizeof(peerHdr)) < 0){
        set_state(hsp_s_idle);
        peer_unlock(cookie);
        return F8_CONNECTION_LOST;
    }
    peer_unlock(cookie);

    /* wait 5 times to see if connection established
       (states >= 10 are terminal — complete/collision/error). */
    for(i=0; i<5; i++){
        if(get_state() == hsp_s_active || get_state() >= 10){
            break;
        }
        miliSleep(hspConnTimeout);
    }

    st = get_state();
    if(st == hsp_s_active){
        /* i is the expected packet count (~1KB per packet); poll until the
           transfer reaches a terminal state or the budget runs out. */
        i=size2/1024+1;
        while(i > 0){
            if(get_state() >= 10){
                break;
            }
            miliSleep(2);
            i--;
        }
    }

    st = get_state();
    // set_state(hsp_s_idle);
    _state = hsp_s_idle;

    if(st == hsp_s_complete){
        return F8_SUCCESS;
    }else if(st == hsp_s_collision){
        /* Peer turned primary meanwhile: re-arbitrate roles. */
        ki_switch_to(KERN_S_ARBITRATING);
    }else{
        /* record the last failing state for diagnostics */
        peerCounters[3] = st;
        peerCounters[5]++;
        // hsp_log(("primary timeout %d\n", i));
        /* reset peer state */
        memset(&g_kernel.peer_status, 0, sizeof g_kernel.peer_status);
    }
    return F8_TIMEOUT;
}