/*
 * State decision event M1: the local clock is becoming master.
 * Applies the data set updates of Table 13 (clause 9.3.5) of the spec:
 * the local default data set values are copied into the current,
 * parent and time-properties data sets.
 */
void m1(RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	/* currentDS: we are our own (and only) time source */
	ptpClock->stepsRemoved = 0;
	clearTime(&ptpClock->offsetFromMaster);
	clearTime(&ptpClock->meanPathDelay);

	/* parentDS: parent and grandmaster are the local clock itself */
	copyClockIdentity(ptpClock->parentPortIdentity.clockIdentity,
			  ptpClock->clockIdentity);
	ptpClock->parentPortIdentity.portNumber = 0;
	ptpClock->parentStats = DEFAULT_PARENTS_STATS;
	ptpClock->observedParentClockPhaseChangeRate = 0;
	ptpClock->observedParentOffsetScaledLogVariance = 0;

	copyClockIdentity(ptpClock->grandmasterIdentity,
			  ptpClock->clockIdentity);
	ptpClock->grandmasterClockQuality.clockClass =
	    ptpClock->clockQuality.clockClass;
	ptpClock->grandmasterClockQuality.clockAccuracy =
	    ptpClock->clockQuality.clockAccuracy;
	ptpClock->grandmasterClockQuality.offsetScaledLogVariance =
	    ptpClock->clockQuality.offsetScaledLogVariance;
	ptpClock->grandmasterPriority1 = ptpClock->priority1;
	ptpClock->grandmasterPriority2 = ptpClock->priority2;

	/* timePropertiesDS: free-running on the internal oscillator */
	ptpClock->timeSource = INTERNAL_OSCILLATOR;
	/* UTC vs TAI timescales */
	ptpClock->currentUtcOffsetValid = DEFAULT_UTC_VALID;
	ptpClock->currentUtcOffset = rtOpts->currentUtcOffset;
}
/*
 * Reset the measurement/servo state held in PtpClock so the clock
 * variables start from scratch.
 */
void initClockVars(RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	DBG("initClockVars:\n");

	/* forget previously measured delays and offsets */
	clearTime(&ptpClock->master_to_slave_delay);
	clearTime(&ptpClock->slave_to_master_delay);
	clearTime(&ptpClock->offset_from_master); /* AKB: 9/18/2007 Clear offset from master */

	/* reset the offset-from-master filter; -1 marks nsec_prev as not yet valid */
	ptpClock->ofm_filt.y = 0;
	ptpClock->ofm_filt.nsec_prev = -1;

	ptpClock->observed_v1_variance = 0;
	/* clears clock servo accumulator (the I term) */
	ptpClock->observed_drift = 0;
	/* clears one-way delay filter */
	ptpClock->owd_filt.s_exp = 0;

	/* latch a pending half-epoch request from the options, then consume it */
	ptpClock->halfEpoch = ptpClock->halfEpoch || rtOpts->halfEpoch;
	rtOpts->halfEpoch = 0;
}
/**
 * State decision event M1: the local clock is about to enter the
 * PTP_MASTER state. Updates the current, parent and time-properties
 * data sets from the local defaults and run-time options.
 *
 * @param rtOpts    run-time options (read-only here)
 * @param ptpClock  PTP port/clock data structure being updated
 */
void m1(const RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	/* Current data set update: no upstream master, zero path state */
	ptpClock->stepsRemoved = 0;
	clearTime(&ptpClock->offsetFromMaster);
	clearTime(&ptpClock->meanPathDelay);

	/* Parent data set: parent and grandmaster are the local clock itself */
	copyClockIdentity(ptpClock->parentPortIdentity.clockIdentity,
			  ptpClock->clockIdentity);
	ptpClock->parentPortIdentity.portNumber = ptpClock->portIdentity.portNumber;
	ptpClock->parentStats = DEFAULT_PARENTS_STATS;
	ptpClock->observedParentClockPhaseChangeRate = 0;
	ptpClock->observedParentOffsetScaledLogVariance = 0;
	copyClockIdentity(ptpClock->grandmasterIdentity, ptpClock->clockIdentity);
	ptpClock->grandmasterClockQuality.clockAccuracy =
	    ptpClock->clockQuality.clockAccuracy;
	ptpClock->grandmasterClockQuality.clockClass =
	    ptpClock->clockQuality.clockClass;
	ptpClock->grandmasterClockQuality.offsetScaledLogVariance =
	    ptpClock->clockQuality.offsetScaledLogVariance;
	ptpClock->grandmasterPriority1 = ptpClock->priority1;
	ptpClock->grandmasterPriority2 = ptpClock->priority2;

	/* revert to the configured delay request interval now that we are master */
	ptpClock->logMinDelayReqInterval = rtOpts->logMinDelayReqInterval;

	/* Time Properties data set: taken from the configured properties */
	ptpClock->timePropertiesDS.currentUtcOffsetValid =
	    rtOpts->timeProperties.currentUtcOffsetValid;
	ptpClock->timePropertiesDS.currentUtcOffset =
	    rtOpts->timeProperties.currentUtcOffset;
	ptpClock->timePropertiesDS.timeTraceable =
	    rtOpts->timeProperties.timeTraceable;
	ptpClock->timePropertiesDS.frequencyTraceable =
	    rtOpts->timeProperties.frequencyTraceable;
	ptpClock->timePropertiesDS.ptpTimescale =
	    rtOpts->timeProperties.ptpTimescale;
	ptpClock->timePropertiesDS.timeSource =
	    rtOpts->timeProperties.timeSource;

	/*
	 * Only advertise a pending leap second (leap59/leap61) when running
	 * on the PTP timescale and we are within the configured notice
	 * period before midnight; otherwise announce no leap event.
	 */
	if(ptpClock->timePropertiesDS.ptpTimescale &&
	    (secondsToMidnight() < rtOpts->leapSecondNoticePeriod)) {
		ptpClock->timePropertiesDS.leap59 = ptpClock->clockStatus.leapDelete;
		ptpClock->timePropertiesDS.leap61 = ptpClock->clockStatus.leapInsert;
	} else {
		ptpClock->timePropertiesDS.leap59 = FALSE;
		ptpClock->timePropertiesDS.leap61 = FALSE;
	}
}
/*
 * Re-initialise per-session clock state (filters, measured delays,
 * operator-message throttling). The frequency estimate is deliberately
 * left alone: restoreDrift resets it if necessary.
 */
void initClock(RunTimeOpts * rtOpts, PtpClock * ptpClock)
{
	DBG("initClock\n");

#ifdef PTPD_NTPDC
	/* If we've been suppressing ntpdc error messages, show them once again */
	ptpClock->ntpControl.requestFailed = FALSE;
#endif /* PTPD_NTPDC */

#ifdef HAVE_SYS_TIMEX_H
	/* do not reset frequency here - restoreDrift will do it if necessary */
	ptpClock->servo.observedDrift = 0;
#endif /* HAVE_SYS_TIMEX_H */

	/* wipe the measured offsets/delays and the legacy filter state */
	clearTime(&ptpClock->offsetFromMaster);
	clearTime(&ptpClock->meanPathDelay);
	clearTime(&ptpClock->delaySM);
	clearTime(&ptpClock->delayMS);

	FilterClear(ptpClock->owd_filt);	/* one-way delay filter */
	FilterClear(ptpClock->ofm_filt);	/* offset-from-master filter */

	rtOpts->offset_first_updated = FALSE;
	ptpClock->char_last_msg = 'I';

	reset_operator_messages(rtOpts, ptpClock);

	/* hybrid mode: forget the cached master address */
	ptpClock->masterAddr = 0;
}
/*
 * Re-initialise per-session clock state: measured delays, the original
 * ofm/owd filters, warning throttling and the rejection counter.
 */
void initClock(const RunTimeOpts * rtOpts, PtpClock * ptpClock)
{
	DBG("initClock\n");

	/* If we've been suppressing ntpdc error messages, show them once again */
	ptpClock->ntpControl.requestFailed = FALSE;

	ptpClock->disabled = rtOpts->portDisabled;

	/* do not reset frequency here - restoreDrift will do it if necessary */
	/* 2.3.1: restoreDrift now always compiled - this is no longer needed */
#if 0
	ptpClock->servo.observedDrift = 0;
#endif

	/* wipe the measured offsets/delays */
	clearTime(&ptpClock->offsetFromMaster);
	clearTime(&ptpClock->meanPathDelay);
	clearTime(&ptpClock->delaySM);
	clearTime(&ptpClock->delayMS);

	/* reset the original filter variables */
	ptpClock->ofm_filt.y = 0;
	ptpClock->ofm_filt.nsec_prev = 0;
	ptpClock->owd_filt.s_exp = 0;	/* clears one-way delay filter */

	ptpClock->offsetFirstUpdated = FALSE;
	ptpClock->char_last_msg = 'I';

	resetWarnings(rtOpts, ptpClock);

	/* For Hybrid mode */
	/* ptpClock->masterAddr = 0; */

	ptpClock->maxDelayRejected = 0;
}
void Output::begin() { Serial.println("Starting Output config"); _matrix = Adafruit_7segment(); _lcd.begin(20,4); _matrix.begin(0x70); Serial.print("DEBUG:redPin:");Serial.println(_redPin); Serial.print("DEBUG:yellowPin:");Serial.println(_yellowPin); Serial.print("DEBUG:greenPin:");Serial.println(_greenPin); Serial.print("DEBUG:speakerPin:");Serial.println(_speakerPin); setReady(); clearTime(); Serial.println("Completed Output config"); noTone(_speakerPin); }
/* Init ptpClock with run time values (initialization constants are in constants.h)*/
void initData(RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	int i,j;
	j=0;
	DBG("initData\n");

	/* Default data set */
	ptpClock->twoStepFlag = TWO_STEP_FLAG;

	/*
	 * init clockIdentity with MAC address and 0xFF and 0xFE. see
	 * spec 7.5.2.2.2
	 */
	for (i=0;i<CLOCK_IDENTITY_LENGTH;i++)
	{
		if (i==3) ptpClock->clockIdentity[i]=0xFF;
		else if (i==4) ptpClock->clockIdentity[i]=0xFE;
		else
		{
		  ptpClock->clockIdentity[i]=ptpClock->port_uuid_field[j];
		  j++;
		}
	}
	ptpClock->numberPorts = NUMBER_PORTS;

	ptpClock->clockQuality.clockAccuracy =
		rtOpts->clockQuality.clockAccuracy;
	ptpClock->clockQuality.clockClass = rtOpts->clockQuality.clockClass;
	ptpClock->clockQuality.offsetScaledLogVariance =
		rtOpts->clockQuality.offsetScaledLogVariance;

	ptpClock->priority1 = rtOpts->priority1;
	ptpClock->priority2 = rtOpts->priority2;

	ptpClock->domainNumber = rtOpts->domainNumber;
	ptpClock->slaveOnly = rtOpts->slaveOnly;
	if(rtOpts->slaveOnly) {
		/*
		 * BUG FIX: a slave-only port must advertise the slave-only
		 * clock class. Previously only the rtOpts copy was updated,
		 * but ptpClock->clockQuality.clockClass had already been
		 * copied above, so the port kept announcing the configured
		 * class. Update both, as the later codebase does.
		 */
		rtOpts->clockQuality.clockClass = SLAVE_ONLY_CLOCK_CLASS;
		ptpClock->clockQuality.clockClass = SLAVE_ONLY_CLOCK_CLASS;
	}

	/* Port configuration data set */

	/*
	 * PortIdentity Init (portNumber = 1 for an ardinary clock spec
	 * 7.5.2.3)
	 */
	copyClockIdentity(ptpClock->portIdentity.clockIdentity,
			ptpClock->clockIdentity);
	ptpClock->portIdentity.portNumber = NUMBER_PORTS;

	/* select the initial rate of delayreqs until we receive the first announce message */
	ptpClock->logMinDelayReqInterval = rtOpts->initial_delayreq;

	clearTime(&ptpClock->peerMeanPathDelay);

	ptpClock->logAnnounceInterval = rtOpts->announceInterval;
	ptpClock->announceReceiptTimeout = rtOpts->announceReceiptTimeout;
	ptpClock->logSyncInterval = rtOpts->syncInterval;
	ptpClock->delayMechanism = rtOpts->delayMechanism;
	ptpClock->logMinPdelayReqInterval = DEFAULT_PDELAYREQ_INTERVAL;
	ptpClock->versionNumber = VERSION_PTP;

	/*
	 * Initialize random number generator using same method as ptpv1:
	 * seed is now initialized from the last bytes of our mac addres (collected in net.c:findIface())
	 */
	srand((ptpClock->port_uuid_field[PTP_UUID_LENGTH - 1] << 8) +
	      ptpClock->port_uuid_field[PTP_UUID_LENGTH - 2]);

	/*Init other stuff*/
	ptpClock->number_foreign_records = 0;
	ptpClock->max_foreign_records = rtOpts->max_foreign_records;
}
/* Init ptpClock with run time values (initialization constants are in constants.h)*/
void initData(RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	int i,j;
	j=0;
	DBG("initData\n");

	/* Default data set */
	ptpClock->twoStepFlag = TWO_STEP_FLAG;

	/*
	 * init clockIdentity with MAC address and 0xFF and 0xFE. see
	 * spec 7.5.2.2.2
	 */
	for (i=0;i<CLOCK_IDENTITY_LENGTH;i++)
	{
		if (i==3) ptpClock->clockIdentity[i]=0xFF;
		else if (i==4) ptpClock->clockIdentity[i]=0xFE;
		else
		{
		  ptpClock->clockIdentity[i]=ptpClock->netPath.interfaceID[j];
		  j++;
		}
	}

	/*
	 * Optionally overwrite bytes 3-4 (the 0xFF/0xFE filler above) with
	 * the process ID in network byte order, so multiple instances on
	 * the same host get distinct clock identities.
	 */
	if(rtOpts->pidAsClockId) {
		uint16_t pid = htons(getpid());
		memcpy(ptpClock->clockIdentity + 3, &pid, 2);
	}

	ptpClock->bestMaster = NULL;	/* no best master known yet */
	ptpClock->numberPorts = NUMBER_PORTS;
	ptpClock->disabled = rtOpts->portDisabled;

	/* copy the configured port description (buffer zeroed first) */
	memset(ptpClock->userDescription, 0, sizeof(ptpClock->userDescription));
	memcpy(ptpClock->userDescription, rtOpts->portDescription,
	       strlen(rtOpts->portDescription));

	/*
	 * Select the 6-byte profile identity from transport/delay settings.
	 * Later matches win: dot1AS overrides the multicast defaults.
	 */
	memset(&ptpClock->profileIdentity,0,6);
	if(rtOpts->ipMode == IPMODE_UNICAST && rtOpts->unicastNegotiation) {
		memcpy(&ptpClock->profileIdentity, &PROFILE_ID_TELECOM,6);
	}
	if(rtOpts->ipMode == IPMODE_MULTICAST &&rtOpts->delayMechanism == E2E) {
		memcpy(&ptpClock->profileIdentity, &PROFILE_ID_DEFAULT_E2E,6);
	}
	if(rtOpts->ipMode == IPMODE_MULTICAST &&rtOpts->delayMechanism == P2P) {
		memcpy(&ptpClock->profileIdentity, &PROFILE_ID_DEFAULT_P2P,6);
	}
	if(rtOpts->dot1AS) {
		memcpy(&ptpClock->profileIdentity, &PROFILE_ID_802_1AS,6);
	}

	ptpClock->clockQuality.clockAccuracy =
		rtOpts->clockQuality.clockAccuracy;
	ptpClock->clockQuality.clockClass = rtOpts->clockQuality.clockClass;
	ptpClock->clockQuality.offsetScaledLogVariance =
		rtOpts->clockQuality.offsetScaledLogVariance;

	ptpClock->priority1 = rtOpts->priority1;
	ptpClock->priority2 = rtOpts->priority2;

	ptpClock->domainNumber = rtOpts->domainNumber;

	/* slave-only ports advertise the slave-only clock class (both copies updated) */
	if(rtOpts->slaveOnly) {
		ptpClock->slaveOnly = TRUE;
		rtOpts->clockQuality.clockClass = SLAVE_ONLY_CLOCK_CLASS;
		ptpClock->clockQuality.clockClass = SLAVE_ONLY_CLOCK_CLASS;
	}

	/* Port configuration data set */

	/*
	 * PortIdentity Init (portNumber = 1 for an ardinary clock spec
	 * 7.5.2.3)
	 */
	copyClockIdentity(ptpClock->portIdentity.clockIdentity,
			ptpClock->clockIdentity);
	ptpClock->portIdentity.portNumber = rtOpts->portNumber;

	/* select the initial rate of delayreqs until we receive the first announce message */
	ptpClock->logMinDelayReqInterval = rtOpts->initial_delayreq;

	clearTime(&ptpClock->peerMeanPathDelay);

	ptpClock->logAnnounceInterval = rtOpts->logAnnounceInterval;
	ptpClock->announceReceiptTimeout = rtOpts->announceReceiptTimeout;
	ptpClock->logSyncInterval = rtOpts->logSyncInterval;
	ptpClock->delayMechanism = rtOpts->delayMechanism;
	ptpClock->logMinPdelayReqInterval = rtOpts->logMinPdelayReqInterval;
	ptpClock->versionNumber = VERSION_PTP;

	/* 802.1AS uses its own transportSpecific nibble in the header */
	if(rtOpts->dot1AS) {
		ptpClock->transportSpecific = TSP_ETHERNET_AVB;
	} else {
		ptpClock->transportSpecific = TSP_DEFAULT;
	}

	/*
	 * Initialize random number generator using same method as ptpv1:
	 * seed is now initialized from the last bytes of our mac addres (collected in net.c:findIface())
	 */
	srand((ptpClock->netPath.interfaceID[PTP_UUID_LENGTH - 1] << 8) +
	      ptpClock->netPath.interfaceID[PTP_UUID_LENGTH - 2]);

	/*Init other stuff*/
	ptpClock->number_foreign_records = 0;
	ptpClock->max_foreign_records = rtOpts->max_foreign_records;
}
/*
 * Compute the peer-to-peer one-way delay from the four pdelay
 * timestamps and filter it:
 *
 *     one_way_delay = ((t4 - t1) - (t3 - t2) - corrections) / 2
 *
 * The result is copied into slave_to_master_delay, and then smoothed
 * with the exponential one-way-delay filter (owd_filt). If the raw
 * delay exceeds one second the filter state is reset and the sample is
 * discarded.
 */
void updatePathDelay(one_way_delay_filter *owd_filt,	// one way delay filter
		     RunTimeOpts *rtOpts,		// run time options
		     PtpClock *ptpClock		// PTP main data structure
		    )
{
	Integer16 s;
	TimeInternal remote_time;

	DBGV("updatePathDelay:\n");
	DBGV(" t1 PDelay Req Tx time %10.10ds.%9.9dns\n",
	     ptpClock->t1_pdelay_req_tx_time.seconds,
	     ptpClock->t1_pdelay_req_tx_time.nanoseconds
	    );
	DBGV(" t2 PDelay Req Rx time %10.10ds.%9.9dns\n",
	     ptpClock->t2_pdelay_req_rx_time.seconds,
	     ptpClock->t2_pdelay_req_rx_time.nanoseconds
	    );
	DBGV(" t3 PDelay Resp Tx time %10.10ds.%9.9dns\n",
	     ptpClock->t3_pdelay_resp_tx_time.seconds,
	     ptpClock->t3_pdelay_resp_tx_time.nanoseconds
	    );
	DBGV(" t4 PDelay Resp Rx time %10.10ds.%9.9dns\n",
	     ptpClock->t4_pdelay_resp_rx_time.seconds,
	     ptpClock->t4_pdelay_resp_rx_time.nanoseconds
	    );
	DBGV(" PDelay Resp correction %10.10ds.%9.9dns\n",
	     ptpClock->pdelay_resp_correction.seconds,
	     ptpClock->pdelay_resp_correction.nanoseconds
	    );
	DBGV(" PDelay Resp follow up %10.10ds.%9.9dns\n",
	     ptpClock->pdelay_followup_correction.seconds,
	     ptpClock->pdelay_followup_correction.nanoseconds
	    );

	/* calc 'slave_to_master_delay' */
	/* start with the local round trip: t4 - t1 */
	subTime(&ptpClock->one_way_delay,		// Result
		&ptpClock->t4_pdelay_resp_rx_time,	// PDelay Response Receive time
		&ptpClock->t1_pdelay_req_tx_time	// minus PDelay Request Transmit time
	       );
	DBGV(" (t4-t1) %10.10ds.%9.9dns\n",
	     ptpClock->one_way_delay.seconds,
	     ptpClock->one_way_delay.nanoseconds
	    );

	/* the responder's turnaround time: t3 - t2 */
	subTime(&remote_time,				// Result
		&ptpClock->t3_pdelay_resp_tx_time,	// PDelay Resp Transmit time (from responder)
		&ptpClock->t2_pdelay_req_rx_time	// minus PDelay Request Receive time (from responder)
	       );
	DBGV(" (t3-t2) %10.10ds.%9.9dns\n",
	     remote_time.seconds,
	     remote_time.nanoseconds
	    );

	/* round trip minus turnaround: (t4-t1)-(t3-t2) */
	subTime(&ptpClock->one_way_delay,	// Result
		&ptpClock->one_way_delay,	// (T4-T1)
		&remote_time			// minus (T3-T2)
	       );
	DBGV(" (t4-t1)-(t3-t2) %10.10ds.%9.9dns\n",
	     ptpClock->one_way_delay.seconds,
	     ptpClock->one_way_delay.nanoseconds
	    );

	/* subtract both correction fields reported by the responder */
	subTime(&ptpClock->one_way_delay,		// Result
		&ptpClock->one_way_delay,		// Current Calculation
		&ptpClock->pdelay_resp_correction	// minus PDelay Resp Correction
	       );
	DBGV(" minus 1st correction %10.10ds.%9.9dns\n",
	     ptpClock->one_way_delay.seconds,
	     ptpClock->one_way_delay.nanoseconds
	    );

	subTime(&ptpClock->one_way_delay,		// Result
		&ptpClock->one_way_delay,		// Current Calculation
		&ptpClock->pdelay_followup_correction	// minus PDelay Resp Correction
	       );
	DBGV(" minus 2nd correction %10.10ds.%9.9dns\n",
	     ptpClock->one_way_delay.seconds,
	     ptpClock->one_way_delay.nanoseconds
	    );

	/* halve to get the one-way (mean path) delay */
	halveTime(&ptpClock->one_way_delay);
	DBGV(" divided by 2 %10.10ds.%9.9dns\n",
	     ptpClock->one_way_delay.seconds,
	     ptpClock->one_way_delay.nanoseconds
	    );

	copyTime( &ptpClock->slave_to_master_delay,	// Destination
		  &ptpClock->one_way_delay		// Source
		);

	/* consume the timestamps/corrections so stale values are never reused */
	clearTime(&ptpClock->t1_pdelay_req_tx_time);
	clearTime(&ptpClock->t2_pdelay_req_rx_time);
	clearTime(&ptpClock->t3_pdelay_resp_tx_time);
	clearTime(&ptpClock->t4_pdelay_resp_rx_time);
	clearTime(&ptpClock->pdelay_resp_correction);
	clearTime(&ptpClock->pdelay_followup_correction);
	clearTime(&ptpClock->t1_sync_delta_time);
	clearTime(&ptpClock->t2_sync_delta_time);

	if(ptpClock->one_way_delay.seconds)	// Check if delay is larger than one second
	{
		/* Delay is larger than one second, clear s_exp and timestamp
		 * of previously received sent time of Sync message (usually from
		 * preciseOriginTimestamp of follow up message) and return
		 */
		DBG("updatePathDelay: One way delay seconds != 0\n");
		DBG("updatePathDelay: Clearing one way delay filter s_exp, nsec_prev\n");
		owd_filt->s_exp = 0;
		owd_filt->nsec_prev = 0;
		return;
	}

	/* avoid overflowing filter: shrink s until y << s cannot overflow 31 bits */
	s = rtOpts->s;
	while(abs(owd_filt->y)>>(31-s))
		--s;
	DBGV("updatePathDelay: rtOpts->s: %d, s:%d\n",
	     rtOpts->s,
	     s
	    );
	DBGV("updatePathDelay: current owd_filt->y: %d, s_exp: %d\n",
	     owd_filt->y,
	     owd_filt->s_exp
	    );

	/* crank down filter cutoff by increasing 's_exp' (clamped to 1<<s) */
	if(owd_filt->s_exp < 1)
		owd_filt->s_exp = 1;
	else if(owd_filt->s_exp < 1<<s)
		++owd_filt->s_exp;
	else if(owd_filt->s_exp > 1<<s)
		owd_filt->s_exp = 1<<s;

	/* filter 'one_way_delay': exponential smoothing of the nanosecond part */
	owd_filt->y = (owd_filt->s_exp-1)
		      *owd_filt->y/owd_filt->s_exp
		      + (ptpClock->one_way_delay.nanoseconds/2
		         + owd_filt->nsec_prev/2
		        )
		        /owd_filt->s_exp;

	/* Record previous one way delay nanosecond value
	 * and update it with value calculated above
	 */
	owd_filt->nsec_prev = ptpClock->one_way_delay.nanoseconds;
	ptpClock->one_way_delay.nanoseconds = owd_filt->y;

	DBGV("updatePathDelay: delay filter y:%d, s_exp:%d\n",
	     owd_filt->y,
	     owd_filt->s_exp
	    );
}
/*
 * Send a peer (P2P) event message, either over raw ethernet (pcap),
 * to a configured unicast peer, or to the peer multicast group
 * (with TTL forced to 1). When SO_TIMESTAMPING is available the TX
 * timestamp is fetched into 'tim'; on failure we fall back to looping
 * the packet back so a timestamp can still be obtained on receive.
 *
 * Returns the sendto()/pcap result of the last transmission attempt.
 *
 * BUG FIX: sentPackets was incremented both inside the send branches
 * and again by a trailing "if (ret > 0)" check, double-counting every
 * successful send (the sibling netSendEvent counts only once). Each
 * branch now counts exactly one successful transmission, and the pcap
 * branch - which previously was never counted - increments too.
 */
ssize_t
netSendPeerEvent(Octet * buf, UInteger16 length, NetPath * netPath,
		 RunTimeOpts *rtOpts, TimeInternal * tim)
{
	ssize_t ret;
	struct sockaddr_in addr;

	addr.sin_family = AF_INET;
	addr.sin_port = htons(PTP_EVENT_PORT);

#ifdef PTPD_PCAP
	if ((netPath->pcapGeneral != NULL) && (rtOpts->transport == IEEE_802_3)) {
		ret = netSendPcapEther(buf, length,
			&netPath->peerEtherDest,
			(struct ether_addr *)netPath->interfaceID,
			netPath->pcapGeneral);
		if (ret <= 0)
			DBG("error sending ether multi-cast general message\n");
		else
			netPath->sentPackets++;
	} else if (netPath->unicastAddr)
#else
	if (netPath->unicastAddr)
#endif
	{
		addr.sin_addr.s_addr = netPath->unicastAddr;
		ret = sendto(netPath->eventSock, buf, length, 0,
			     (struct sockaddr *)&addr,
			     sizeof(struct sockaddr_in));
		if (ret <= 0)
			DBG("Error sending unicast peer event message\n");
		else
			netPath->sentPackets++;
#ifndef SO_TIMESTAMPING
		/*
		 * Need to forcibly loop back the packet since
		 * we are not using multicast.
		 */
		addr.sin_addr.s_addr = netPath->interfaceAddr.s_addr;
		ret = sendto(netPath->eventSock, buf, length, 0,
			     (struct sockaddr *)&addr,
			     sizeof(struct sockaddr_in));
		if (ret <= 0)
			DBG("Error looping back unicast peer event message\n");
#else
		if(!netPath->txTimestampFailure) {
			if(!getTxTimestamp(netPath, tim)) {
				netPath->txTimestampFailure = TRUE;
				if (tim) {
					clearTime(tim);
				}
			}
		}

		if(netPath->txTimestampFailure) {
			/* We've had a TX timestamp receipt timeout - falling back to packet looping */
			addr.sin_addr.s_addr = netPath->interfaceAddr.s_addr;
			ret = sendto(netPath->eventSock, buf, length, 0,
				     (struct sockaddr *)&addr,
				     sizeof(struct sockaddr_in));
			if (ret <= 0)
				DBG("Error looping back unicast event message\n");
		}
#endif /* SO_TIMESTAMPING */
	} else {
		addr.sin_addr.s_addr = netPath->peerMulticastAddr;

		/* is TTL already 1 ? */
		if(netPath->ttlEvent != 1) {
			/* Try setting TTL to 1 */
			if (netSetMulticastTTL(netPath->eventSock,1)) {
				netPath->ttlEvent = 1;
			}
		}
		ret = sendto(netPath->eventSock, buf, length, 0,
			     (struct sockaddr *)&addr,
			     sizeof(struct sockaddr_in));
		if (ret <= 0)
			DBG("Error sending multicast peer event message\n");
		else
			netPath->sentPackets++;
#ifdef SO_TIMESTAMPING
		if(!netPath->txTimestampFailure) {
			if(!getTxTimestamp(netPath, tim)) {
				if (tim) {
					clearTime(tim);
				}
				netPath->txTimestampFailure = TRUE;
				/* Try re-enabling MULTICAST_LOOP */
				netSetMulticastLoopback(netPath, TRUE);
			}
		}
#endif /* SO_TIMESTAMPING */
	}

	return ret;
}
//
// Send a PTP event message.
//
// alt_dst: alternative destination.
//     if filled, send to this unicast dest;
//     if zero, do the normal operation (send to unicast with -u, or send to the multcast group)
//
// Transport selection (in order): raw ethernet via pcap when configured
// for IEEE_802_3; otherwise unicast (configured address or alt_dst, with
// the PTP_UNICAST flag set in the header); otherwise the event multicast
// group. Without SO_TIMESTAMPING the packet is looped back to obtain an
// RX timestamp; with it, the TX timestamp is fetched into 'tim' and the
// loopback is only used as a fallback after a timestamp failure.
//
// Returns the result of the last sendto()/pcap call.
//
///
/// TODO: merge these 2 functions into one
///
ssize_t
netSendEvent(Octet * buf, UInteger16 length, NetPath * netPath,
	     RunTimeOpts *rtOpts, Integer32 alt_dst, TimeInternal * tim)
{
	ssize_t ret;
	struct sockaddr_in addr;

	addr.sin_family = AF_INET;
	addr.sin_port = htons(PTP_EVENT_PORT);

#ifdef PTPD_PCAP
	/* In PCAP Ethernet mode, we use pcapEvent for receiving all messages
	 * and pcapGeneral for sending all messages
	 */
	if ((netPath->pcapGeneral != NULL) && (rtOpts->transport == IEEE_802_3 )) {
		ret = netSendPcapEther(buf, length,
			&netPath->etherDest,
			(struct ether_addr *)netPath->interfaceID,
			netPath->pcapGeneral);
		if (ret <= 0)
			DBG("Error sending ether multicast event message\n");
		else
			netPath->sentPackets++;
	} else {
#endif
		if (netPath->unicastAddr || alt_dst ) {
			/* configured unicast master wins over alt_dst */
			if (netPath->unicastAddr) {
				addr.sin_addr.s_addr = netPath->unicastAddr;
			} else {
				addr.sin_addr.s_addr = alt_dst;
			}

			/*
			 * This function is used for PTP only anyway...
			 * If we're sending to a unicast address, set the UNICAST flag.
			 * (byte 6 of the buffer holds the PTP header flag field octet)
			 */
			*(char *)(buf + 6) |= PTP_UNICAST;
			ret = sendto(netPath->eventSock, buf, length, 0,
				     (struct sockaddr *)&addr,
				     sizeof(struct sockaddr_in));
			if (ret <= 0)
				DBG("Error sending unicast event message\n");
			else
				netPath->sentPackets++;
#ifndef SO_TIMESTAMPING
			/*
			 * Need to forcibly loop back the packet since
			 * we are not using multicast.
			 */
			addr.sin_addr.s_addr = netPath->interfaceAddr.s_addr;
			ret = sendto(netPath->eventSock, buf, length, 0,
				     (struct sockaddr *)&addr,
				     sizeof(struct sockaddr_in));
			if (ret <= 0)
				DBG("Error looping back unicast event message\n");
#else
			/* try fetching the hardware/software TX timestamp */
			if(!netPath->txTimestampFailure) {
				if(!getTxTimestamp(netPath, tim)) {
					netPath->txTimestampFailure = TRUE;
					if (tim) {
						clearTime(tim);
					}
				}
			}

			if(netPath->txTimestampFailure) {
				/* We've had a TX timestamp receipt timeout - falling back to packet looping */
				addr.sin_addr.s_addr = netPath->interfaceAddr.s_addr;
				ret = sendto(netPath->eventSock, buf, length, 0,
					     (struct sockaddr *)&addr,
					     sizeof(struct sockaddr_in));
				if (ret <= 0)
					DBG("Error looping back unicast event message\n");
			}
#endif /* SO_TIMESTAMPING */
		} else {
			addr.sin_addr.s_addr = netPath->multicastAddr;

			/* Is TTL OK? */
			if(netPath->ttlEvent != rtOpts->ttl) {
				/* Try restoring TTL */
				/* set socket time-to-live */
				if (netSetMulticastTTL(netPath->eventSock,rtOpts->ttl)) {
					netPath->ttlEvent = rtOpts->ttl;
				}
			}

			ret = sendto(netPath->eventSock, buf, length, 0,
				     (struct sockaddr *)&addr,
				     sizeof(struct sockaddr_in));
			if (ret <= 0)
				DBG("Error sending multicast event message\n");
			else
				netPath->sentPackets++;
#ifdef SO_TIMESTAMPING
			if(!netPath->txTimestampFailure) {
				if(!getTxTimestamp(netPath, tim)) {
					if (tim) {
						clearTime(tim);
					}
					netPath->txTimestampFailure = TRUE;
					/* Try re-enabling MULTICAST_LOOP */
					netSetMulticastLoopback(netPath, TRUE);
				}
			}
#endif /* SO_TIMESTAMPING */
		}
#ifdef PTPD_PCAP
	}
#endif
	return ret;
}
/* perform actions required when leaving 'port_state' and entering 'state' */
/*
 * PTP port state transition handler. First runs the "leaving" tasks for
 * the current portState (stopping that state's timers, re-initialising
 * the clock when leaving SLAVE), then the "entering" tasks for the new
 * state (starting its timers and setting portState).
 */
void toState(UInteger8 state, RunTimeOpts *rtOpts, PtpClock *ptpClock)
{
	ptpClock->message_activity = TRUE;

	/* leaving state tasks */
	switch (ptpClock->portState)
	{
	case PTP_MASTER:
		timerStop(SYNC_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(ANNOUNCE_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(PDELAYREQ_INTERVAL_TIMER, ptpClock->itimer);
		break;

	case PTP_SLAVE:
		timerStop(ANNOUNCE_RECEIPT_TIMER, ptpClock->itimer);

		/* only the active delay mechanism's timer was running */
		if (ptpClock->delayMechanism == E2E)
			timerStop(DELAYREQ_INTERVAL_TIMER, ptpClock->itimer);
		else if (ptpClock->delayMechanism == P2P)
			timerStop(PDELAYREQ_INTERVAL_TIMER, ptpClock->itimer);

		initClock(rtOpts, ptpClock);
		break;

	case PTP_PASSIVE:
		timerStop(PDELAYREQ_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(ANNOUNCE_RECEIPT_TIMER, ptpClock->itimer);
		break;

	case PTP_LISTENING:
		timerStop(ANNOUNCE_RECEIPT_TIMER, ptpClock->itimer);
		break;

	default:
		break;
	}

	/* entering state tasks */
	/*
	 * No need of PRE_MASTER state because of only ordinary clock
	 * implementation.
	 */
	switch (state)
	{
	case PTP_INITIALIZING:
		DBG("state PTP_INITIALIZING\n");
		ptpClock->portState = PTP_INITIALIZING;
		break;

	case PTP_FAULTY:
		DBG("state PTP_FAULTY\n");
		ptpClock->portState = PTP_FAULTY;
		break;

	case PTP_DISABLED:
		DBG("state PTP_DISABLED\n");
		ptpClock->portState = PTP_DISABLED;
		break;

	case PTP_LISTENING:
		/* in Listening mode, make sure we don't send anything. Instead we just expect/wait for announces (started below) */
		timerStop(SYNC_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(ANNOUNCE_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(PDELAYREQ_INTERVAL_TIMER, ptpClock->itimer);
		timerStop(DELAYREQ_INTERVAL_TIMER, ptpClock->itimer);

		/*
		 * Count how many _unique_ timeouts happen to us.
		 * If we were already in Listen mode, then do not count this as a seperate reset, but stil do a new IGMP refresh
		 */
		if (ptpClock->portState != PTP_LISTENING) {
			ptpClock->reset_count++;
		}

		/* Revert to the original DelayReq interval, and ignore the one for the last master */
		ptpClock->logMinDelayReqInterval = rtOpts->initial_delayreq;

		/* force a IGMP refresh per reset */
		if (rtOpts->do_IGMP_refresh) {
			netRefreshIGMP(&ptpClock->netPath, rtOpts, ptpClock);
		}

		DBG("state PTP_LISTENING\n");
		INFO(" now in state PTP_LISTENING\n");
		timerStart(ANNOUNCE_RECEIPT_TIMER,
			   (ptpClock->announceReceiptTimeout) *
			   (pow(2,ptpClock->logAnnounceInterval)),
			   ptpClock->itimer);
		ptpClock->portState = PTP_LISTENING;
		break;

	case PTP_MASTER:
		DBG("state PTP_MASTER\n");
		INFO(" now in state PTP_MASTER\n");
		/* master emits sync, announce and pdelay_req at their log2 intervals */
		timerStart(SYNC_INTERVAL_TIMER,
			   pow(2,ptpClock->logSyncInterval), ptpClock->itimer);
		DBG("SYNC INTERVAL TIMER : %f \n",
		    pow(2,ptpClock->logSyncInterval));
		timerStart(ANNOUNCE_INTERVAL_TIMER,
			   pow(2,ptpClock->logAnnounceInterval),
			   ptpClock->itimer);
		timerStart(PDELAYREQ_INTERVAL_TIMER,
			   pow(2,ptpClock->logMinPdelayReqInterval),
			   ptpClock->itimer);
		ptpClock->portState = PTP_MASTER;
		break;

	case PTP_PASSIVE:
		DBG("state PTP_PASSIVE\n");
		INFO(" now in state PTP_PASSIVE\n");
		timerStart(PDELAYREQ_INTERVAL_TIMER,
			   pow(2,ptpClock->logMinPdelayReqInterval),
			   ptpClock->itimer);
		timerStart(ANNOUNCE_RECEIPT_TIMER,
			   (ptpClock->announceReceiptTimeout) *
			   (pow(2,ptpClock->logAnnounceInterval)),
			   ptpClock->itimer);
		ptpClock->portState = PTP_PASSIVE;
		p1(ptpClock, rtOpts);
		break;

	case PTP_UNCALIBRATED:
		DBG("state PTP_UNCALIBRATED\n");
		ptpClock->portState = PTP_UNCALIBRATED;
		break;

	case PTP_SLAVE:
		DBG("state PTP_SLAVE\n");
		INFO(" now in state PTP_SLAVE\n");
		initClock(rtOpts, ptpClock);

		ptpClock->waitingForFollow = FALSE;
		ptpClock->waitingForDelayResp = FALSE;

		// FIXME: clear these vars inside initclock
		clearTime(&ptpClock->pdelay_req_send_time);
		clearTime(&ptpClock->pdelay_req_receive_time);
		clearTime(&ptpClock->pdelay_resp_send_time);
		clearTime(&ptpClock->pdelay_resp_receive_time);

		timerStart(OPERATOR_MESSAGES_TIMER,
			   OPERATOR_MESSAGES_INTERVAL,
			   ptpClock->itimer);

		timerStart(ANNOUNCE_RECEIPT_TIMER,
			   (ptpClock->announceReceiptTimeout) *
			   (pow(2,ptpClock->logAnnounceInterval)),
			   ptpClock->itimer);

		/*
		 * Previously, this state transition would start the delayreq timer immediately.
		 * However, if this was faster than the first received sync, then the servo would drop the delayResp
		 * Now, we only start the timer after we receive the first sync (in handle_sync())
		 */
		ptpClock->waiting_for_first_sync = TRUE;
		ptpClock->waiting_for_first_delayresp = TRUE;

		ptpClock->portState = PTP_SLAVE;
		break;

	default:
		DBG("to unrecognized state\n");
		break;
	}

	if (rtOpts->displayStats)
		displayStats(rtOpts, ptpClock);
}