// Run the pixel-wise kernel `fun` with the image split into column blocks,
// iterating each block serially column-first (used when the kernel carries a
// column-order dependency), while the blocks themselves are dispatched via
// block_wise.  The kernel invocation is selected at compile time with
// iod::static_if depending on whether the caller requested _tie_arguments.
void parallel_for_pixel_wise_runner<openmp, OPTS, Params...>::run_col_first_parallel(F fun)
{
  // Bounding coordinates of the first range: p1 = top-left, p2 = bottom-right.
  auto p1 = std::get<0>(ranges_).first_point_coordinates();
  auto p2 = std::get<0>(ranges_).last_point_coordinates();

  // Historical (disabled) logic kept for reference:
  // int col_start = p1[1];
  // int col_end = p2[1];
  // const bool row_reverse = options_.has(_row_backward) || options_.has(_mem_backward);
  // const bool col_reverse = options_.has(_col_backward) || options_.has(_mem_backward);
  // const int config[4] = { options_.has(_row_backward), options_.has(_row_forward),
  //                         options_.has(_col_backward), options_.has(_col_forward) };
  // const int config_sum = config[0] + config[1] + config[2] + config[3];
  // const bool parallel =
  //   (config_sum == 0 || !((config[0] || config[1]) &&
  //                         (config[2] || config[3]))) && // no dependency or either row_* or col_* is activated (not both).
  //   !options_.has(_no_threads); // user did not specify serial

  // Block width: user-requested _block_size (default 32) columns, clamped so a
  // block never exceeds the range's column extent.
  const int bs = std::min(options_.get(_block_size, 32), p2[1] - p1[1]);

  // Tile the range into blocks of (full height) x (bs columns); each block is
  // processed with a serial (_no_threads) pixel_wise pass so the intra-block
  // column ordering required by the kernel is preserved.
  block_wise(vint2{1 + p2[0] - p1[0], bs}, ranges_)(_tie_arguments) |
    [this, &fun] (auto& b)
    {
      if (options_.has(_col_backward))
        // Column-backward traversal requested: propagate the option to the
        // per-block pixel_wise call.  static_if picks the overload matching
        // whether the kernel expects tied (tuple) arguments.
        iod::static_if<OPTS().has(_tie_arguments)>(
          [&b] (auto& fun) { return pixel_wise(b)(_col_backward, _no_threads, _tie_arguments) | fun; },
          [&b] (auto& fun) { return pixel_wise(b)(_col_backward, _no_threads) | fun; },
          fun);
      else
        // Default (forward) column order within each block.
        iod::static_if<OPTS().has(_tie_arguments)>(
          [&b] (auto& fun) { return pixel_wise(b)(_no_threads, _tie_arguments) | fun; },
          [&b] (auto& fun) { return pixel_wise(b)(_no_threads) | fun; },
          fun);
    };
}
/* Pack and send on event multicast ip adress a Sync message */ int msg_issue_sync_followup(struct pp_instance *ppi) { Timestamp tstamp; TimeInternal now, *time_snt; int e; /* Send sync on the event channel with the "current" timestamp */ ppi->t_ops->get(ppi, &now); from_TimeInternal(&now, &tstamp); msg_pack_sync(ppi, &tstamp); e = __send_and_log(ppi, PP_SYNC_LENGTH, PPM_SYNC, PP_NP_EVT); if (e) return e; /* Send followup on general channel with sent-stamp of sync */ time_snt = &ppi->last_snt_time; add_TimeInternal(time_snt, time_snt, &OPTS(ppi)->outbound_latency); from_TimeInternal(time_snt, &tstamp); msg_pack_follow_up(ppi, &tstamp); return __send_and_log(ppi, PP_FOLLOW_UP_LENGTH, PPM_FOLLOW_UP, PP_NP_GEN); }
/*
 * PTP SLAVE state handler: dispatches one received frame (if any), services
 * the delay-request timeout, and computes the next poll delay.
 * Always returns 0; protocol errors route the port to PPS_FAULTY instead.
 */
int pp_slave(struct pp_instance *ppi, unsigned char *pkt, int plen)
{
	int e = 0; /* error var, to check errors in msg handling */
	MsgHeader *hdr = &ppi->received_ptp_header;
	MsgDelayResp resp;
	int d1, d2;

	/* First entry into SLAVE: reset servo, run hook, arm timeouts */
	if (ppi->is_new_state) {
		pp_servo_init(ppi);

		if (pp_hooks.new_slave)
			e = pp_hooks.new_slave(ppi, pkt, plen);
		if (e)
			goto out;

		ppi->waiting_for_follow = FALSE;

		pp_timeout_restart_annrec(ppi);
		pp_timeout_rand(ppi, PP_TO_DELAYREQ,
				DSPOR(ppi)->logMinDelayReqInterval);
	}

	/* No frame received this call: just run the timeout machinery */
	if (plen == 0)
		goto out;

	switch (hdr->messageType) {

	case PPM_ANNOUNCE:
		e = st_com_slave_handle_announce(ppi, pkt, plen);
		break;

	case PPM_SYNC:
		e = st_com_slave_handle_sync(ppi, pkt, plen);
		break;

	case PPM_FOLLOW_UP:
		e = st_com_slave_handle_followup(ppi, pkt, plen);
		break;

	case PPM_DELAY_REQ:
		/* Being slave, we are not waiting for a delay request */
		break;

	case PPM_DELAY_RESP:
		/* A short frame is treated as an error (e becomes 1) */
		e = (plen < PP_DELAY_RESP_LENGTH);
		if (e)
			break;
		msg_unpack_delay_resp(pkt, &resp);

		/*
		 * Accept the response only if it targets our port identity,
		 * matches the sequence id of our last Delay_Req, and comes
		 * from the current parent.
		 */
		if ((memcmp(&DSPOR(ppi)->portIdentity.clockIdentity,
			    &resp.requestingPortIdentity.clockIdentity,
			    PP_CLOCK_IDENTITY_LENGTH) == 0) &&
		    ((ppi->sent_seq[PPM_DELAY_REQ]) ==
		     hdr->sequenceId) &&
		    (DSPOR(ppi)->portIdentity.portNumber ==
		     resp.requestingPortIdentity.portNumber) &&
		    ppi->is_from_cur_par) {

			/* t4 = master's receive time of our Delay_Req */
			to_TimeInternal(&ppi->t4, &resp.receiveTimestamp);
			/*
			 * FIXME: how is correctionField handled in t3/t4?
			 * I think the master should consider it when
			 * generating t4, and report back a modified t4
			 */
			if (pp_hooks.handle_resp)
				e = pp_hooks.handle_resp(ppi);
			else
				pp_servo_got_resp(ppi);
			if (e)
				goto out;

			ppi->log_min_delay_req_interval =
				hdr->logMessageInterval;

		} else {
			pp_diag(ppi, frames, 2, "pp_slave : "
				"Delay Resp doesn't match Delay Req\n");
		}
		break;

	/*
	 * We are not supporting pdelay (not configured to, see
	 * 9.5.13.1, p 106), so all the code about pdelay is removed
	 * as a whole by one commit in our history. It can be recovered
	 * and fixed if needed
	 */

	default:
		/* disregard, nothing to do */
		break;
	}

out:
	/* Common slave bookkeeping runs only if message handling succeeded */
	if (e == 0)
		e = st_com_execute_slave(ppi);

	/* Time to issue a new Delay_Req? */
	if (pp_timeout_z(ppi, PP_TO_DELAYREQ)) {
		e = msg_issue_delay_req(ppi);

		/* t3 = our transmit time of the Delay_Req just sent */
		ppi->t3 = ppi->last_snt_time;
		/* Restart the timeout for next time */
		pp_timeout_rand(ppi, PP_TO_DELAYREQ,
				DSPOR(ppi)->logMinDelayReqInterval);

		/* Add latency */
		add_TimeInternal(&ppi->t3, &ppi->t3,
				 &OPTS(ppi)->outbound_latency);
	}

	if (e) {
		ppi->next_state = PPS_FAULTY;
		return 0;
	}

	/* Leaving this state */
	if (ppi->next_state != ppi->state) {
		pp_timeout_clr(ppi, PP_TO_ANN_RECEIPT);
		pp_timeout_clr(ppi, PP_TO_DELAYREQ);
		pp_servo_init(ppi);
	}

	/* Next wakeup = nearest of announce-receipt and delay-req timeouts */
	d1 = d2 = pp_ms_to_timeout(ppi, PP_TO_ANN_RECEIPT);
	if (ppi->timeouts[PP_TO_DELAYREQ])
		d2 = pp_ms_to_timeout(ppi, PP_TO_DELAYREQ);
	ppi->next_delay = d1 < d2 ? d1 : d2;
	return 0;
}
int pp_initializing(struct pp_instance *ppi, unsigned char *pkt, int plen) { unsigned char *id, *mac; struct DSPort *port = DSPOR(ppi); struct pp_runtime_opts *opt = OPTS(ppi); int ret = 0; if (ppi->n_ops->init(ppi) < 0) /* it must handle being called twice */ goto failure; /* Clock identity comes from mac address with 0xff:0xfe intermixed */ id = (unsigned char *)&DSDEF(ppi)->clockIdentity; mac = NP(ppi)->ch[PP_NP_GEN].addr; id[0] = mac[0]; id[1] = mac[1]; id[2] = mac[2]; id[3] = 0xff; id[4] = 0xfe; id[5] = mac[3]; id[6] = mac[4]; id[7] = mac[5]; /* * Initialize port data set */ memcpy(&port->portIdentity.clockIdentity, &DSDEF(ppi)->clockIdentity, PP_CLOCK_IDENTITY_LENGTH); port->portIdentity.portNumber = 1; port->logMinDelayReqInterval = PP_DEFAULT_DELAYREQ_INTERVAL; port->logAnnounceInterval = opt->announce_intvl; port->announceReceiptTimeout = PP_DEFAULT_ANNOUNCE_RECEIPT_TIMEOUT; port->logSyncInterval = opt->sync_intvl; port->versionNumber = PP_VERSION_PTP; if (pp_hooks.init) ret = pp_hooks.init(ppi, pkt, plen); if (ret) { pp_diag(ppi, ext, 1, "%s: can't init extension\n", __func__); goto failure; } if (ret) { pp_diag(ppi, time, 1, "%s: can't init timers\n", __func__); goto failure; } pp_init_clock(ppi); pp_diag(ppi, bmc, 1, "clock class = %d\n", DSDEF(ppi)->clockQuality.clockClass); pp_diag(ppi, bmc, 1, "clock accuracy = %d\n", DSDEF(ppi)->clockQuality.clockAccuracy); m1(ppi); msg_pack_header(ppi, ppi->tx_ptp); /* This is used for all tx */ if (!ppi->master_only) ppi->next_state = PPS_LISTENING; else ppi->next_state = PPS_MASTER; return 0; failure: ppi->next_delay = 1000; /* wait 1s before retrying */ return 0; }