// the caller must have checked there are static int nf_msg_head_decode(struct nf_msg *msg, void const *src) { struct nf_msg_ll const *msg_ll = src; // FIXME: won't work if not properly aligned unsigned const version = ntohs(msg_ll->version); if (version != 5) { SLOG(LOG_DEBUG, "Skip netflow version %u", version); return -1; } CONV_16(msg, version); CONV_16(msg, nb_flows); CONV_32(msg, sys_uptime); msg->ts.tv_sec = ntohl(msg_ll->ts_sec); msg->ts.tv_usec = ntohl(msg_ll->ts_nsec) / 1000; CONV_32(msg, seqnum); CONV_8(msg, engine_type); CONV_8(msg, engine_id); uint16_t sampling = ntohs(msg_ll->sampling); msg->sampling_mode = sampling >> 14U; msg->sample_rate = sampling & 0x2FFF; return 0; }
// Decode the flow in src into flow. We already checked there are enough bytes to read in src. static int nf_flow_decode(struct nf_flow *flow, struct nf_msg const *head, void const *src) { struct nf_flow_ll const *flow_ll = src; // FIXME: won't work if not properly aligned CONV_IP(flow, addr[0]); CONV_IP(flow, addr[1]); CONV_IP(flow, next_hop); CONV_16(flow, port[0]); CONV_16(flow, port[1]); CONV_16(flow, in_iface); CONV_16(flow, out_iface); CONV_32(flow, packets); CONV_32(flow, bytes); CONV_8(flow, tcp_flags); CONV_8(flow, ip_proto); CONV_8(flow, ip_tos); CONV_16(flow, as[0]); CONV_16(flow, as[1]); CONV_8(flow, mask[0]); CONV_8(flow, mask[1]); /* The first/last fields of the netflow are the uptime at the first/last pkt of the flow. * We find a timestamp more interesting, so we get it from sysuptime and localtime of the header. * But this imply trusting the netflow header localtime. */ SLOG(LOG_DEBUG, "Decoding a flow which sys_uptime=%"PRIu32", now=%s, first=%u, last=%u", head->sys_uptime, timeval_2_str(&head->ts), ntohl(flow_ll->first), ntohl(flow_ll->last)); flow->first = head->ts; timeval_sub_usec(&flow->first, (int64_t)(head->sys_uptime - ntohl(flow_ll->first)) * 1000); flow->last = head->ts; timeval_sub_usec(&flow->last, (int64_t)(head->sys_uptime - ntohl(flow_ll->last)) * 1000); SLOG(LOG_DEBUG, "...yielding: %s->%s", timeval_2_str(&flow->first), timeval_2_str(&flow->last)); return 0; }
/*
 * 2x2 convolution of an unsigned 16-bit image using SPARC VIS intrinsics,
 * processing only the channels selected by cmask; edge pixels are not
 * written (dw/dh are shrunk by one below with the "edge - no write" note).
 * The kernel is scaled by 2^-scalef_expon via the GSR scale factor.
 *
 * NOTE(review): GET_SRC_DST_PARAMETERS / LOAD_KERNEL_INTO_FLOAT /
 * CONV_16_BEGIN / CONV_16 are macros defined elsewhere in this file; the
 * comments about their effects below are inferred from usage — confirm
 * against the macro definitions.
 */
mlib_status
mlib_v_conv2x2_u16nw_mask(
    mlib_image *dst,
    const mlib_image *src,
    const mlib_s32 *kernel,
    mlib_s32 scalef_expon,
    mlib_s32 cmask)
{
    /* pointers to dst row */
    mlib_u16 *da, *d_a;

    /* pointers to src, dst data */
    mlib_u16 *adr_dst, *adr_src, *dend;

    /* pointers to src rows */
    mlib_u16 *sa, *sa1, *sa2, *sa_2;

    /* pointers to rows in interm. src buf */
    mlib_u16 *buff_src, *sbuf1, *sbuf2, *prow;
    mlib_u16 *s_buf1;

    /* mlib_d64 pointers to rows in interm. src buf */
    mlib_d64 *s1, *s2;

    /* src, dst and interm. buf. strides */
    mlib_s32 dlb, slb, buf_slb;
    mlib_s32 dh, dw;
    mlib_d64 out0, out1, tmp0, tmp1, tmp2, tmp3;

    /* data */
    mlib_d64 d1, d2, d_1, d_2;

    /* shifted data */
    mlib_d64 d21, d22;

    /* coefficients */
    mlib_f32 k1, k2, k3, k4;
    int gsr_scale, i, j, nchannel, chan, testchan;
    mlib_u16 t1, t2, t3, t4, t5, t6, t7, t8;
    type_mlib_d64 str;
    /* mask8000: XORing with it flips the sign bit of each 16-bit lane,
     * presumably mapping unsigned samples into the signed range the VIS
     * arithmetic expects — confirm against the CONV_16 macros */
    mlib_d64 ker_off, mask8000 = vis_to_double_dup(0x80008000);

    nchannel = mlib_ImageGetChannels(src);
    GET_SRC_DST_PARAMETERS();
    LOAD_KERNEL_INTO_FLOAT();

    /* program the GSR: fixed-point scale plus alignment offset 2 bytes
     * (one u16) for the vis_faligndata shifts below */
    gsr_scale = 32 - scalef_expon;
    vis_write_gsr((gsr_scale << 3) + 2);

    /* buf_slb - 8-byte aligned */
    buf_slb = (2 * dw + 26) & (~7);

    /* alloc. interm. src buffer */
    buff_src = (mlib_u16 *)__mlib_malloc(2 * buf_slb * sizeof (mlib_u8) + 8);

    if (buff_src == NULL)
        return (MLIB_FAILURE);

    buf_slb >>= 1;
    sbuf1 = (mlib_u16 *)((mlib_addr)(buff_src + 8) & (~7));
    sbuf2 = sbuf1 + buf_slb;
    dw -= 1;
    /* edge - no write */
    dh -= 1;

    /* walk the channels from the highest bit of cmask down, skipping the
     * channels not selected */
    testchan = 1;

    for (chan = nchannel - 1; chan >= 0; chan--) {
        if ((cmask & testchan) == 0) {
            testchan <<= 1;
            continue;
        }

        testchan <<= 1;
        sa = adr_src + chan;
        sa1 = sa + slb;
        sa_2 = sa2 = sa1 + slb;
        d_a = adr_dst + chan;

        /* load interm. src buff */
        for (i = 0, j = 0; j < (dw + 1); i += nchannel, j++) {
            sbuf1[j] = sa1[i];
            sbuf2[j] = sa[i];
        }

        /* all rows but the last: the inner loop is software-pipelined so
         * each iteration stores the previous result, convolves the current
         * data and prefetches the next source row into s_buf1 */
        for (j = 0; j < dh - 1; j++) {
            da = d_a;
            /* swap the two row buffers: sbuf1 = current row, sbuf2 = next */
            prow = sbuf1;
            sbuf1 = sbuf2;
            sbuf2 = prow;
            s1 = (mlib_d64 *)sbuf1;
            s2 = (mlib_d64 *)sbuf2;
            dend = da + (dw - 1) * nchannel;
            s_buf1 = sbuf1;
            /* prologue: compute the first 4 results before the loop */
            d1 = *s1;
            d2 = *s2;
            d1 = vis_fxor(d1, mask8000);
            d2 = vis_fxor(d2, mask8000);
            d_1 = *(s1 + 1);
            d_2 = *(s2 + 1);
            d_1 = vis_fxor(d_1, mask8000);
            d_2 = vis_fxor(d_2, mask8000);
            CONV_16_BEGIN(d1, k1);
            CONV_16(d2, k3);
            d21 = vis_faligndata(d1, d_1);
            d22 = vis_faligndata(d2, d_2);
            CONV_16(d21, k2);
            CONV_16(d22, k4);
            str.value = vis_fxor(vis_fpackfix_pair(out0, out1), mask8000);
            d1 = d_1;
            d2 = d_2;
            s1++;
            s2++;

/*
 * in each iteration store result from prev. iterat.
 * and load data for processing next row
 */
#pragma pipeloop(0)
            for (i = 0; i < dw - 4; i += 4) {
                t1 = *sa_2;
                sa_2 += nchannel;
                t2 = *sa_2;
                sa_2 += nchannel;
                d_1 = *(s1 + 1);
                d_2 = *(s2 + 1);
                d_1 = vis_fxor(d_1, mask8000);
                d_2 = vis_fxor(d_2, mask8000);
                CONV_16_BEGIN(d1, k1);
                t3 = *sa_2;
                sa_2 += nchannel;
                t4 = *sa_2;
                sa_2 += nchannel;
                CONV_16(d2, k3);
                t5 = str.forshort.ushort0;
                t6 = str.forshort.ushort1;
                d21 = vis_faligndata(d1, d_1);
                t7 = str.forshort.ushort2;
                d22 = vis_faligndata(d2, d_2);
                t8 = str.forshort.ushort3;
                CONV_16(d21, k2);
                (*s_buf1++) = t1;
                (*s_buf1++) = t2;
                CONV_16(d22, k4);
                (*s_buf1++) = t3;
                (*s_buf1++) = t4;
                *da = t5;
                da += nchannel;
                str.value = vis_fxor(vis_fpackfix_pair(out0, out1), mask8000);
                *da = t6;
                da += nchannel;
                d1 = d_1;
                d2 = d_2;
                *da = t7;
                da += nchannel;
                s1++;
                s2++;
                *da = t8;
                da += nchannel;
            }

            /* finish prefetching the next source row */
            for (; i < dw + 1; i++) {
                (*s_buf1++) = *sa_2;
                sa_2 += nchannel;
            }

            /* epilogue: store the last (up to 4) results, bounded by dend */
            if ((mlib_addr)da <= (mlib_addr)dend) {
                *da = str.forshort.ushort0;
                da += nchannel;
            }

            if ((mlib_addr)da <= (mlib_addr)dend) {
                *da = str.forshort.ushort1;
                da += nchannel;
            }

            if ((mlib_addr)da <= (mlib_addr)dend) {
                *da = str.forshort.ushort2;
                da += nchannel;
            }

            if ((mlib_addr)da <= (mlib_addr)dend) {
                *da = str.forshort.ushort3;
            }

            sa_2 = sa2 = sa2 + slb;
            d_a += dlb;
        }

        /* process last row - no need to load data */
        da = d_a;
        prow = sbuf1;
        sbuf1 = sbuf2;
        sbuf2 = prow;
        s1 = (mlib_d64 *)sbuf1;
        s2 = (mlib_d64 *)sbuf2;
        dend = da + (dw - 1) * nchannel;
        d1 = *s1;
        d2 = *s2;
        d1 = vis_fxor(d1, mask8000);
        d2 = vis_fxor(d2, mask8000);
        d_1 = *(s1 + 1);
        d_2 = *(s2 + 1);
        d_1 = vis_fxor(d_1, mask8000);
        d_2 = vis_fxor(d_2, mask8000);
        CONV_16_BEGIN(d1, k1);
        CONV_16(d2, k3);
        d21 = vis_faligndata(d1, d_1);
        d22 = vis_faligndata(d2, d_2);
        CONV_16(d21, k2);
        CONV_16(d22, k4);
        d1 = d_1;
        d2 = d_2;
        s1++;
        s2++;

#pragma pipeloop(0)
        for (i = 4; i < dw; i += 4) {
            str.value = vis_fxor(vis_fpackfix_pair(out0, out1), mask8000);
            d_1 = *(s1 + 1);
            d_2 = *(s2 + 1);
            d_1 = vis_fxor(d_1, mask8000);
            d_2 = vis_fxor(d_2, mask8000);
            CONV_16_BEGIN(d1, k1);
            t5 = str.forshort.ushort0;
            CONV_16(d2, k3);
            d21 = vis_faligndata(d1, d_1);
            t6 = str.forshort.ushort1;
            d22 = vis_faligndata(d2, d_2);
            CONV_16(d21, k2);
            t7 = str.forshort.ushort2;
            CONV_16(d22, k4);
            t8 = str.forshort.ushort3;
            *da = t5;
            da += nchannel;
            *da = t6;
            da += nchannel;
            *da = t7;
            da += nchannel;
            d1 = d_1;
            d2 = d_2;
            *da = t8;
            da += nchannel;
            s1++;
            s2++;
        }

        /* flush the final packed results, bounded by dend */
        str.value = vis_fxor(vis_fpackfix_pair(out0, out1), mask8000);

        if ((mlib_addr)da <= (mlib_addr)dend) {
            *da = str.forshort.ushort0;
            da += nchannel;
        }

        if ((mlib_addr)da <= (mlib_addr)dend) {
            *da = str.forshort.ushort1;
            da += nchannel;
        }

        if ((mlib_addr)da <= (mlib_addr)dend) {
            *da = str.forshort.ushort2;
            da += nchannel;
        }

        if ((mlib_addr)da <= (mlib_addr)dend) {
            *da = str.forshort.ushort3;
        }
    }

    __mlib_free(buff_src);
    return (MLIB_SUCCESS);
}
/*
 * 2x2 convolution of a 4-channel unsigned 16-bit image using SPARC VIS
 * intrinsics, no channel mask (all 4 channels processed); edge pixels are
 * not written (dw/dh are shrunk by one below).  Because one pixel is
 * exactly one mlib_d64, rows are processed 4 channel-samples at a time and
 * results go through an intermediate destination buffer.
 *
 * NOTE(review): GET_SRC_DST_PARAMETERS, PREPARE_INTERM_BUFFERS, LOOP_INI,
 * LOAD_LINE_INTO_BUFFER, CONV_16_BEGIN/CONV_16 and the COPY_* macros are
 * defined elsewhere in this file; comments on their effects are inferred
 * from usage — confirm against the macro definitions.
 */
mlib_status
mlib_v_conv2x2_u16nw_4(
    mlib_image *dst,
    const mlib_image *src,
    const mlib_s32 *kernel,
    mlib_s32 scalef_expon)
{
    /* pointers to dst row */
    mlib_u16 *da, *d_a;

    /* pointers to src, dst data */
    mlib_u16 *adr_dst, *adr_src, *dend;

    /* pointers to src rows */
    mlib_u16 *sa, *sa1;

    /* pointers to rows in interm. src buf */
    mlib_d64 *buff_src, *sbuf1, *sbuf2, *prow;

    /* pointer to row in interm. dst buf */
    mlib_d64 *dbuf;

    /* mlib_d64 pointers to rows in interm. src buf */
    mlib_d64 *s1, *s2;

    /* mlib_d64 pointer to row in interm. dst buf */
    mlib_d64 *ddst;

    /* data */
    mlib_d64 d1, d2, d_1, d_2;
    mlib_f32 k1, k2, k3, k4;

    /* src, dst and interm. buf. strides */
    mlib_s32 dlb, slb, buf_slb;
    mlib_s32 dh, dw;
    mlib_d64 out0, out1, tmp0, tmp1, tmp2, tmp3;
    mlib_d64 *dsa, *dp;
    mlib_d64 sd0, sd1;
    mlib_s32 emask;
    int gsr_scale, i, j;
    /* mask8000: XOR with it flips the sign bit of each 16-bit lane,
     * presumably mapping unsigned samples into the signed range used by
     * the VIS arithmetic — confirm against the CONV_16 macros */
    mlib_d64 ker_off, mask8000 = vis_to_double_dup(0x80008000);

    GET_SRC_DST_PARAMETERS();
    LOAD_KERNEL_INTO_FLOAT();

    /* program the GSR with the fixed-point scale; no alignment offset is
     * needed here (4-channel: the horizontally shifted pixel is simply the
     * next mlib_d64, so no vis_faligndata is used) */
    gsr_scale = 32 - scalef_expon;
    vis_write_gsr((gsr_scale << 3));

    buf_slb = (8 * dw + 16) >> 3;
    PREPARE_INTERM_BUFFERS();

    dw -= 1;
    /* dw counts u16 samples from here on: 4 channels per pixel */
    dw *= 4;
    dh -= 1;

    sa = adr_src;
    sa1 = sa + slb;
    d_a = adr_dst;

    /* load interm. src buff */
#pragma pipeloop(0)
    LOAD_LINE_INTO_BUFFER(sbuf2, sa, 4);

#pragma pipeloop(0)
    for (j = 0; j < dh; j++) {
        LOOP_INI();

        /* prefetch the next source row while convolving the current one */
#pragma pipeloop(0)
        LOAD_LINE_INTO_BUFFER(sbuf2, sa1, 4);

        d1 = *s1;
        d2 = *s2;
        d1 = vis_fxor(d1, mask8000);
        d2 = vis_fxor(d2, mask8000);

#pragma pipeloop(0)
        for (i = 0; i < dw; i += 4) {
            /* d_1/d_2 are the next pixel of each row, i.e. the column-
             * shifted operands for the right-hand kernel taps */
            d_1 = *(s1 + 1);
            d_2 = *(s2 + 1);
            d_1 = vis_fxor(d_1, mask8000);
            d_2 = vis_fxor(d_2, mask8000);
            CONV_16_BEGIN(d1, k1);
            CONV_16(d2, k3);
            CONV_16(d_1, k2);
            CONV_16(d_2, k4);
            (*ddst++) = vis_fxor(vis_fpackfix_pair(out0, out1), mask8000);
            d1 = d_1;
            d2 = d_2;
            s1++;
            s2++;
        }

        /* copy the intermediate destination row into dst, including the
         * possibly-partial tail */
        PREPARE_TO_COPY_INTERM_BUF_TO_DST();

#pragma pipeloop(0)
        COPY_INTERM_BUF_TO_DST();
        COPY_TAIL();

        sa1 = sa1 + slb;
        d_a += dlb;
    }

    __mlib_free(buff_src);
    return (MLIB_SUCCESS);
}