Example #1
// Explicit specialization of randv for int vectors; the primary template
// is assumed to be declared earlier in the same test harness.
template <>
void
randv<int>(vsip::Vector<int> v)
{
  // Portable generator seeded with 1; randu() yields uniform values in the
  // unit interval, which are scaled by the vector length and assigned to
  // the int view.
  vsip::Rand<float> rgen(1, true);

  v = rgen.randu(v.size()) * v.size();
}
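A minimal usage sketch for the specialization above, assuming the standard VSIPL++ headers and a primary template declaration (template <typename T> void randv(vsip::Vector<T> v);) earlier in the translation unit; the vector size here is illustrative only.

#include <vsip/initfin.hpp>
#include <vsip/vector.hpp>
#include <vsip/random.hpp>

int
main(int argc, char** argv)
{
  vsip::vsipl init(argc, argv);   // VSIPL++ library setup/teardown

  vsip::Vector<int> v(16);
  randv<int>(v);                  // fill v with random values scaled by v.size()
  return 0;
}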
Example #2
File: test_ramp.hpp Project: bambang/vsipl
// Fill a vector view with the ramp a, a + b, a + 2b, ... and return it.
template <typename T, typename B>
vsip::Vector<T, B>
test_ramp(vsip::Vector<T, B> view, T a, T b)
{
  for (vsip::index_type i=0; i<view.size(); ++i)
    view.put(i, a + T(i)*b);
  return view;
}
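A possible call site for test_ramp, sketched under the assumption that the function above is in scope; the headers, size, and ramp parameters are illustrative only.

#include <vsip/initfin.hpp>
#include <vsip/vector.hpp>

int
main(int argc, char** argv)
{
  vsip::vsipl init(argc, argv);

  vsip::Vector<float> v(8);
  test_ramp(v, 1.0f, 0.5f);   // v becomes 1.0, 1.5, 2.0, ..., 4.5
  return 0;
}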
Example #3
File: ref_conv.hpp Project: bambang/vsipl
// Reference 1-D convolution (with decimation D) used to check library results.
template <typename T, typename Block1, typename Block2, typename Block3>
void
conv(
  vsip::symmetry_type           sym,
  vsip::support_region_type     sup,
  vsip::const_Vector<T, Block1> coeff,
  vsip::const_Vector<T, Block2> in,
  vsip::Vector<T, Block3>       out,
  vsip::length_type             D)
{
  using vsip::index_type;
  using vsip::length_type;
  using vsip::stride_type;
  using vsip::Vector;
  using vsip::const_Vector;
  using vsip::Domain;
  using vsip::unbiased;

  using vsip::impl::convert_to_local;
  using vsip::impl::Working_view_holder;

  typedef typename vsip::impl::scalar_of<T>::type scalar_type;

  Working_view_holder<const_Vector<T, Block1> > w_coeff(coeff);
  Working_view_holder<const_Vector<T, Block2> > w_in(in);
  Working_view_holder<Vector<T, Block3> >       w_out(out);

  Vector<T> kernel = kernel_from_coeff(sym, w_coeff.view);

  length_type M = kernel.size(0);
  length_type N = in.size(0);
  length_type P = out.size(0);

  stride_type shift      = conv_expected_shift(sup, M);

  // expected_P == conv_output_size(sup, M, N, D) == P;
  assert(conv_output_size(sup, M, N, D) == P);

  Vector<T> sub(M);

  // Compute the reference result
  for (index_type i=0; i<P; ++i)
  {
    sub = T();
    index_type pos = i*D + shift;

    if (pos+1 < M)
      sub(Domain<1>(0, 1, pos+1)) = w_in.view(Domain<1>(pos, -1, pos+1));
    else if (pos >= N)
    {
      index_type start = pos - N + 1;
      sub(Domain<1>(start, 1, M-start)) = w_in.view(Domain<1>(N-1, -1, M-start));
    }
    else
      sub = w_in.view(Domain<1>(pos, -1, M));
      
    w_out.view(i) = dot(kernel, sub);
  }
}
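To make the reversed-window gather and zero padding above easier to follow, here is the full-support, unit-decimation case re-stated on std::vector. This is an illustrative sketch, not part of the project; the function name and value type are mine.

#include <cstddef>
#include <vector>

// out[i] = sum over k of kernel[k] * in[i - k], with zero padding outside the input.
std::vector<float>
full_conv_sketch(std::vector<float> const& kernel, std::vector<float> const& in)
{
  std::size_t M = kernel.size(), N = in.size();
  std::vector<float> out(N + M - 1, 0.0f);
  for (std::size_t i = 0; i < out.size(); ++i)
    for (std::size_t k = 0; k < M; ++k)
      if (i >= k && i - k < N)
        out[i] += kernel[k] * in[i - k];
  return out;
}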
Example #4
// Reference 1-D correlation used to check library results.
template <typename T, typename Block1, typename Block2, typename Block3>
void
corr(
  vsip::bias_type               bias,
  vsip::support_region_type     sup,
  vsip::const_Vector<T, Block1> ref,
  vsip::const_Vector<T, Block2> in,
  vsip::Vector<T, Block3>       out)
{
  using vsip::index_type;
  using vsip::length_type;
  using vsip::stride_type;
  using vsip::Vector;
  using vsip::Domain;
  using vsip::unbiased;

  typedef typename vsip::impl::Scalar_of<T>::type scalar_type;

  length_type M = ref.size(0);
  length_type N = in.size(0);
  length_type P = out.size(0);

  length_type expected_P = corr_output_size(sup, M, N);
  stride_type shift      = expected_shift(sup, M);

  assert(expected_P == P);

  Vector<T> sub(M);

  // compute correlation
  for (index_type i=0; i<P; ++i)
  {
    sub = T();
    stride_type pos = static_cast<stride_type>(i) + shift;
    scalar_type scale;

    if (pos < 0)
    {
      sub(Domain<1>(-pos, 1, M + pos)) = in(Domain<1>(0, 1, M+pos));
      scale = scalar_type(M + pos);
    }
    else if (pos + M > N)
    {
      sub(Domain<1>(0, 1, N-pos)) = in(Domain<1>(pos, 1, N-pos));
      scale = scalar_type(N - pos);
    }
    else
    {
      sub = in(Domain<1>(pos, 1, M));
      scale = scalar_type(M);
    }

#if VSIP_IMPL_CORR_CORRECT_SAME_SUPPORT_SCALING
#else
    if (sup == vsip::support_same)
    {
      if      (i < (M/2))     scale = i + (M+1)/2;         // i + ceil(M/2)
      else if (i < N - (M/2)) scale = M;                   // M
      else                    scale = N - 1 + (M+1)/2 - i; // N-1+ceil(M/2)-i
    }
#endif
      
    T val = dot(ref, impl_conj(sub));
    if (bias == vsip::unbiased)
      val /= scale;

    out(i) = val;
  }
}
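The simplest case above (minimum support, unbiased, real data, so the conjugation is a no-op) re-stated on std::vector; an illustrative sketch only, with a name and value type of my choosing, and it assumes in.size() >= ref.size().

#include <cstddef>
#include <vector>

// out[i] = (1/M) * sum over k of ref[k] * in[i + k], for i = 0 .. N - M.
std::vector<float>
min_corr_unbiased_sketch(std::vector<float> const& ref, std::vector<float> const& in)
{
  std::size_t M = ref.size(), N = in.size();
  std::vector<float> out(N - M + 1, 0.0f);
  for (std::size_t i = 0; i < out.size(); ++i)
  {
    float sum = 0.0f;
    for (std::size_t k = 0; k < M; ++k)
      sum += ref[k] * in[i + k];
    out[i] = sum / float(M);   // unbiased: divide by the window length
  }
  return out;
}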