/* XTR double: r = x^2 - 2*conj(x), operating on traces.
   Result is reduced here because callers chain this repeatedly. */
void FP4_xtr_D(FP4 *r,FP4 *x)
{
    FP4 twoconj;
    /* build 2*conj(x) first; safe even if r aliases x */
    FP4_conj(&twoconj,x);
    FP4_add(&twoconj,&twoconj,&twoconj);
    FP4_sqr(r,x);
    FP4_sub(r,r,&twoconj);
    FP4_reduce(r); /* reduce now: repeated calls would otherwise trigger automatic reductions */
}
/* r = x^n using the XTR method on traces of FP12 elements.
   x is the trace of an FP12; the ladder below (xtr_A/xtr_D) advances a
   triple of consecutive traces (a,b,c) per bit of the exponent.
   NOTE(review): x is temporarily conjugated in place inside the loop and
   restored immediately after — x is unchanged on return. */
void FP4_xtr_pow(FP4 *r,FP4 *x,BIG n)
{
    int i,par,nb;
    BIG v;
    FP2 w;
    FP4 t,a,b,c;
    /* a = 3 (trace of the identity) */
    BIG_zero(v); BIG_inc(v,3);
    FP2_from_BIG(&w,v);
    FP4_from_FP2(&a,&w);
    FP4_copy(&b,x);   /* b = trace(x)   */
    FP4_xtr_D(&c,x);  /* c = trace(x^2) */
    BIG_norm(n);
    par=BIG_parity(n);       /* remember parity; ladder works on floor((n-?)/2) */
    BIG_copy(v,n); BIG_shr(v,1);
    if (par==0) {BIG_dec(v,1); BIG_norm(v);} /* even n: adjust so final selection below is correct */
    nb=BIG_nbits(v);
    /* process exponent bits from most significant to least significant */
    for (i=nb-1;i>=0;i--)
    {
        if (!BIG_bit(v,i))
        {
            FP4_copy(&t,&b);
            FP4_conj(x,x);     /* use conj(x) for this step... */
            FP4_conj(&c,&c);
            FP4_xtr_A(&b,&a,&b,x,&c);
            FP4_conj(x,x);     /* ...then restore x */
            FP4_xtr_D(&c,&t);
            FP4_xtr_D(&a,&a);
        }
        else
        {
            FP4_conj(&t,&a);
            FP4_xtr_D(&a,&b);
            FP4_xtr_A(&b,&c,&b,x,&t);
            FP4_xtr_D(&c,&c);
        }
    }
    /* pick the trace matching the original parity of n */
    if (par==0) FP4_copy(r,&c);
    else FP4_copy(r,&b);
    FP4_reduce(r);
}
/* SU= 240 */
/* r = a^b via right-to-left binary exponentiation.
   NOTE: not constant time — exponent bits drive the multiply. */
void FP4_pow(FP4 *r,FP4* a,BIG b)
{
    FP4 base;
    BIG e,zero;
    BIG_zero(zero);
    BIG_norm(b);
    BIG_copy(e,b);      /* work on a copy so b is not consumed */
    FP4_copy(&base,a);
    FP4_one(r);
    for (;;)
    {
        int bit=BIG_parity(e);
        BIG_shr(e,1);
        if (bit) FP4_mul(r,r,&base);
        if (BIG_comp(e,zero)==0) break; /* no bits left — skip the final square */
        FP4_sqr(&base,&base);
    }
    FP4_reduce(r);
}
/* SU= 8 */
/* w = 3 * (a-component of x) — the trace of the FP12 element x. */
void FP12_trace(FP4 *w,FP12 *x)
{
    /* integer-multiply the first FP4 coefficient by 3 */
    FP4_imul(w,&x->a,3);
    FP4_reduce(w); /* return w in reduced form */
}
/* Reduce every FP4 component of w modulo the field prime.
   The three components are independent, so order is irrelevant. */
void FP12_reduce(FP12 *w)
{
    FP4_reduce(&w->c);
    FP4_reduce(&w->b);
    FP4_reduce(&w->a);
}