void vector_add(VECTOR *v, void* item){ vector_connect(v); lock_vector(v); _vector_add(v, item); unlock_vector(v); vector_disconnect(v); }
static void _add_punycode_if_needed(_psl_vector_t *v) { int it, n; /* do not use 'it < v->cur' since v->cur is changed by _vector_add() ! */ for (it = 0, n = v->cur; it < n; it++) { _psl_entry_t *e = _vector_get(v, it); if (_str_needs_encoding(e->label_buf)) { _psl_entry_t suffix, *suffixp; char lookupname[64] = ""; /* this is much slower than the libidn2 API but should have no license issues */ FILE *pp; char cmd[16 + sizeof(e->label_buf)]; snprintf(cmd, sizeof(cmd), "idn2 '%s'", e->label_buf); if ((pp = popen(cmd, "r"))) { if (fscanf(pp, "%63s", lookupname) >= 1 && strcmp(e->label_buf, lookupname)) { /* fprintf(stderr, "idn2 '%s' -> '%s'\n", e->label_buf, lookupname); */ _suffix_init(&suffix, lookupname, strlen(lookupname)); suffix.wildcard = e->wildcard; suffixp = _vector_get(v, _vector_add(v, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ } pclose(pp); } else fprintf(stderr, "Failed to call popen(%s, \"r\")\n", cmd); } } _vector_sort(v); }
/* direction +t */ void boundary_D_0(spinor * const r, spinor * const s, su3 * const u) { static su3_vector chi, psi; _vector_add(psi,s->s0,s->s2); _su3_multiply(chi,(*u),psi); _complex_times_vector(r->s0, phase_0, chi); _vector_assign(r->s2,r->s0); _vector_add(psi,s->s1,s->s3); _su3_multiply(chi,(*u),psi); _complex_times_vector(r->s1, phase_0, chi); _vector_assign(r->s3, r->s1); return; }
void _vector_insert(VECTOR *v, void* item, unsigned i){ if(i >= v->flags[VECTOR_SIZE_FLAG]){ _vector_add(v, item); } else { upsize_vector(v); memmove( v->data + i * v->flags[VECTOR_ITEM_WIDTH_FLAG], v->data + (i + 1) * v->flags[VECTOR_ITEM_WIDTH_FLAG], (v->flags[VECTOR_SIZE_FLAG] - i) * v->flags[VECTOR_ITEM_WIDTH_FLAG] ); v->flags[VECTOR_SIZE_FLAG]++; memcpy(v->data + i * v->flags[VECTOR_ITEM_WIDTH_FLAG], item, v->flags[VECTOR_ITEM_WIDTH_FLAG]); } }
static void _add_punycode_if_needed(_psl_vector_t *v, _psl_entry_t *e) { char *lookupname = NULL; int rc; uint8_t *lower, resbuf[256]; size_t len = sizeof(resbuf) - 1; /* leave space for additional \0 byte */ if (_str_is_ascii(e->label_buf)) return; /* we need a conversion to lowercase */ lower = u8_tolower((uint8_t *)e->label_buf, u8_strlen((uint8_t *)e->label_buf), 0, UNINORM_NFKC, resbuf, &len); if (!lower) { /* fprintf(stderr, "u8_tolower(%s) failed (%d)\n", e->label_buf, errno); */ return; } /* u8_tolower() does not terminate the result string */ if (lower == resbuf) { lower[len]=0; } else { uint8_t *tmp = lower; lower = (uint8_t *)strndup((char *)lower, len); free(tmp); } if ((rc = idn2_lookup_u8(lower, (uint8_t **)&lookupname, 0)) == IDN2_OK) { if (strcmp(e->label_buf, lookupname)) { _psl_entry_t suffix, *suffixp; /* fprintf(stderr, "libidn '%s' -> '%s'\n", e->label_buf, lookupname); */ _suffix_init(&suffix, lookupname, strlen(lookupname)); suffix.wildcard = e->wildcard; suffixp = _vector_get(v, _vector_add(v, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ } /* else ignore */ } /* else fprintf(stderr, "toASCII(%s) failed (%d): %s\n", lower, rc, idn2_strerror(rc)); */ if (lower != resbuf) free(lower); }
int vector_bsearch(VECTOR *v, void* key, void* buf, int (__cdecl *replace)(void*, void*), void* new_item, int *replaced, int add){ char *res = NULL; int result; vector_connect(v); lock_vector(v); if(replaced != NULL){ *replaced = 0; } if(v->compare != NULL){ if(v->flags[VECTOR_SIZE_FLAG] > 0){ res = (char*)bsearch(key, v->data, v->flags[VECTOR_SIZE_FLAG], v->flags[VECTOR_ITEM_WIDTH_FLAG], v->compare); } if(res != NULL){ if(replace != NULL){ if(replace(res, new_item) == 0){ if(replaced != NULL){ *replaced = 1; } } } if(buf != NULL){ memcpy(buf, res, v->flags[VECTOR_ITEM_WIDTH_FLAG]); } result = 0; } else { result = 1; if(add){ _vector_add(v, key); if(buf != NULL){ memcpy(buf, key, v->flags[VECTOR_ITEM_WIDTH_FLAG]); } } } } unlock_vector(v); vector_disconnect(v); return result; }
static void _add_punycode_if_needed(_psl_vector_t *v, _psl_entry_t *e) { char *lookupname = NULL; int rc; if (_str_is_ascii(e->label_buf)) return; /* idna_to_ascii_8z() automatically converts UTF-8 to lowercase */ if ((rc = idna_to_ascii_8z(e->label_buf, &lookupname, IDNA_USE_STD3_ASCII_RULES)) == IDNA_SUCCESS) { if (strcmp(e->label_buf, lookupname)) { _psl_entry_t suffix, *suffixp; /* fprintf(stderr, "libidn '%s' -> '%s'\n", e->label_buf, lookupname); */ _suffix_init(&suffix, lookupname, strlen(lookupname)); suffix.wildcard = e->wildcard; suffixp = _vector_get(v, _vector_add(v, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ } /* else ignore */ } /* else fprintf(_(stderr, "toASCII failed (%d): %s\n"), rc, idna_strerror(rc)); */ }
static void _add_punycode_if_needed(UIDNA *idna, _psl_vector_t *v, _psl_entry_t *e) { if (_str_is_ascii(e->label_buf)) return; /* IDNA2008 UTS#46 punycode conversion */ if (idna) { char lookupname[128] = ""; UErrorCode status = 0; UIDNAInfo info = UIDNA_INFO_INITIALIZER; UChar utf16_dst[128], utf16_src[128]; int32_t utf16_src_length; u_strFromUTF8(utf16_src, sizeof(utf16_src)/sizeof(utf16_src[0]), &utf16_src_length, e->label_buf, -1, &status); if (U_SUCCESS(status)) { int32_t dst_length = uidna_nameToASCII(idna, utf16_src, utf16_src_length, utf16_dst, sizeof(utf16_dst)/sizeof(utf16_dst[0]), &info, &status); if (U_SUCCESS(status)) { u_strToUTF8(lookupname, sizeof(lookupname), NULL, utf16_dst, dst_length, &status); if (U_SUCCESS(status)) { if (strcmp(e->label_buf, lookupname)) { _psl_entry_t suffix, *suffixp; /* fprintf(stderr, "libicu '%s' -> '%s'\n", e->label_buf, lookupname); */ _suffix_init(&suffix, lookupname, strlen(lookupname)); suffix.wildcard = e->wildcard; suffixp = _vector_get(v, _vector_add(v, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ } /* else ignore */ } /* else fprintf(stderr, "Failed to convert UTF-16 to UTF-8 (status %d)\n", status); */ } /* else fprintf(stderr, "Failed to convert to ASCII (status %d)\n", status); */ } /* else fprintf(stderr, "Failed to convert UTF-8 to UTF-16 (status %d)\n", status); */ } }
void deriv_Sb_D_psi(spinor * const l, spinor * const k, hamiltonian_field_t * const hf, const double factor) { #ifdef BGL __alignx(16, l); __alignx(16, k); #endif /* for parallelization */ #ifdef MPI xchange_lexicfield(k); xchange_lexicfield(l); #endif #ifdef OMP #define static #pragma omp parallel { #endif int ix,iy; su3 * restrict up ALIGN; su3 * restrict um ALIGN; static su3 v1,v2; static su3_vector psia,psib,phia,phib; static spinor rr; /* spinor * restrict r ALIGN; */ spinor * restrict sp ALIGN; spinor * restrict sm ALIGN; #ifdef OMP #undef static #endif #ifdef _KOJAK_INST #pragma pomp inst begin(derivSb) #endif #ifdef XLC #pragma disjoint(*sp, *sm, *up, *um) #endif /************** loop over all lattice sites ****************/ #ifdef OMP #pragma omp for #endif for(ix = 0; ix < (VOLUME); ix++){ rr = (*(l + ix)); /* rr=g_spinor_field[l][icx-ioff]; */ /*multiply the left vector with gamma5*/ _vector_minus_assign(rr.s2, rr.s2); _vector_minus_assign(rr.s3, rr.s3); /*********************** direction +0 ********************/ iy=g_iup[ix][0]; sp = k + iy; up=&hf->gaugefield[ix][0]; _vector_add(psia,(*sp).s0,(*sp).s2); _vector_add(psib,(*sp).s1,(*sp).s3); _vector_add(phia,rr.s0,rr.s2); _vector_add(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, phia, psia, phib, psib); _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka0,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[ix][0], 2.*factor, v1); /************** direction -0 ****************************/ iy=g_idn[ix][0]; sm = k + iy; um=&hf->gaugefield[iy][0]; _vector_sub(psia,(*sm).s0,(*sm).s2); _vector_sub(psib,(*sm).s1,(*sm).s3); _vector_sub(phia,rr.s0,rr.s2); _vector_sub(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, psia, phia, psib, phib); _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka0,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[iy][0], 2.*factor, v1); /*************** direction +1 **************************/ iy=g_iup[ix][1]; sp = k + iy; up=&hf->gaugefield[ix][1]; _vector_i_add(psia,(*sp).s0,(*sp).s3); _vector_i_add(psib,(*sp).s1,(*sp).s2); _vector_i_add(phia,rr.s0,rr.s3); _vector_i_add(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, phia, psia, phib, psib); _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka1,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[ix][1], 2.*factor, v1); /**************** direction -1 *************************/ iy=g_idn[ix][1]; sm = k + iy; um=&hf->gaugefield[iy][1]; _vector_i_sub(psia,(*sm).s0,(*sm).s3); _vector_i_sub(psib,(*sm).s1,(*sm).s2); _vector_i_sub(phia,rr.s0,rr.s3); _vector_i_sub(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, psia, phia, psib, phib); _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka1,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[iy][1], 2.*factor, v1); /*************** direction +2 **************************/ iy=g_iup[ix][2]; sp = k + iy; up=&hf->gaugefield[ix][2]; _vector_add(psia,(*sp).s0,(*sp).s3); _vector_sub(psib,(*sp).s1,(*sp).s2); _vector_add(phia,rr.s0,rr.s3); _vector_sub(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, phia, psia, phib, psib); _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka2,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[ix][2], 2.*factor, v1); /***************** direction -2 ************************/ iy=g_idn[ix][2]; sm = k + iy; um=&hf->gaugefield[iy][2]; _vector_sub(psia,(*sm).s0,(*sm).s3); _vector_add(psib,(*sm).s1,(*sm).s2); _vector_sub(phia,rr.s0,rr.s3); _vector_add(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, psia, phia, psib, phib); _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka2,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[iy][2], 2.*factor, v1); /****************** direction +3 ***********************/ iy=g_iup[ix][3]; sp = k + iy; up=&hf->gaugefield[ix][3]; _vector_i_add(psia,(*sp).s0,(*sp).s2); _vector_i_sub(psib,(*sp).s1,(*sp).s3); _vector_i_add(phia,rr.s0,rr.s2); _vector_i_sub(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, phia, psia, phib, psib); _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka3,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[ix][3], 2.*factor, v1); /***************** direction -3 ************************/ iy=g_idn[ix][3]; sm = k + iy; um=&hf->gaugefield[iy][3]; _vector_i_sub(psia,(*sm).s0,(*sm).s2); _vector_i_add(psib,(*sm).s1,(*sm).s3); _vector_i_sub(phia,rr.s0,rr.s2); _vector_i_add(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, psia, phia, psib, phib); _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka3,v2); _trace_lambda_mul_add_assign_nonlocal(hf->derivative[iy][3], 2.*factor, v1); /****************** end of loop ************************/ } #ifdef _KOJAK_INST #pragma pomp inst end(derivSb) #endif #ifdef OMP } /*OpenMP closing brace */ #endif }
/* for ieo=0, k resides on odd sites and l on even sites */ void Hopping_Matrix(int ieo, spinor * const l, spinor * const k){ int ix,iy; int ioff,ioff2,icx,icy; su3 * restrict up, * restrict um; spinor * restrict r, * restrict sp, * restrict sm; spinor temp; #ifdef _GAUGE_COPY if(g_update_gauge_copy) { update_backward_gauge(); } #endif /* for parallelization */ # if (defined MPI && !(defined _NO_COMM)) xchange_field(k, ieo); # endif if(k == l){ printf("Error in H_psi (simple.c):\n"); printf("Arguments k and l must be different\n"); printf("Program aborted\n"); exit(1); } if(ieo == 0){ ioff = 0; } else{ ioff = (VOLUME+RAND)/2; } ioff2 = (VOLUME+RAND)/2-ioff; /**************** loop over all lattice sites ****************/ for (icx = ioff; icx < (VOLUME/2 + ioff); icx++){ ix=g_eo2lexic[icx]; r=l+(icx-ioff); /*********************** direction +0 ************************/ iy=g_iup[ix][0]; icy=g_lexic2eosub[iy]; sp=k+icy; # if ((defined _GAUGE_COPY)) up=&g_gauge_field_copy[icx][0]; # else up=&g_gauge_field[ix][0]; # endif _vector_add(psi,(*sp).s0,(*sp).s2); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka0,chi); _vector_assign(temp.s0,psi); _vector_assign(temp.s2,psi); _vector_add(psi,(*sp).s1,(*sp).s3); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka0,chi); _vector_assign(temp.s1,psi); _vector_assign(temp.s3,psi); /*********************** direction -0 ************************/ iy=g_idn[ix][0]; icy=g_lexic2eosub[iy]; sm=k+icy; # if ((defined _GAUGE_COPY)) um = up+1; # else um=&g_gauge_field[iy][0]; # endif _vector_sub(psi,(*sm).s0,(*sm).s2); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka0,chi); _vector_add_assign(temp.s0,psi); _vector_sub_assign(temp.s2,psi); _vector_sub(psi,(*sm).s1,(*sm).s3); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka0,chi); _vector_add_assign(temp.s1,psi); _vector_sub_assign(temp.s3,psi); /*********************** direction +1 ************************/ iy=g_iup[ix][1]; icy=g_lexic2eosub[iy]; sp=k+icy; # if ((defined _GAUGE_COPY)) up=um+1; # else up+=1; # endif _vector_i_add(psi,(*sp).s0,(*sp).s3); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka1,chi); _vector_add_assign(temp.s0,psi); _vector_i_sub_assign(temp.s3,psi); _vector_i_add(psi,(*sp).s1,(*sp).s2); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka1,chi); _vector_add_assign(temp.s1,psi); _vector_i_sub_assign(temp.s2,psi); /*********************** direction -1 ************************/ iy=g_idn[ix][1]; icy=g_lexic2eosub[iy]; sm=k+icy; # ifndef _GAUGE_COPY um=&g_gauge_field[iy][1]; # else um=up+1; # endif _vector_i_sub(psi,(*sm).s0,(*sm).s3); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka1,chi); _vector_add_assign(temp.s0,psi); _vector_i_add_assign(temp.s3,psi); _vector_i_sub(psi,(*sm).s1,(*sm).s2); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka1,chi); _vector_add_assign(temp.s1,psi); _vector_i_add_assign(temp.s2,psi); /*********************** direction +2 ************************/ iy=g_iup[ix][2]; icy=g_lexic2eosub[iy]; sp=k+icy; # if ((defined _GAUGE_COPY)) up=um+1; # else up+=1; # endif _vector_add(psi,(*sp).s0,(*sp).s3); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka2,chi); _vector_add_assign(temp.s0,psi); _vector_add_assign(temp.s3,psi); _vector_sub(psi,(*sp).s1,(*sp).s2); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka2,chi); _vector_add_assign(temp.s1,psi); _vector_sub_assign(temp.s2,psi); /*********************** direction -2 ************************/ iy=g_idn[ix][2]; icy=g_lexic2eosub[iy]; sm=k+icy; # ifndef _GAUGE_COPY um = &g_gauge_field[iy][2]; # else um = up +1; # endif _vector_sub(psi,(*sm).s0,(*sm).s3); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka2,chi); _vector_add_assign(temp.s0,psi); _vector_sub_assign(temp.s3,psi); _vector_add(psi,(*sm).s1,(*sm).s2); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka2,chi); _vector_add_assign(temp.s1,psi); _vector_add_assign(temp.s2,psi); /*********************** direction +3 ************************/ iy=g_iup[ix][3]; icy=g_lexic2eosub[iy]; sp=k+icy; # if ((defined _GAUGE_COPY)) up=um+1; # else up+=1; # endif _vector_i_add(psi,(*sp).s0,(*sp).s2); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka3,chi); _vector_add_assign(temp.s0,psi); _vector_i_sub_assign(temp.s2,psi); _vector_i_sub(psi,(*sp).s1,(*sp).s3); _su3_multiply(chi,(*up),psi); _complex_times_vector(psi,ka3,chi); _vector_add_assign(temp.s1,psi); _vector_i_add_assign(temp.s3,psi); /*********************** direction -3 ************************/ iy=g_idn[ix][3]; icy=g_lexic2eosub[iy]; sm=k+icy; # ifndef _GAUGE_COPY um = &g_gauge_field[iy][3]; # else um = up+1; # endif _vector_i_sub(psi,(*sm).s0,(*sm).s2); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka3,chi); _vector_add((*r).s0, temp.s0, psi); _vector_i_add((*r).s2, temp.s2, psi); _vector_i_add(psi,(*sm).s1,(*sm).s3); _su3_inverse_multiply(chi,(*um),psi); _complexcjg_times_vector(psi,ka3,chi); _vector_add((*r).s1, temp.s1, psi); _vector_i_sub((*r).s3, temp.s3, psi); /************************ end of loop ************************/ } }
void deriv_Sb_D_psi(spinor * const l, spinor * const k) { /* const int l, const int k){ */ int ix,iy; su3 * restrict up ALIGN; su3 * restrict um ALIGN; /* su3adj * restrict ddd; */ /* static su3adj der; */ static su3 v1,v2; static su3_vector psia,psib,phia,phib; static spinor rr; /* spinor * restrict r ALIGN; */ spinor * restrict sp ALIGN; spinor * restrict sm ALIGN; #ifdef _KOJAK_INST #pragma pomp inst begin(derivSb) #endif #ifdef XLC #pragma disjoint(*sp, *sm, *up, *um) #endif #ifdef BGL __alignx(16, l); __alignx(16, k); #endif /* for parallelization */ #ifdef MPI xchange_lexicfield(k); xchange_lexicfield(l); #endif /************** loop over all lattice sites ****************/ for(ix = 0; ix < (VOLUME); ix++){ rr = (*(l + ix)); /* rr=g_spinor_field[l][icx-ioff]; */ /*multiply the left vector with gamma5*/ _vector_minus_assign(rr.s2, rr.s2); _vector_minus_assign(rr.s3, rr.s3); /*********************** direction +0 ********************/ iy=g_iup[ix][0]; sp = k + iy; /* sp=&g_spinor_field[k][icy]; */ up=&g_gauge_field[ix][0]; _vector_add(psia,(*sp).s0,(*sp).s2); _vector_add(psib,(*sp).s1,(*sp).s3); _vector_add(phia,rr.s0,rr.s2); _vector_add(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, phia, psia, phib, psib); /* _vector_tensor_vector(v1,phia,psia); */ /* _vector_tensor_vector(v2,phib,psib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka0,v2); _trace_lambda_add_assign(df0[ix][0], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[ix][0]; */ /* _add_su3adj(*ddd,der); */ /************** direction -0 ****************************/ iy=g_idn[ix][0]; sm = k + iy; /* sm=&g_spinor_field[k][icy]; */ um=&g_gauge_field[iy][0]; _vector_sub(psia,(*sm).s0,(*sm).s2); _vector_sub(psib,(*sm).s1,(*sm).s3); _vector_sub(phia,rr.s0,rr.s2); _vector_sub(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, psia, phia, psib, phib); /* _vector_tensor_vector(v1,psia,phia); */ /* _vector_tensor_vector(v2,psib,phib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka0,v2); _trace_lambda_add_assign(df0[iy][0], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[iy][0]; */ /* _add_su3adj(*ddd,der); */ /*************** direction +1 **************************/ iy=g_iup[ix][1]; sp = k + iy; /* sp=&g_spinor_field[k][icy]; */ up=&g_gauge_field[ix][1]; _vector_i_add(psia,(*sp).s0,(*sp).s3); _vector_i_add(psib,(*sp).s1,(*sp).s2); _vector_i_add(phia,rr.s0,rr.s3); _vector_i_add(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, phia, psia, phib, psib); /* _vector_tensor_vector(v1,phia,psia); */ /* _vector_tensor_vector(v2,phib,psib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka1,v2); _trace_lambda_add_assign(df0[ix][1], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[ix][1]; */ /* _add_su3adj(*ddd,der); */ /**************** direction -1 *************************/ iy=g_idn[ix][1]; sm = k + iy; /* sm=&g_spinor_field[k][icy]; */ um=&g_gauge_field[iy][1]; _vector_i_sub(psia,(*sm).s0,(*sm).s3); _vector_i_sub(psib,(*sm).s1,(*sm).s2); _vector_i_sub(phia,rr.s0,rr.s3); _vector_i_sub(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, psia, phia, psib, phib); /* _vector_tensor_vector(v1,psia,phia); */ /* _vector_tensor_vector(v2,psib,phib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka1,v2); _trace_lambda_add_assign(df0[iy][1], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[iy][1]; */ /* _add_su3adj(*ddd,der); */ /*************** direction +2 **************************/ iy=g_iup[ix][2]; sp = k + iy; /* sp=&g_spinor_field[k][icy]; */ up=&g_gauge_field[ix][2]; _vector_add(psia,(*sp).s0,(*sp).s3); _vector_sub(psib,(*sp).s1,(*sp).s2); _vector_add(phia,rr.s0,rr.s3); _vector_sub(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, phia, psia, phib, psib); /* _vector_tensor_vector(v1,phia,psia); */ /* _vector_tensor_vector(v2,phib,psib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka2,v2); _trace_lambda_add_assign(df0[ix][2], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[ix][2]; */ /* _add_su3adj(*ddd,der); */ /***************** direction -2 ************************/ iy=g_idn[ix][2]; sm = k + iy; /* sm=&g_spinor_field[k][icy]; */ um=&g_gauge_field[iy][2]; _vector_sub(psia,(*sm).s0,(*sm).s3); _vector_add(psib,(*sm).s1,(*sm).s2); _vector_sub(phia,rr.s0,rr.s3); _vector_add(phib,rr.s1,rr.s2); _vector_tensor_vector_add(v1, psia, phia, psib, phib); /* _vector_tensor_vector(v1,psia,phia); */ /* _vector_tensor_vector(v2,psib,phib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka2,v2); _trace_lambda_add_assign(df0[iy][2], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[iy][2]; */ /* _add_su3adj(*ddd,der); */ /****************** direction +3 ***********************/ iy=g_iup[ix][3]; sp = k + iy; /* sp=&g_spinor_field[k][icy]; */ up=&g_gauge_field[ix][3]; _vector_i_add(psia,(*sp).s0,(*sp).s2); _vector_i_sub(psib,(*sp).s1,(*sp).s3); _vector_i_add(phia,rr.s0,rr.s2); _vector_i_sub(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, phia, psia, phib, psib); /* _vector_tensor_vector(v1,phia,psia); */ /* _vector_tensor_vector(v2,phib,psib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*up,v1); _complex_times_su3(v1,ka3,v2); _trace_lambda_add_assign(df0[ix][3], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[ix][3]; */ /* _add_su3adj(*ddd,der); */ /***************** direction -3 ************************/ iy=g_idn[ix][3]; sm = k + iy; /* sm=&g_spinor_field[k][icy]; */ um=&g_gauge_field[iy][3]; _vector_i_sub(psia,(*sm).s0,(*sm).s2); _vector_i_add(psib,(*sm).s1,(*sm).s3); _vector_i_sub(phia,rr.s0,rr.s2); _vector_i_add(phib,rr.s1,rr.s3); _vector_tensor_vector_add(v1, psia, phia, psib, phib); /* _vector_tensor_vector(v1,psia,phia); */ /* _vector_tensor_vector(v2,psib,phib); */ /* _su3_plus_su3(v1,v1,v2); */ _su3_times_su3d(v2,*um,v1); _complex_times_su3(v1,ka3,v2); _trace_lambda_add_assign(df0[iy][3], v1); /* _trace_lambda(der,v1); */ /* ddd=&df0[iy][3]; */ /* _add_su3adj(*ddd,der); */ /****************** end of loop ************************/ } #ifdef _KOJAK_INST #pragma pomp inst end(derivSb) #endif }
/* for ieo=0, k resides on odd sites and l on even sites */ void Hopping_Matrix(const int ieo, spinor * const l, spinor * const k){ int i,ix; su3 * restrict U ALIGN; spinor * restrict s ALIGN; spinor rs; static su3_vector psi, chi, psi2, chi2; halfspinor * restrict * phi ALIGN; halfspinor32 * restrict * phi32 ALIGN; #ifdef _KOJAK_INST #pragma pomp inst begin(hoppingmatrix) #endif #ifdef XLC #pragma disjoint(*l, *k, *U, *s) #endif #ifdef _GAUGE_COPY if(g_update_gauge_copy) { update_backward_gauge(); } #endif if(k == l){ printf("Error in H_psi (simple.c):\n"); printf("Arguments k and l must be different\n"); printf("Program aborted\n"); exit(1); } s = k; if(ieo == 0) { U = g_gauge_field_copy[0][0]; } else { U = g_gauge_field_copy[1][0]; } if(g_sloppy_precision == 1 && g_sloppy_precision_flag == 1) { phi32 = NBPointer32[ieo]; /**************** loop over all lattice sites ****************/ ix=0; for(i = 0; i < (VOLUME)/2; i++){ _vector_assign(rs.s0, (*s).s0); _vector_assign(rs.s1, (*s).s1); _vector_assign(rs.s2, (*s).s2); _vector_assign(rs.s3, (*s).s3); s++; /*********************** direction +0 ************************/ _vector_add(psi, rs.s0, rs.s2); _su3_multiply(chi,(*U),psi); _complex_times_vector((*phi32[ix]).s0, ka0, chi); _vector_add(psi, rs.s1, rs.s3); _su3_multiply(chi,(*U),psi); _complex_times_vector((*phi32[ix]).s1, ka0, chi); U++; ix++; /*********************** direction -0 ************************/ _vector_sub((*phi32[ix]).s0, rs.s0, rs.s2); _vector_sub((*phi32[ix]).s1, rs.s1, rs.s3); ix++; /*********************** direction +1 ************************/ _vector_i_add(psi, rs.s0, rs.s3); _su3_multiply(chi, (*U), psi); _complex_times_vector((*phi32[ix]).s0, ka1, chi); _vector_i_add(psi, rs.s1, rs.s2); _su3_multiply(chi, (*U), psi); _complex_times_vector((*phi32[ix]).s1, ka1, chi); U++; ix++; /*********************** direction -1 ************************/ _vector_i_sub((*phi32[ix]).s0, rs.s0, rs.s3); _vector_i_sub((*phi32[ix]).s1, rs.s1, rs.s2); ix++; /*********************** direction +2 ************************/ _vector_add(psi, rs.s0, rs.s3); _su3_multiply(chi,(*U),psi); _complex_times_vector((*phi32[ix]).s0, ka2, chi); _vector_sub(psi, rs.s1, rs.s2); _su3_multiply(chi,(*U),psi); _complex_times_vector((*phi32[ix]).s1, ka2, chi); U++; ix++; /*********************** direction -2 ************************/ _vector_sub((*phi32[ix]).s0, rs.s0, rs.s3); _vector_add((*phi32[ix]).s1, rs.s1, rs.s2); ix++; /*********************** direction +3 ************************/ _vector_i_add(psi, rs.s0, rs.s2); _su3_multiply(chi, (*U), psi); _complex_times_vector((*phi32[ix]).s0, ka3, chi); _vector_i_sub(psi, rs.s1, rs.s3); _su3_multiply(chi,(*U),psi); _complex_times_vector((*phi32[ix]).s1, ka3, chi); U++; ix++; /*********************** direction -3 ************************/ _vector_i_sub((*phi32[ix]).s0, rs.s0, rs.s2); _vector_i_add((*phi32[ix]).s1, rs.s1, rs.s3); ix++; /************************ end of loop ************************/ } # if (defined MPI && !defined _NO_COMM) xchange_halffield32(); # endif s = l; phi32 = NBPointer32[2 + ieo]; if(ieo == 0) { U = g_gauge_field_copy[1][0]; } else { U = g_gauge_field_copy[0][0]; } ix = 0; for(i = 0; i < (VOLUME)/2; i++){ /*********************** direction +0 ************************/ _vector_assign(rs.s0, (*phi32[ix]).s0); _vector_assign(rs.s2, (*phi32[ix]).s0); _vector_assign(rs.s1, (*phi32[ix]).s1); _vector_assign(rs.s3, (*phi32[ix]).s1); ix++; /*********************** direction -0 ************************/ _vector_assign(psi, (*phi32[ix]).s0); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka0,chi); _vector_add_assign(rs.s0, psi); _vector_sub_assign(rs.s2, psi); _vector_assign(psi, (*phi32[ix]).s1); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka0,chi); _vector_add_assign(rs.s1, psi); _vector_sub_assign(rs.s3, psi); ix++; U++; /*********************** direction +1 ************************/ _vector_add_assign(rs.s0, (*phi32[ix]).s0); _vector_i_sub_assign(rs.s3, (*phi32[ix]).s0); _vector_add_assign(rs.s1, (*phi32[ix]).s1); _vector_i_sub_assign(rs.s2, (*phi32[ix]).s1); ix++; /*********************** direction -1 ************************/ _vector_assign(psi, (*phi32[ix]).s0); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka1,chi); _vector_add_assign(rs.s0, psi); _vector_i_add_assign(rs.s3, psi); _vector_assign(psi, (*phi32[ix]).s1); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka1,chi); _vector_add_assign(rs.s1, psi); _vector_i_add_assign(rs.s2, psi); U++; ix++; /*********************** direction +2 ************************/ _vector_add_assign(rs.s0, (*phi32[ix]).s0); _vector_add_assign(rs.s3, (*phi32[ix]).s0); _vector_add_assign(rs.s1, (*phi32[ix]).s1); _vector_sub_assign(rs.s2, (*phi32[ix]).s1); ix++; /*********************** direction -2 ************************/ _vector_assign(psi, (*phi32[ix]).s0); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka2,chi); _vector_add_assign(rs.s0, psi); _vector_sub_assign(rs.s3, psi); _vector_assign(psi, (*phi32[ix]).s1); _su3_inverse_multiply(chi, (*U), psi); _complexcjg_times_vector(psi,ka2,chi); _vector_add_assign(rs.s1, psi); _vector_add_assign(rs.s2, psi); U++; ix++; /*********************** direction +3 ************************/ _vector_add_assign(rs.s0, (*phi32[ix]).s0); _vector_i_sub_assign(rs.s2, (*phi32[ix]).s0); _vector_add_assign(rs.s1, (*phi32[ix]).s1); _vector_i_add_assign(rs.s3, (*phi32[ix]).s1); ix++; /*********************** direction -3 ************************/ _vector_assign(psi, (*phi32[ix]).s0); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka3,chi); _vector_add((*s).s0, rs.s0, psi); _vector_i_add((*s).s2, rs.s2, psi); _vector_assign(psi, (*phi32[ix]).s1); _su3_inverse_multiply(chi,(*U), psi); _complexcjg_times_vector(psi,ka3,chi); _vector_add((*s).s1, rs.s1, psi); _vector_i_sub((*s).s3, rs.s3, psi); U++; ix++; s++; } } else { phi = NBPointer[ieo]; /**************** loop over all lattice sites ****************/ ix=0; /* #pragma ivdep*/ for(i = 0; i < (VOLUME)/2; i++){ _vector_assign(rs.s0, (*s).s0); _vector_assign(rs.s1, (*s).s1); _vector_assign(rs.s2, (*s).s2); _vector_assign(rs.s3, (*s).s3); s++; /*********************** direction +0 ************************/ _vector_add(psi, rs.s0, rs.s2); _vector_add(psi2, rs.s1, rs.s3); _su3_multiply(chi,(*U),psi); _su3_multiply(chi2,(*U),psi2); _complex_times_vector((*phi[ix]).s0, ka0, chi); _complex_times_vector((*phi[ix]).s1, ka0, chi2); U++; ix++; /*********************** direction -0 ************************/ _vector_sub((*phi[ix]).s0, rs.s0, rs.s2); _vector_sub((*phi[ix]).s1, rs.s1, rs.s3); ix++; /*********************** direction +1 ************************/ _vector_i_add(psi, rs.s0, rs.s3); _vector_i_add(psi2, rs.s1, rs.s2); _su3_multiply(chi, (*U), psi); _su3_multiply(chi2, (*U), psi2); _complex_times_vector((*phi[ix]).s0, ka1, chi); _complex_times_vector((*phi[ix]).s1, ka1, chi2); U++; ix++; /*********************** direction -1 ************************/ _vector_i_sub((*phi[ix]).s0, rs.s0, rs.s3); _vector_i_sub((*phi[ix]).s1, rs.s1, rs.s2); ix++; /*********************** direction +2 ************************/ _vector_add(psi, rs.s0, rs.s3); _vector_sub(psi2, rs.s1, rs.s2); _su3_multiply(chi,(*U),psi); _su3_multiply(chi2,(*U),psi2); _complex_times_vector((*phi[ix]).s0, ka2, chi); _complex_times_vector((*phi[ix]).s1, ka2, chi2); U++; ix++; /*********************** direction -2 ************************/ _vector_sub((*phi[ix]).s0, rs.s0, rs.s3); _vector_add((*phi[ix]).s1, rs.s1, rs.s2); ix++; /*********************** direction +3 ************************/ _vector_i_add(psi, rs.s0, rs.s2); _vector_i_sub(psi2, rs.s1, rs.s3); _su3_multiply(chi, (*U), psi); _su3_multiply(chi2,(*U),psi2); _complex_times_vector((*phi[ix]).s0, ka3, chi); _complex_times_vector((*phi[ix]).s1, ka3, chi2); U++; ix++; /*********************** direction -3 ************************/ _vector_i_sub((*phi[ix]).s0, rs.s0, rs.s2); _vector_i_add((*phi[ix]).s1, rs.s1, rs.s3); ix++; /************************ end of loop ************************/ } # if (defined MPI && !defined _NO_COMM) xchange_halffield(); # endif s = l; phi = NBPointer[2 + ieo]; if(ieo == 0) { U = g_gauge_field_copy[1][0]; } else { U = g_gauge_field_copy[0][0]; } ix = 0; /* #pragma ivdep */ for(i = 0; i < (VOLUME)/2; i++){ /*********************** direction +0 ************************/ _vector_assign(rs.s0, (*phi[ix]).s0); _vector_assign(rs.s2, (*phi[ix]).s0); _vector_assign(rs.s1, (*phi[ix]).s1); _vector_assign(rs.s3, (*phi[ix]).s1); ix++; /*********************** direction -0 ************************/ _su3_inverse_multiply(chi,(*U),(*phi[ix]).s0); _su3_inverse_multiply(chi2,(*U),(*phi[ix]).s1); _complexcjg_times_vector(psi,ka0,chi); _complexcjg_times_vector(psi2,ka0,chi2); _vector_add_assign(rs.s0, psi); _vector_sub_assign(rs.s2, psi); _vector_add_assign(rs.s1, psi2); _vector_sub_assign(rs.s3, psi2); ix++; U++; /*********************** direction +1 ************************/ _vector_add_assign(rs.s0, (*phi[ix]).s0); _vector_i_sub_assign(rs.s3, (*phi[ix]).s0); _vector_add_assign(rs.s1, (*phi[ix]).s1); _vector_i_sub_assign(rs.s2, (*phi[ix]).s1); ix++; /*********************** direction -1 ************************/ _su3_inverse_multiply(chi,(*U), (*phi[ix]).s0); _su3_inverse_multiply(chi2, (*U), (*phi[ix]).s1); _complexcjg_times_vector(psi,ka1,chi); _complexcjg_times_vector(psi2,ka1,chi2); _vector_add_assign(rs.s0, psi); _vector_i_add_assign(rs.s3, psi); _vector_add_assign(rs.s1, psi2); _vector_i_add_assign(rs.s2, psi2); U++; ix++; /*********************** direction +2 ************************/ _vector_add_assign(rs.s0, (*phi[ix]).s0); _vector_add_assign(rs.s3, (*phi[ix]).s0); _vector_add_assign(rs.s1, (*phi[ix]).s1); _vector_sub_assign(rs.s2, (*phi[ix]).s1); ix++; /*********************** direction -2 ************************/ _su3_inverse_multiply(chi,(*U), (*phi[ix]).s0); _su3_inverse_multiply(chi2, (*U), (*phi[ix]).s1); _complexcjg_times_vector(psi,ka2,chi); _complexcjg_times_vector(psi2,ka2,chi2); _vector_add_assign(rs.s0, psi); _vector_sub_assign(rs.s3, psi); _vector_add_assign(rs.s1, psi2); _vector_add_assign(rs.s2, psi2); U++; ix++; /*********************** direction +3 ************************/ _vector_add_assign(rs.s0, (*phi[ix]).s0); _vector_i_sub_assign(rs.s2, (*phi[ix]).s0); _vector_add_assign(rs.s1, (*phi[ix]).s1); _vector_i_add_assign(rs.s3, (*phi[ix]).s1); ix++; /*********************** direction -3 ************************/ _su3_inverse_multiply(chi,(*U), (*phi[ix]).s0); _su3_inverse_multiply(chi2, (*U), (*phi[ix]).s1); _complexcjg_times_vector(psi,ka3,chi); _complexcjg_times_vector(psi2,ka3,chi2); _vector_add((*s).s0, rs.s0, psi); _vector_i_add((*s).s2, rs.s2, psi); _vector_add((*s).s1, rs.s1, psi2); _vector_i_sub((*s).s3, rs.s3, psi2); U++; ix++; s++; } } #ifdef _KOJAK_INST #pragma pomp inst end(hoppingmatrix) #endif }
/** * psl_load_fp: * @fp: FILE pointer * * This function loads the public suffixes from a FILE pointer. * To free the allocated resources, call psl_free(). * * The suffixes are expected to be lowercase UTF-8 encoded if they are international. * * Returns: Pointer to a PSL context or %NULL on failure. * * Since: 0.1 */ psl_ctx_t *psl_load_fp(FILE *fp) { psl_ctx_t *psl; _psl_entry_t suffix, *suffixp; char buf[256], *linep, *p; #ifdef WITH_LIBICU UIDNA *idna; UErrorCode status = 0; #endif if (!fp) return NULL; if (!(psl = calloc(1, sizeof(psl_ctx_t)))) return NULL; #ifdef WITH_LIBICU idna = uidna_openUTS46(UIDNA_USE_STD3_RULES, &status); #endif /* * as of 02.11.2012, the list at http://publicsuffix.org/list/ contains ~6000 rules and 40 exceptions. * as of 19.02.2014, the list at http://publicsuffix.org/list/ contains ~6500 rules and 19 exceptions. */ psl->suffixes = _vector_alloc(8*1024, _suffix_compare); psl->suffix_exceptions = _vector_alloc(64, _suffix_compare); while ((linep = fgets(buf, sizeof(buf), fp))) { while (isspace(*linep)) linep++; /* ignore leading whitespace */ if (!*linep) continue; /* skip empty lines */ if (*linep == '/' && linep[1] == '/') continue; /* skip comments */ /* parse suffix rule */ for (p = linep; *linep && !isspace(*linep);) linep++; *linep = 0; if (*p == '!') { /* add to exceptions */ if (_suffix_init(&suffix, p + 1, linep - p - 1) == 0) { suffixp = _vector_get(psl->suffix_exceptions, _vector_add(psl->suffix_exceptions, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ #ifdef WITH_LIBICU _add_punycode_if_needed(idna, psl->suffix_exceptions, suffixp); #elif defined(WITH_LIBIDN2) || defined(WITH_LIBIDN) _add_punycode_if_needed(psl->suffix_exceptions, suffixp); #endif } } else { /* add to suffixes */ if (_suffix_init(&suffix, p, linep - p) == 0) { suffixp = _vector_get(psl->suffixes, _vector_add(psl->suffixes, &suffix)); suffixp->label = suffixp->label_buf; /* set label to changed address */ #ifdef WITH_LIBICU _add_punycode_if_needed(idna, psl->suffixes, suffixp); #elif defined(WITH_LIBIDN2) || defined(WITH_LIBIDN) _add_punycode_if_needed(psl->suffixes, suffixp); #endif } } } _vector_sort(psl->suffix_exceptions); _vector_sort(psl->suffixes); #ifdef WITH_LIBICU if (idna) uidna_close(idna); #endif return psl; }