/* Computes non-Sibsonian interpolation weights for point `p' from its `n'
 * natural neighbours, whose point ids are listed in rotational order in
 * `nids' (the triplet pm1/p0/pp1 below walks that ring).
 *
 * Returns 1 on success; returns 0 after storing a perturbation direction in
 * nn->dx, nn->dy when a degenerate (collinear) configuration is detected, so
 * that the caller can retry from a slightly shifted point.
 */
static int nnpi_neighbours_process(nnpi* nn, point* p, int n, int* nids)
{
    delaunay* d = nn->d;
    int i;

    for (i = 0; i < n; ++i) {
        int im1 = (i + n - 1) % n;      /* previous neighbour in the ring */
        int ip1 = (i + 1) % n;          /* next neighbour in the ring */
        point* p0 = &d->points[nids[i]];
        point* pp1 = &d->points[nids[ip1]];
        point* pm1 = &d->points[nids[im1]];
        double nom1, nom2, denom1, denom2;

        /*
         * Cross products: each denominator is (twice) the signed area of the
         * triangle (p, p0, pp1) resp. (p, p0, pm1); zero means p is
         * collinear with that pair of data points.
         */
        denom1 = (p0->x - p->x) * (pp1->y - p->y) - (p0->y - p->y) * (pp1->x - p->x);
        denom2 = (p0->x - p->x) * (pm1->y - p->y) - (p0->y - p->y) * (pm1->x - p->x);
        if (denom1 == 0.0) {
            if (p->x == p0->x && p->y == p0->y) {
                /* p coincides with a data point: give it a dominating weight */
                nnpi_add_weight(nn, nids[i], BIGNUMBER);
                return 1;
            } else if (p->x == pp1->x && p->y == pp1->y) {
                nnpi_add_weight(nn, nids[ip1], BIGNUMBER);
                return 1;
            } else {
                /*
                 * p lies on the segment (p0, pp1) but on neither endpoint:
                 * ask the caller to retry from a point shifted
                 * perpendicularly to that edge.
                 */
                nn->dx = EPS_SHIFT * (pp1->y - p0->y);
                nn->dy = -EPS_SHIFT * (pp1->x - p0->x);
                return 0;
            }
        }
        if (denom2 == 0.0) {
            if (p->x == pm1->x && p->y == pm1->y) {
                nnpi_add_weight(nn, nids[im1], BIGNUMBER);
                return 1;
            } else {
                /* collinear with (p0, pm1): same perpendicular-shift retry */
                nn->dx = EPS_SHIFT * (pm1->y - p0->y);
                nn->dy = -EPS_SHIFT * (pm1->x - p0->x);
                return 0;
            }
        }
        /* dot products for the non-Sibsonian (Laplace) weight of nids[i] */
        nom1 = (p0->x - pp1->x) * (pp1->x - p->x) + (p0->y - pp1->y) * (pp1->y - p->y);
        nom2 = (p0->x - pm1->x) * (pm1->x - p->x) + (p0->y - pm1->y) * (pm1->y - p->y);
        nnpi_add_weight(nn, nids[i], nom1 / denom1 - nom2 / denom2);
    }

    return 1;
}
/* Calculates the natural-neighbour weights for point `p'.
 *
 * If the direct calculation fails (a degenerate configuration, flagged by
 * _nnpi_calculate_weights() returning 0 after storing a shift direction in
 * nn->dx, nn->dy), the point is perturbed by +/- (dx, dy) and the two
 * resulting weight sets are averaged.
 *
 * Fixes over the previous revision:
 *   - nnpi_reset() is now called inside the retry loops; without it, the
 *     weights partially accumulated by a failed attempt were carried over
 *     into the next attempt and corrupted the result (the other variant of
 *     this function in this file already does this).
 */
void nnpi_calculate_weights(nnpi* nn, point* p)
{
    point pp;
    int nvertices = 0;
    int* vertices = NULL;
    double* weights = NULL;
    int i;

    nnpi_reset(nn);
    if (_nnpi_calculate_weights(nn, p)) {
        nnpi_normalize_weights(nn);
        return;
    }

    /* first perturbed attempt: step in the +(dx, dy) direction */
    nnpi_reset(nn);
    pp.x = p->x + nn->dx;
    pp.y = p->y + nn->dy;
    /*
     * If the triangles are extremely thin, then making a small step in
     * perpendicular direction may turn out between another pair of data
     * points. A very rare event. Take care of this.
     */
    while (!_nnpi_calculate_weights(nn, &pp)) {
        /* discard the weights accumulated by the failed attempt */
        nnpi_reset(nn);
        pp.x = p->x + nn->dx * RANDOM;
        pp.y = p->y + nn->dy * RANDOM;
    }
    nnpi_normalize_weights(nn);

    /* save the first set of (vertex, weight) pairs */
    nvertices = nn->nvertices;
    if (nvertices > 0) {
        vertices = malloc(nvertices * sizeof(int));
        memcpy(vertices, nn->vertices, nvertices * sizeof(int));
        weights = malloc(nvertices * sizeof(double));
        memcpy(weights, nn->weights, nvertices * sizeof(double));
    }

    /* second perturbed attempt: step in the -(dx, dy) direction */
    nnpi_reset(nn);
    pp.x = p->x - nn->dx;
    pp.y = p->y - nn->dy;
    while (!_nnpi_calculate_weights(nn, &pp)) {
        nnpi_reset(nn);
        pp.x = p->x - nn->dx * RANDOM;
        pp.y = p->y - nn->dy * RANDOM;
    }
    nnpi_normalize_weights(nn);

    /* average the two normalized weight sets */
    for (i = 0; i < nn->nvertices; ++i)
        nn->weights[i] /= 2.0;
    for (i = 0; i < nvertices; ++i)
        nnpi_add_weight(nn, vertices[i], weights[i] / 2.0);

    if (nvertices > 0) {
        free(vertices);
        free(weights);
    }
}
/* Core weight-calculation step for one interpolation point `p'.
 *
 * Returns 1 on success (weights accumulated into `nn'), 0 when the
 * NON_SIBSONIAN path detects a degenerate configuration and requests a
 * retry from a perturbed point (see nnpi_neighbours_process()).
 */
static int _nnpi_calculate_weights(nnpi* nn, point* p)
{
    int* tids = NULL;
    int i;

    delaunay_circles_find(nn->d, p, &nn->ncircles, &tids);
    if (nn->ncircles == 0)
        /* no circumcircles contain p -- nothing to do, treated as success */
        return 1;

    /*
     * The algorithms of calculating weights for Sibson and non-Sibsonian
     * interpolations are quite different; in the first case, the weights are
     * calculated by processing Delaunay triangles whose tricircles contain
     * the interpolated point; in the second case, they are calculated by
     * processing triplets of natural neighbours by moving clockwise or
     * counterclockwise around the interpolated point.
     */
    if (nn_rule == SIBSON) {
        for (i = 0; i < nn->ncircles; ++i)
            nnpi_triangle_process(nn, p, tids[i]);
        if (nn->bad != NULL) {
            if (ht_getnentries(nn->bad) != 0) {
                /*
                 * The idea behind this hack is that if the "infinite circle"
                 * hash table has not been cleared at the end of the weight
                 * calculation process for a point, then this is caused by
                 * misbehavior of some in-circle tests due to the numeric
                 * round-up in cases when the interpolation point is close to
                 * one of the data points. The code below effectively replaces
                 * the interpolated value by the data value in the closest
                 * point after detecting a non-cleared hash table.
                 */
                int vid_closest = -1;
                double dist_closest = DBL_MAX;

                /* find the data point nearest to p among the accumulated
                 * vertices and let its weight dominate */
                for (i = 0; i < nn->nvertices; ++i) {
                    point* pp = &nn->d->points[nn->vertices[i]];
                    double dist = hypot(p->x - pp->x, p->y - pp->y);

                    if (dist < dist_closest) {
                        vid_closest = nn->vertices[i];
                        dist_closest = dist;
                    }
                }
                nnpi_add_weight(nn, vid_closest, BIGNUMBER);
            }
        }
        return 1;
    } else if (nn_rule == NON_SIBSONIAN) {
        int nneigh = 0;
        int* nids = NULL;
        int status;

        nnpi_getneighbours(nn, p, nn->ncircles, tids, &nneigh, &nids);
        status = nnpi_neighbours_process(nn, p, nneigh, nids);
        free(nids);
        return status;
    } else
        nn_quit("programming error");

    /* unreachable: nn_quit() does not return */
    return 0;
}
/* This is a central procedure for the Natural Neighbours interpolation. It
 * uses the Watson's algorithm for the required areas calculation and implies
 * that the vertices of the delaunay triangulation are listed in uniform
 * (clockwise or counterclockwise) order.
 *
 * Fixes over the previous revision:
 *   - ht_insert()/ht_delete() are no longer called inside assert(): with
 *     NDEBUG defined, assert() expands to nothing and the hash-table
 *     operations -- which the algorithm depends on -- silently disappeared.
 *   - the malloc'ed substitute circle is now freed when its hash-table entry
 *     is removed, plugging a memory leak.
 */
static void nnpi_triangle_process(nnpi* nn, point* p, int i)
{
    delaunay* d = nn->d;
    triangle* t = &d->triangles[i];
    circle* c = &d->circles[i];
    circle cs[3];
    int j;

    /*
     * There used to be a useful assertion here:
     *
     * assert(circle_contains(c, p));
     *
     * I removed it after introducing flag `contains' to
     * delaunay_circles_find(). It looks like the code is robust enough to
     * run without this assertion.
     */

    /*
     * Sibson interpolation by using Watson's algorithm
     */
    for (j = 0; j < 3; ++j) {
        int j1 = (j + 1) % 3;
        int j2 = (j + 2) % 3;
        int v1 = t->vids[j1];
        int v2 = t->vids[j2];

        if (!circle_build2(&cs[j], &d->points[v1], &d->points[v2], p)) {
            point* p1 = &d->points[v1];
            point* p2 = &d->points[v2];

            if ((fabs(p1->x - p->x) + fabs(p1->y - p->y)) / c->r < EPS_SAME) {
                /* p is (numerically) coincident with data point v1 */
                nnpi_add_weight(nn, v1, BIGNUMBER);
                return;
            } else if ((fabs(p2->x - p->x) + fabs(p2->y - p->y)) / c->r < EPS_SAME) {
                /* p is (numerically) coincident with data point v2 */
                nnpi_add_weight(nn, v2, BIGNUMBER);
                return;
            }
        }
    }

    for (j = 0; j < 3; ++j) {
        int j1 = (j + 1) % 3;
        int j2 = (j + 2) % 3;
        double det = ((cs[j1].x - c->x) * (cs[j2].y - c->y) - (cs[j2].x - c->x) * (cs[j1].y - c->y));

        if (isnan(det)) {
            /*
             * If the determinant is NaN, then the interpolation point lies
             * almost in between two data points. This case is difficult to
             * handle robustly because the areas calculated are obtained as a
             * difference between two big numbers.
             *
             * Here this is handled in the following way. If a circle is
             * recognised as very large in circle_build2(), then its
             * parameters are replaced by NaNs, which results in det above
             * being NaN. The resulting area to be calculated for a vertex
             * does not change if the circle center is moved along some line.
             * The closer it is moved to the actual data point positions, the
             * more numerically robust the calculation of areas becomes. In
             * particular, it can be moved to coincide with one of the other
             * circle centers. When this is done, it is ticked by placing the
             * edge parameters into the hash table, so that when the
             * "cancelling" would be about to be done, this new position is
             * used instead.
             *
             * One complicated circumstance is that sometimes a circle is
             * recognised as very large in cases when it is actually not,
             * when the interpolated point is close to a data point. This is
             * handled by a special treatment in _nnpi_calculate_weights().
             */
            int remove = 1;
            circle* cc = NULL;
            int key[2];

            key[0] = t->vids[j];
            if (nn->bad == NULL)
                nn->bad = ht_create_i2(HT_SIZE);

            if (isnan(cs[j1].x)) {
                key[1] = t->vids[j2];
                cc = ht_find(nn->bad, &key);
                if (cc == NULL) {
                    /* first encounter of this edge: record the substitute
                     * circle center and keep the entry for the mate triangle */
                    void* prev;

                    remove = 0;
                    cc = malloc(sizeof(circle));
                    cc->x = cs[j2].x;
                    cc->y = cs[j2].y;
                    /* side effect kept outside assert() so it survives NDEBUG */
                    prev = ht_insert(nn->bad, &key, cc);
                    assert(prev == NULL);
                    (void) prev;
                }
                det = ((cc->x - c->x) * (cs[j2].y - c->y) - (cs[j2].x - c->x) * (cc->y - c->y));
            } else {            /* j2 */
                key[1] = t->vids[j1];
                cc = ht_find(nn->bad, &key);
                if (cc == NULL) {
                    void* prev;

                    remove = 0;
                    cc = malloc(sizeof(circle));
                    cc->x = cs[j1].x;
                    cc->y = cs[j1].y;
                    prev = ht_insert(nn->bad, &key, cc);
                    assert(prev == NULL);
                    (void) prev;
                }
                det = ((cs[j1].x - c->x) * (cc->y - c->y) - (cc->x - c->x) * (cs[j1].y - c->y));
            }

            if (remove) {
                /* second encounter: drop the entry and free the circle that
                 * was malloc'ed on the first encounter (was leaked before) */
                void* deleted = ht_delete(nn->bad, &key);

                assert(deleted == cc);
                (void) deleted;
                free(cc);
            }
        }
        nnpi_add_weight(nn, t->vids[j], det);
    }
}
/* Calculates the natural-neighbour weights for point `p'.
 *
 * If the direct calculation fails, the point is perturbed by a small step
 * (dx, dy) derived from the data bounding box, weights are calculated at
 * p + (dx, dy) and at the mirrored point, and the two normalized weight
 * sets are averaged.
 */
void nnpi_calculate_weights(nnpi* nn, point* p)
{
    point pp;
    int nvertices = 0;
    int* vertices = NULL;
    double* weights = NULL;
    int i;

    nnpi_reset(nn);
    if (_nnpi_calculate_weights(nn, p)) {
        nnpi_normalize_weights(nn);
        return;
    }

    /* direct calculation failed: choose a shift proportional to the extent
     * of the triangulation */
    nnpi_reset(nn);
    nn->dx = (nn->d->xmax - nn->d->xmin) * EPS_SHIFT;
    nn->dy = (nn->d->ymax - nn->d->ymin) * EPS_SHIFT;
    pp.x = p->x + nn->dx;
    pp.y = p->y + nn->dy;
    /* retry with randomized shifts until a valid configuration is found;
     * nnpi_reset() discards the weights of each failed attempt */
    while (!_nnpi_calculate_weights(nn, &pp)) {
        nnpi_reset(nn);
        pp.x = p->x + nn->dx * RANDOM;
        pp.y = p->y + nn->dy * RANDOM;
    }
    nnpi_normalize_weights(nn);

    /* save the first set of (vertex, weight) pairs */
    nvertices = nn->nvertices;
    if (nvertices > 0) {
        vertices = malloc(nvertices * sizeof(int));
        memcpy(vertices, nn->vertices, nvertices * sizeof(int));
        weights = malloc(nvertices * sizeof(double));
        memcpy(weights, nn->weights, nvertices * sizeof(double));
    }

    /* second attempt at the point mirrored about p; additionally require a
     * non-empty vertex set */
    nnpi_reset(nn);
    pp.x = 2.0 * p->x - pp.x;
    pp.y = 2.0 * p->y - pp.y;
    while (!_nnpi_calculate_weights(nn, &pp) || nn->nvertices == 0) {
        nnpi_reset(nn);
        pp.x = p->x + nn->dx * RANDOM;
        pp.y = p->y + nn->dy * RANDOM;
    }
    nnpi_normalize_weights(nn);

    /* average the two normalized weight sets (halve only when there is a
     * first set to merge with) */
    if (nvertices > 0)
        for (i = 0; i < nn->nvertices; ++i)
            nn->weights[i] /= 2.0;
    for (i = 0; i < nvertices; ++i)
        nnpi_add_weight(nn, vertices[i], weights[i] / 2.0);

    if (nvertices > 0) {
        free(vertices);
        free(weights);
    }
}
/* This is a central procedure for the Natural Neighbours interpolation. It
 * uses the Watson's algorithm for the required areas calculation and implies
 * that the vertices of the delaunay triangulation are listed in uniform
 * (clockwise or counterclockwise) order.
 */
static void nnpi_triangle_process(nnpi* nn, point* p, int i)
{
    delaunay* d = nn->d;
    triangle* t = &d->triangles[i];
    circle* c = &d->circles[i];
    circle cs[3];
    int j;

    /*
     * There used to be a useful assertion here:
     *
     * assert(circle_contains(c, p));
     *
     * I removed it after introducing flag `contains' to
     * delaunay_circles_find(). It looks like the code is robust enough to
     * run without this assertion.
     */

    /*
     * Sibson interpolation by using Watson's algorithm
     */
    for (j = 0; j < 3; ++j) {
        int j1 = (j + 1) % 3;
        int j2 = (j + 2) % 3;
        int v1 = t->vids[j1];
        int v2 = t->vids[j2];

        if (!circle_build2(&cs[j], &d->points[v1], &d->points[v2], p)) {
            point* p1 = &d->points[v1];
            point* p2 = &d->points[v2];

            if ((fabs(p1->x - p->x) + fabs(p1->y - p->y)) / c->r < EPS_SAME) {
                /*
                 * p is (numerically) coincident with data point v1;
                 * previously tested as: if (p1->x == p->x && p1->y == p->y) {
                 */
                nnpi_add_weight(nn, v1, BIGNUMBER);
                return;
            } else if ((fabs(p2->x - p->x) + fabs(p2->y - p->y)) / c->r < EPS_SAME) {
                /*
                 * p is (numerically) coincident with data point v2;
                 * previously: } else if (p2->x == p->x && p2->y == p->y) {
                 */
                nnpi_add_weight(nn, v2, BIGNUMBER);
                return;
            }
        }
    }

    for (j = 0; j < 3; ++j) {
        int j1 = (j + 1) % 3;
        int j2 = (j + 2) % 3;
        double det = ((cs[j1].x - c->x) * (cs[j2].y - c->y) - (cs[j2].x - c->x) * (cs[j1].y - c->y));

        if (isnan(det)) {
            /*
             * Here, if the determinant is NaN, then the interpolation point
             * lies almost in between two data points. This case is difficult
             * to handle robustly because the areas (determinants) calculated
             * by Watson's algorithm are obtained as a difference between two
             * big numbers. This case is handled here in the following way.
             *
             * If a circle is recognised as very large in circle_build2(),
             * then its parameters are replaced by NaNs, which results in the
             * variable `det' above being NaN.
             *
             * When this happens inside the convex hull of the data, there is
             * always a triangle on another side of the edge, processing of
             * which also produces an invalid circle. Processing of this edge
             * yields two pairs of infinite determinants, with singularities
             * of each pair cancelling if the point moves slightly off the
             * edge.
             *
             * Each of the determinants corresponds to the (signed) area of a
             * triangle, and an infinite determinant corresponds to the area
             * of a triangle with one vertex moved to infinity. "Subtracting"
             * one triangle from another within each pair yields a valid
             * quadrilateral (in fact, a trapezoid). The doubled area of
             * these quadrilaterals is calculated in the cycle over ii below.
             */
            int j1bad = isnan(cs[j1].x);
            int key[2];
            double* v = NULL;

            key[0] = t->vids[j];

            if (nn->bad == NULL)
                nn->bad = ht_create_i2(HT_SIZE);

            key[1] = (j1bad) ? t->vids[j2] : t->vids[j1];
            v = ht_find(nn->bad, &key);

            if (v == NULL) {
                /*
                 * First encounter of this edge: stash the two finite circle
                 * centers (v[0..3]); the weight contribution is deferred
                 * until the mate triangle is processed.
                 */
                v = malloc(8 * sizeof(double));
                if (j1bad) {
                    v[0] = cs[j2].x;
                    v[1] = cs[j2].y;
                } else {
                    v[0] = cs[j1].x;
                    v[1] = cs[j1].y;
                }
                v[2] = c->x;
                v[3] = c->y;
                (void) ht_insert(nn->bad, &key, v);
                det = 0.0;
            } else {
                /*
                 * Second encounter: complete the quadrilateral (v[4..7]) and
                 * compute its doubled area by the shoelace formula.
                 */
                int ii;

                if (j1bad) {
                    v[6] = cs[j2].x;
                    v[7] = cs[j2].y;
                } else {
                    v[6] = cs[j1].x;
                    v[7] = cs[j1].y;
                }
                v[4] = c->x;
                v[5] = c->y;
                det = 0;
                for (ii = 0; ii < 4; ++ii) {
                    int ii1 = (ii + 1) % 4;

                    det += (v[ii * 2] + v[ii1 * 2]) * (v[ii * 2 + 1] - v[ii1 * 2 + 1]);
                }
                det = fabs(det);
                free(v);
                ht_delete(nn->bad, &key);
            }
        }
        nnpi_add_weight(nn, t->vids[j], det);
    }
}