/* heap sort, based on Matt Mackall's linux kernel version */
/*
 * Sort @num elements of @size bytes starting at @base0, using @cmp_func
 * for ordering. @cxt is passed through verbatim to every comparison.
 * In-place, O(n log n) worst case, not stable.
 */
static void sort(void *base0, size_t num, size_t size,
		 struct fdisk_context *cxt,
		 int (*cmp_func)(struct fdisk_context *,
				 const void *, const void *))
{
	char *base = base0;		/* byte-addressable view of the array */
	long sz = (long) size;		/* element stride as a signed value */
	long i, n, c, r;		/* pre-scaled byte offsets */

	/*
	 * Nothing to sort for 0 or 1 elements; the guard also prevents
	 * the unsigned expression (num/2 - 1) below from wrapping to
	 * SIZE_MAX and being truncated into the signed counter.
	 */
	if (num < 2 || size == 0)
		return;

	/* pre-scale counters for performance */
	i = (long) (num / 2 - 1) * sz;
	n = (long) num * sz;

	/* heapify: sift down every internal node, last parent first */
	for ( ; i >= 0; i -= sz) {
		for (r = i; r * 2 + sz < n; r = c) {
			c = r * 2 + sz;
			if (c < n - sz
			    && cmp_func(cxt, base + c, base + c + size) < 0)
				c += sz;	/* pick the larger child */
			if (cmp_func(cxt, base + r, base + c) >= 0)
				break;
			generic_swap(base + r, base + c, size);
		}
	}

	/* sort: move the heap maximum to the end, shrink, re-sift root */
	for (i = n - sz; i > 0; i -= sz) {
		generic_swap(base, base + i, size);
		for (r = 0; r * 2 + sz < i; r = c) {
			c = r * 2 + sz;
			if (c < i - sz
			    && cmp_func(cxt, base + c, base + c + size) < 0)
				c += sz;
			if (cmp_func(cxt, base + r, base + c) >= 0)
				break;
			generic_swap(base + r, base + c, size);
		}
	}
}
/** * sort - sort an array of elements * @base: pointer to data to sort * @num: number of elements * @size: size of each element * @cmp: pointer to comparison function * @swap: pointer to swap function or NULL * * This function does a heapsort on the given array. You may provide a * swap function optimized to your element type. * * Sorting time is O(n log n) both on average and worst-case. While * qsort is about 20% faster on average, it suffers from exploitable * O(n*n) worst-case behavior and extra memory requirements that make * it less suitable for kernel use. */ void sort (void *base, unsigned int *index, size_t num, size_t size, int (*cmp) (const void *, const void *), void (*swap) (void *, void *, int size)) { /* pre-scale counters for performance */ // ssize_t i = (num/2 - 1) * size, n = num * size, c, r; long i = (num / 2 - 1) * size, idxi = (num / 2 - 1) * sizeof(unsigned int); long n = num * size, idxn = num * sizeof(unsigned int), idxc, idxr, c, r; void *idx = (void *)index; if (!swap) swap = (size == 4 ? 
u32_swap : generic_swap); /* heapify */ for ( ; i >= 0; i -= size, idxi -= sizeof(unsigned int)) { for (r = i, idxr = idxi; r * 2 + size < n; r = c, idxr = idxc) { c = r * 2 + size; idxc = idxr * 2 + sizeof(unsigned int); if (c < n - size && cmp (base + c, base + c + size) < 0) { c += size; idxc += sizeof(unsigned int); } if (cmp (base + r, base + c) >= 0) break; swap (base + r, base + c, size); generic_swap (idx + idxr, idx + idxc, sizeof(unsigned int)); } } /* sort */ for (i = n - size, idxi = idxn - sizeof(unsigned int); i > 0; i -= size, idxi -= sizeof(unsigned int)) { swap (base, base + i, size); generic_swap (idx, idx + idxi, sizeof(unsigned int)); for (r = 0, idxr = 0; r * 2 + size < i; r = c, idxr = idxc) { c = r * 2 + size; idxc = idxr * 2 + sizeof(unsigned int); if (c < i - size && cmp (base + c, base + c + size) < 0) { c += size; idxc += sizeof(unsigned int); } if (cmp (base + r, base + c) >= 0) break; swap (base + r, base + c, size); generic_swap (idx + idxr, idx + idxc, sizeof(unsigned int)); } } }