Example #1
/**
 * n_dhcp4_client_lease_link() - link lease into probe
 * @lease:                      the lease to operate on
 * @probe:                      the probe to link the lease into
 *
 * Associate a lease with a probe. The lease must not already be linked.
 */
void n_dhcp4_client_lease_link(NDhcp4ClientLease *lease, NDhcp4ClientProbe *probe) {
        c_assert(!lease->probe);
        c_assert(!c_list_is_linked(&lease->probe_link));

        lease->probe = probe;
        c_list_link_tail(&probe->lease_list, &lease->probe_link);
}
Example #2
static void test_parent_end(TestContext *ctx) {
        size_t i;
        int r;

        for (i = 0; i < ctx->n_nodes; ++i)
                c_assert(!c_rbnode_is_linked(ctx->nodes[i]));

        r = munmap(ctx->map, ctx->mapsize);
        c_assert(r >= 0);
}
Example #3
static cg_texture_t *
make_texture (void)
{
  void *tex_data;
  uint32_t *p;
  cg_texture_t *tex;
  int partx, party, width, height;

  p = tex_data = c_malloc (TEXTURE_SIZE * TEXTURE_SIZE * 4);

  /* Make a texture with a different color for each part */
  for (party = 0; party < PARTS; party++)
    {
      height = (party < PARTS - 1
                ? PART_SIZE
                : TEXTURE_SIZE - PART_SIZE * (PARTS - 1));

      for (partx = 0; partx < PARTS; partx++)
        {
          uint32_t color = corner_colors[party * PARTS + partx];
          width = (partx < PARTS - 1
                   ? PART_SIZE
                   : TEXTURE_SIZE - PART_SIZE * (PARTS - 1));

          while (width-- > 0)
            *(p++) = C_UINT32_TO_BE (color);
        }

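      /* Replicate the row just written down the rest of this band */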
      while (--height > 0)
        {
          memcpy (p, p - TEXTURE_SIZE, TEXTURE_SIZE * 4);
          p += TEXTURE_SIZE;
        }
    }

  tex = test_cg_texture_new_from_data (test_dev,
                                          TEXTURE_SIZE,
                                          TEXTURE_SIZE,
                                          TEST_CG_TEXTURE_NO_ATLAS,
                                          CG_PIXEL_FORMAT_RGBA_8888_PRE,
                                          TEXTURE_SIZE * 4,
                                          tex_data);

  c_free (tex_data);

  if (test_verbose ())
    {
      if (cg_texture_is_sliced (tex))
        c_print ("Texture is sliced\n");
      else
        c_print ("Texture is not sliced\n");
    }

  /* The texture should be sliced unless NPOTs are supported */
  c_assert (cg_has_feature (test_dev, CG_FEATURE_ID_TEXTURE_NPOT)
            ? !cg_texture_is_sliced (tex)
            : cg_texture_is_sliced (tex));

  return tex;
}
Example #4
static void test_parent_start(TestContext *ctx) {
        size_t i;

        /*
         * Generate a tree with @n_nodes entries. We store the entries in
         * @ctx->node_mem, generate a randomized access-map in @ctx->nodes
         * (i.e., an array of pointers to entries in @ctx->node_mem, but in
         * random order), and a temporary cache for free use in the parent.
         *
         * All this is stored in a MAP_SHARED memory region so it is equivalent
         * in child and parent.
         */

        ctx->n_nodes = 32;
        ctx->mapsize = sizeof(CRBTree);
        ctx->mapsize += ctx->n_nodes * sizeof(TestNode);
        ctx->mapsize += ctx->n_nodes * sizeof(CRBNode*);
        ctx->mapsize += ctx->n_nodes * sizeof(CRBNode*);

        ctx->map = mmap(NULL, ctx->mapsize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
        c_assert(ctx->map != MAP_FAILED);

        ctx->tree = (void *)ctx->map;
        ctx->node_mem = (void *)(ctx->tree + 1);
        ctx->nodes = (void *)(ctx->node_mem + ctx->n_nodes);
        ctx->cache = (void *)(ctx->nodes + ctx->n_nodes);

        for (i = 0; i < ctx->n_nodes; ++i) {
                ctx->nodes[i] = &ctx->node_mem[i].rb;
                c_rbnode_init(ctx->nodes[i]);
        }

        shuffle(ctx->nodes, ctx->n_nodes);
}
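The shuffle() helper itself is not shown in this excerpt; a minimal sketch of what an in-place Fisher-Yates shuffle of the pointer array could look like (assuming rand() from <stdlib.h>, seeded elsewhere):

static void shuffle(CRBNode **nodes, size_t n_memb) {
        CRBNode *tmp;
        size_t i, j;

        /* swap each element with a randomly chosen earlier (or same) slot */
        for (i = n_memb; i > 0; --i) {
                j = (size_t)rand() % i;
                tmp = nodes[j];
                nodes[j] = nodes[i - 1];
                nodes[i - 1] = tmp;
        }
}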
Example #5
/**
 * socket_bind_if() - bind socket to a network interface
 * @socket:                     socket to operate on
 * @ifindex:                    index of network interface to bind to, or 0
 *
 * This binds the socket given via @socket to the network interface specified
 * via @ifindex. It uses the underlying SO_BINDTODEVICE socket option of the
 * Linux kernel. However, if available, it prefers the newer SO_BINDTOIFINDEX
 * option, which avoids temporarily resolving the interface name and thus does
 * not suffer from a race condition.
 *
 * Return: 0 on success, negative error code on failure.
 */
int socket_bind_if(int socket, int ifindex) {
        char ifname[IFNAMSIZ] = {};
        int r;

        c_assert(ifindex >= 0);

        /*
         * We first try the newer SO_BINDTOIFINDEX. If it is not available on
         * the running kernel, we fall back to SO_BINDTODEVICE. This, however,
         * requires us to first resolve the ifindex to an ifname. Note that
         * this is racy, since the device name might theoretically change
         * asynchronously.
         *
         * Using 0 as ifindex will remove the device-binding. For
         * SO_BINDTOIFINDEX we simply pass the 0 through to the kernel, which
         * recognizes this correctly. For SO_BINDTODEVICE we pass the empty
         * string, which the kernel recognizes as a request to remove the
         * binding.
         *
         * The commit introducing SO_BINDTOIFINDEX first appeared in linux-5.1:
         *
         *     commit f5dd3d0c9638a9d9a02b5964c4ad636f06cf7e2c
         *     Author: David Herrmann <*****@*****.**>
         *     Date:   Tue Jan 15 14:42:14 2019 +0100
         *
         *         net: introduce SO_BINDTOIFINDEX sockopt
         *
         * In older kernels, setsockopt(2) is guaranteed to return ENOPROTOOPT
         * for this option.
         */

#ifdef SO_BINDTOIFINDEX
        r = setsockopt(socket,
                       SOL_SOCKET,
                       SO_BINDTOIFINDEX,
                       &ifindex,
                       sizeof(ifindex));
        if (r >= 0)
                return 0;
        else if (errno != ENOPROTOOPT)
                return -errno;
#endif /* SO_BINDTOIFINDEX */

        if (ifindex > 0) {
                r = socket_SIOCGIFNAME(socket, ifindex, &ifname);
                if (r)
                        return r;
        }

        r = setsockopt(socket,
                       SOL_SOCKET,
                       SO_BINDTODEVICE,
                       ifname,
                       strlen(ifname));
        if (r < 0)
                return -errno;

        return 0;
}
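A hypothetical caller sketch; the UDP socket, the interface name "eth0", and if_nametoindex() from <net/if.h> are illustration only and not part of the code above:

        int fd, r;

        fd = socket(AF_INET, SOCK_DGRAM | SOCK_CLOEXEC, 0);
        c_assert(fd >= 0);

        /* bind to a specific interface by index */
        r = socket_bind_if(fd, (int)if_nametoindex("eth0"));
        if (r < 0)
                return r;       /* negative errno-style code, e.g. -ENODEV */

        /* passing 0 removes the binding again */
        r = socket_bind_if(fd, 0);
        c_assert(!r);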
Example #6
static void n_dhcp4_server_lease_free(NDhcp4ServerLease *lease) {
        c_assert(!lease->server);

        c_list_unlink(&lease->server_link);

        n_dhcp4_incoming_free(lease->request);
        free(lease);
}
Example #7
static void test_parent_step(TestContext *ctx) {
        size_t i, i_level;
        CRBNode *n, *p;

        n = ctx->tree->root;
        i_level = 0;

        while (n) {
                /* verify that we haven't visited @n, yet */
                c_assert(!fetch_visit(n));

                /* verify @n is a valid node */
                for (i = 0; i < ctx->n_nodes; ++i)
                        if (n == ctx->nodes[i])
                                break;
                c_assert(i < ctx->n_nodes);

                /* pre-order traversal and marker for cycle detection */
                if (n->left) {
                        toggle_visit(n, true);
                        ctx->cache[i_level++] = n;
                        n = n->left;
                } else if (n->right) {
                        toggle_visit(n, true);
                        ctx->cache[i_level++] = n;
                        n = n->right;
                } else {
                        while (i_level > 0) {
                                p = ctx->cache[i_level - 1];
                                if (p->right && n != p->right) {
                                        n = p->right;
                                        break;
                                }
                                --i_level;
                                n = p;
                                toggle_visit(n, false);
                        }
                        if (i_level == 0)
                                break;
                }
        }
}
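For comparison, the same left-to-right pre-order visit expressed recursively; a sketch only, which omits the visit markers used for cycle detection and the node validation performed above:

static void visit_preorder(CRBNode *n) {
        if (!n)
                return;

        /* visit @n first (this is where test_parent_step() verifies it),
         * then recurse into its left subtree, then its right subtree */
        visit_preorder(n->left);
        visit_preorder(n->right);
}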
Example #8
static void
egl_attributes_from_framebuffer_config(
    cg_display_t *display, cg_framebuffer_config_t *config, EGLint *attributes)
{
    cg_renderer_t *renderer = display->renderer;
    cg_renderer_egl_t *egl_renderer = renderer->winsys;
    int i = 0;

    /* Let the platform add attributes first */
    if (egl_renderer->platform_vtable->add_config_attributes)
        i = egl_renderer->platform_vtable->add_config_attributes(
            display, config, attributes);

    if (config->need_stencil) {
        attributes[i++] = EGL_STENCIL_SIZE;
        attributes[i++] = 2;
    }

    attributes[i++] = EGL_RED_SIZE;
    attributes[i++] = 1;
    attributes[i++] = EGL_GREEN_SIZE;
    attributes[i++] = 1;
    attributes[i++] = EGL_BLUE_SIZE;
    attributes[i++] = 1;

    attributes[i++] = EGL_ALPHA_SIZE;
    attributes[i++] = config->has_alpha ? 1 : EGL_DONT_CARE;

    attributes[i++] = EGL_DEPTH_SIZE;
    attributes[i++] = 1;

    attributes[i++] = EGL_BUFFER_SIZE;
    attributes[i++] = EGL_DONT_CARE;

    attributes[i++] = EGL_RENDERABLE_TYPE;
    attributes[i++] =
        ((renderer->driver == CG_DRIVER_GL || renderer->driver == CG_DRIVER_GL3)
         ? EGL_OPENGL_BIT
         : EGL_OPENGL_ES2_BIT);

    attributes[i++] = EGL_SURFACE_TYPE;
    attributes[i++] = EGL_WINDOW_BIT;

    if (config->samples_per_pixel) {
        attributes[i++] = EGL_SAMPLE_BUFFERS;
        attributes[i++] = 1;
        attributes[i++] = EGL_SAMPLES;
        attributes[i++] = config->samples_per_pixel;
    }

    attributes[i++] = EGL_NONE;

    c_assert(i < MAX_EGL_CONFIG_ATTRIBS);
}
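A hypothetical caller sketch: egl_display and fb_config stand in for the renderer's EGLDisplay and the framebuffer config being resolved, and are not taken from the code above:

    EGLint attributes[MAX_EGL_CONFIG_ATTRIBS];
    EGLConfig egl_config;
    EGLint n_configs = 0;

    /* build the attribute list, bounded by MAX_EGL_CONFIG_ATTRIBS exactly as
     * the assertion in the builder requires, then pass it to eglChooseConfig() */
    egl_attributes_from_framebuffer_config(display, fb_config, attributes);

    if (!eglChooseConfig(egl_display, attributes, &egl_config, 1, &n_configs) ||
        n_configs == 0)
        return false; /* no matching EGL config */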
Example #9
static int
get_max_activateable_texture_units(cg_device_t *dev)
{
    if (C_UNLIKELY(dev->max_activateable_texture_units == -1)) {
        GLint values[3];
        int n_values = 0;
        int i;

#ifdef CG_HAS_GL_SUPPORT
        if (!_cg_has_private_feature(dev, CG_PRIVATE_FEATURE_GL_EMBEDDED)) {
            /* GL_MAX_TEXTURE_COORDS is provided for GLSL. It defines
             * the number of texture coordinates that can be uploaded
             * (but doesn't necessarily relate to how many texture
             *  images can be sampled) */
            if (cg_has_feature(dev, CG_FEATURE_ID_GLSL)) {
                /* Previously this code subtracted one from the value, but
                   there was no explanation for why it did this and it doesn't
                   seem to make sense, so the subtraction has been removed */
                GE(dev,
                   glGetIntegerv(GL_MAX_TEXTURE_COORDS, values + n_values++));

                /* GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS is defined for GLSL */
                GE(dev,
                   glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
                                 values + n_values++));
            }
        }
#endif /* CG_HAS_GL_SUPPORT */

#ifdef CG_HAS_GLES2_SUPPORT
        if (_cg_has_private_feature(dev, CG_PRIVATE_FEATURE_GL_EMBEDDED)) {
            GE(dev, glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, values + n_values));
            /* Two of the vertex attribs need to be used for the position
               and color */
            values[n_values++] -= 2;

            GE(dev,
               glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
                             values + n_values++));
        }
#endif

        c_assert(n_values <= C_N_ELEMENTS(values) && n_values > 0);

        /* Use the maximum value */
        dev->max_activateable_texture_units = values[0];
        for (i = 1; i < n_values; i++)
            dev->max_activateable_texture_units =
                MAX(values[i], dev->max_activateable_texture_units);
    }

    return dev->max_activateable_texture_units;
}
Example #10
void n_dhcp4_s_connection_deinit(NDhcp4SConnection *connection) {
        c_assert(!connection->ip);

        if (connection->fd_udp >= 0) {
                close(connection->fd_udp);
        }

        if (connection->fd_packet >= 0) {
                close(connection->fd_packet);
        }

        *connection = (NDhcp4SConnection)N_DHCP4_S_CONNECTION_NULL(*connection);
}
Example #11
static int
_cg_pot_slices_for_size(int size_to_fill,
                        int max_span_size,
                        int max_waste,
                        c_array_t *out_spans)
{
    int n_spans = 0;
    cg_span_t span;

    /* Init first slice span */
    span.start = 0;
    span.size = max_span_size;
    span.waste = 0;

    /* Fix invalid max_waste */
    if (max_waste < 0)
        max_waste = 0;

    while (true) {
        /* Is the whole area covered? */
        if (size_to_fill > span.size) {
            /* Not yet - add a span of this size */
            if (out_spans)
                c_array_append_val(out_spans, span);

            span.start += span.size;
            size_to_fill -= span.size;
            n_spans++;
        } else if (span.size - size_to_fill <= max_waste) {
            /* Yes and waste is small enough */
            /* Pick the next power of two up from size_to_fill. This can
               sometimes be less than the span.size that would be chosen
               otherwise */
            span.size = _cg_util_next_p2(size_to_fill);
            span.waste = span.size - size_to_fill;
            if (out_spans)
                c_array_append_val(out_spans, span);

            return ++n_spans;
        } else {
            /* Yes but waste is too large */
            while (span.size - size_to_fill > max_waste) {
                span.size /= 2;
                c_assert(span.size > 0);
            }
        }
    }

    /* Can't get here */
    return 0;
}
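A worked illustration with hypothetical arguments: filling 300 texels with a maximum span size of 256 and a waste limit of 64 yields two spans, [0, 256) with no waste and [256, 320) wasting 20 texels, since 64 is the next power of two above the remaining 44. Passing NULL for out_spans only counts them:

    int n_spans = _cg_pot_slices_for_size(300, 256, 64, NULL);
    /* n_spans == 2 */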
Example #12
static int
error_handler(Display *xdpy, XErrorEvent *error)
{
    cg_renderer_t *renderer;
    cg_xlib_renderer_t *xlib_renderer;

    renderer = get_renderer_for_xdisplay(xdpy);

    xlib_renderer = _cg_xlib_renderer_get_data(renderer);
    c_assert(xlib_renderer->trap_state);

    xlib_renderer->trap_state->trapped_error_code = error->error_code;

    return 0;
}
Example #13
int
_cg_xlib_renderer_untrap_errors(cg_renderer_t *renderer,
                                cg_xlib_trap_state_t *state)
{
    cg_xlib_renderer_t *xlib_renderer;

    xlib_renderer = _cg_xlib_renderer_get_data(renderer);
    c_assert(state == xlib_renderer->trap_state);

    XSetErrorHandler(state->old_error_handler);

    xlib_renderer->trap_state = state->old_state;

    return state->trapped_error_code;
}
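A hypothetical usage sketch, assuming a matching _cg_xlib_renderer_trap_errors() counterpart that installs error_handler() and records the previous handler in the state; xdpy and pixmap are placeholders:

    cg_xlib_trap_state_t state;

    _cg_xlib_renderer_trap_errors(renderer, &state);

    XFreePixmap(xdpy, pixmap); /* an Xlib request that may fail asynchronously */
    XSync(xdpy, False);        /* flush, so any error reaches error_handler() */

    if (_cg_xlib_renderer_untrap_errors(renderer, &state) != 0)
        ; /* the request failed; the return value is the X error code */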
Example #14
/**
 * n_dhcp4_server_lease_new() - allocate new server lease object
 * @leasep:                     output argument for new server lease object
 * @message:                    incoming message representing the lease
 *
 * This creates a new server lease object. Server lease objects are simple
 * wrappers around the incoming request message that represents the lease.
 *
 * Return: 0 on success, negative error code on failure.
 */
int n_dhcp4_server_lease_new(NDhcp4ServerLease **leasep, NDhcp4Incoming *message) {
        _c_cleanup_(n_dhcp4_server_lease_unrefp) NDhcp4ServerLease *lease = NULL;

        c_assert(leasep);

        lease = malloc(sizeof(*lease));
        if (!lease)
                return -ENOMEM;

        *lease = (NDhcp4ServerLease)N_DHCP4_SERVER_LEASE_NULL(*lease);

        lease->request = message;

        *leasep = lease;
        lease = NULL;
        return 0;
}
Example #15
/**
 * n_dhcp4_client_lease_new() - allocate new client lease object
 * @leasep:                     output argument for new client lease object
 * @message:                    incoming message representing the lease
 *
 * This creates a new client lease object. Client lease objects are simple
 * wrappers around an incoming message representing a lease.
 *
 * Return: 0 on success, negative error code on failure.
 */
int n_dhcp4_client_lease_new(NDhcp4ClientLease **leasep, NDhcp4Incoming *message) {
        _c_cleanup_(n_dhcp4_client_lease_unrefp) NDhcp4ClientLease *lease = NULL;
        int r;

        c_assert(leasep);

        lease = malloc(sizeof(*lease));
        if (!lease)
                return -ENOMEM;

        *lease = (NDhcp4ClientLease)N_DHCP4_CLIENT_LEASE_NULL(*lease);

        r = n_dhcp4_incoming_get_timeouts(message, &lease->t1, &lease->t2, &lease->lifetime);
        if (r)
                return r;

        lease->message = message;
        *leasep = lease;
        lease = NULL;
        return 0;
}
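A minimal sketch tying this constructor together with n_dhcp4_client_lease_link() from Example #1; the incoming message and the probe are assumed to exist already:

        NDhcp4ClientLease *lease = NULL;
        int r;

        r = n_dhcp4_client_lease_new(&lease, message);
        if (r)
                return r;

        /* a freshly created lease is not linked anywhere, so the assertions
         * in n_dhcp4_client_lease_link() hold */
        n_dhcp4_client_lease_link(lease, probe);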
Example #16
/**
 * c_rbtree_last_postorder() - return last node in post-order
 * @t:          tree to operate on
 *
 * This returns the last node of a left-to-right post-order traversal. That is,
 * it always returns the root node, or NULL if the tree is empty.
 *
 * This can also be interpreted as the first node of a right-to-left pre-order
 * traversal.
 *
 * Fixed runtime (n: number of elements in tree): O(1)
 *
 * Return: Pointer to last node in post-order, or NULL.
 */
_c_public_ CRBNode *c_rbtree_last_postorder(CRBTree *t) {
        c_assert(t);
        return t->root;
}
Example #17
/**
 * c_rbtree_first_postorder() - return first node in post-order
 * @t:          tree to operate on
 *
 * This returns the first node of a left-to-right post-order traversal. That
 * is, it returns the left-deepest leaf. If the tree is empty, this returns
 * NULL.
 *
 * This can also be interpreted as the last node of a right-to-left pre-order
 * traversal.
 *
 * Fixed runtime (n: number of elements in tree): O(log(n))
 *
 * Return: Pointer to first node in post-order, or NULL.
 */
_c_public_ CRBNode *c_rbtree_first_postorder(CRBTree *t) {
        c_assert(t);
        return c_rbnode_leftdeepest(t->root);
}
Example #18
/**
 * c_rbtree_last() - return last node
 * @t:          tree to operate on
 *
 * An RB-Tree always defines a linear order of its elements. This function
 * returns the logically last node in @t. If @t is empty, NULL is returned.
 *
 * Fixed runtime (n: number of elements in tree): O(log(n))
 *
 * Return: Pointer to last node, or NULL.
 */
_c_public_ CRBNode *c_rbtree_last(CRBTree *t) {
        c_assert(t);
        return c_rbnode_rightmost(t->root);
}
Example #19
/**
 * c_rbtree_first() - return first node
 * @t:          tree to operate on
 *
 * An RB-Tree always defines a linear order of its elements. This function
 * returns the logically first node in @t. If @t is empty, NULL is returned.
 *
 * Fixed runtime (n: number of elements in tree): O(log(n))
 *
 * Return: Pointer to first node, or NULL.
 */
_c_public_ CRBNode *c_rbtree_first(CRBTree *t) {
        c_assert(t);
        return c_rbnode_leftmost(t->root);
}
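Together these accessors make an in-order walk straightforward. A sketch, assuming the c_rbnode_next() iterator and an entry type that embeds a CRBNode; struct entry and handle() are hypothetical:

        struct entry {
                int key;
                CRBNode rb;
        };

        CRBNode *n;
        struct entry *e;

        for (n = c_rbtree_first(t); n; n = c_rbnode_next(n)) {
                e = c_rbnode_entry(n, struct entry, rb);
                /* entries are visited in ascending key order */
                handle(e);
        }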
Example #20
static int test_parallel(void) {
        TestContext ctx = {};
        int r, pid, status;
        uint64_t n_instr, n_event;

        /* create shared area for tree verification */
        test_parent_start(&ctx);

        /* run child */
        pid = fork();
        c_assert(pid >= 0);
        if (pid == 0) {
                r = test_parallel_child(&ctx);
                _exit(r);
        }

        /*
         * After setup, the child immediately enters TRACE-operation and raises
         * SIGUSR1. Once continued, the child performs the pre-configured tree
         * operations. When done, it raises SIGUSR2, and then exits.
         *
         * Here in the parent we catch all trace-stops of the child via waitpid
         * until we get no more such stop-events. Based on the stop-event we
         * get, we verify child-state, STEP it, or perform other state tracking.
         * We repeat this as long as we catch trace-stops from the child.
         */
        n_instr = 0;
        n_event = 0;
        for (r = waitpid(pid, &status, 0);
             r == pid && WIFSTOPPED(status);
             r = waitpid(pid, &status, 0)) {

                switch (WSTOPSIG(status)) {
                case SIGUSR1:
                        n_event |= 0x1;

                        /* step child */
                        r = ptrace(PTRACE_SINGLESTEP, pid, 0, 0);

                        /*
                         * Some architectures (e.g., armv7hl) do not implement
                         * SINGLESTEP, but return EIO. Skip the entire test in
                         * this case.
                         */
                        if (r < 0 && errno == EIO)
                                return 77;

                        c_assert(r >= 0);
                        break;

                case SIGURG:
                        n_event |= 0x2;
                        test_parent_middle(&ctx);

                        /* step child */
                        r = ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
                        c_assert(r >= 0);
                        break;

                case SIGUSR2:
                        n_event |= 0x4;
                        test_parent_end(&ctx);

                        /* continue child */
                        r = ptrace(PTRACE_CONT, pid, 0, 0);
                        c_assert(r >= 0);
                        break;

                case SIGTRAP:
                        ++n_instr;
                        test_parent_step(&ctx);

                        /* step repeatedly as long as we get SIGTRAP */
                        r = ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
                        c_assert(r >= 0);
                        break;

                default:
                        c_assert(0);
                        break;
                }
        }

        /* verify our child exited cleanly */
        c_assert(r == pid);
        c_assert(!!WIFEXITED(status));

        /*
         * 0xdf is signalled if ptrace is not allowed or we are already
         * ptraced. In this case we skip the test.
         *
         * 0xef is signalled on success.
         *
         * In any other case something went wobbly and we should fail hard.
         */
        switch (WEXITSTATUS(status)) {
        case 0xef:
                break;
        case 0xdf:
                return 77;
        default:
                c_assert(0);
                break;
        }

        /* verify we hit all child states */
        c_assert(n_event & 0x1);
        c_assert(n_event & 0x2);
        c_assert(n_event & 0x4);
        c_assert(n_instr > 0);

        return 0;
}
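For context, an illustrative sketch of the child side of the protocol described above; the real test_parallel_child() performs the pre-configured tree operations between the signals, so only the handshake is shown and the function name here is hypothetical:

static int test_parallel_child_sketch(TestContext *ctx) {
        int r;

        /* ask to be traced by the parent; if not permitted, skip the test */
        r = ptrace(PTRACE_TRACEME, 0, 0, 0);
        if (r < 0)
                return 0xdf;

        raise(SIGUSR1);         /* trace-stop: parent starts single-stepping */

        /* ... first batch of tree operations, single-stepped by the parent ... */

        raise(SIGURG);          /* trace-stop: parent runs test_parent_middle() */

        /* ... remaining tree operations ... */

        raise(SIGUSR2);         /* trace-stop: parent runs test_parent_end() */

        return 0xef;            /* success marker checked by the parent */
}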
Example #21
/*
 * _cg_pipeline_flush_gl_state:
 *
 * Details of override options:
 * ->fallback_mask: is a bitmask of the pipeline layers that need to be
 *    replaced with the default, fallback textures. The fallback textures are
 *    fully transparent textures so they hopefully won't contribute to the
 *    texture combining.
 *
 *    The intention of fallbacks is to try to preserve the number of layers
 *    the user is expecting, so that the texture coordinates they gave will
 *    mostly still correspond to the textures they intended and have a
 *    fighting chance of looking close to their originally intended result.
 *
 * ->disable_mask: is a bitmask of the pipeline layers that will simply have
 *    texturing disabled. It's only really intended for disabling all layers
 *    > X; i.e. we'd expect to see a contiguous run of 0 starting from the LSB
 *    and at some point the remaining bits flip to 1. It might work to disable
 *    arbitrary layers, though I'm not sure at the moment how OpenGL would
 *    take to that.
 *
 *    The intention of the disable_mask is for emitting geometry when the user
 *    hasn't supplied enough texture coordinates for all the layers and it's
 *    not possible to auto generate default texture coordinates for those
 *    layers.
 *
 * ->layer0_override_texture: forcibly tells us to bind this GL texture name for
 *    layer 0 instead of plucking the gl_texture from the cg_texture_t of layer
 *    0.
 *
 *    The intention of this is for any primitives that support sliced textures.
 *    The code can then iterate each of the slices and re-flush the pipeline,
 *    forcing the GL texture of each slice in turn.
 *
 * ->wrap_mode_overrides: overrides the wrap modes set on each
 *    layer. This is used to implement the automatic wrap mode.
 *
 * XXX: It might also help if we could specify a texture matrix for code
 *    dealing with slicing that would be multiplied with the users own matrix.
 *
 *    Normally, texture coords in the range [0, 1] refer to the extents of the
 *    texture, but when your GL texture represents a slice of the real texture
 *    (from the user's POV) then a texture matrix would be a neat way of
 *    transforming the mapping for each slice.
 *
 *    Currently for textured rectangles we manually calculate the texture
 *    coords for each slice based on the user's given coords, but this solution
 *    isn't ideal, and can't be used with CGlibVertexBuffers.
 */
void
_cg_pipeline_flush_gl_state(cg_device_t *dev,
                            cg_pipeline_t *pipeline,
                            cg_framebuffer_t *framebuffer,
                            bool with_color_attrib,
                            bool unknown_color_alpha)
{
    cg_pipeline_t *current_pipeline = dev->current_pipeline;
    unsigned long pipelines_difference;
    int n_layers;
    unsigned long *layer_differences;
    int i;
    cg_texture_unit_t *unit1;
    const cg_pipeline_progend_t *progend;

    CG_STATIC_TIMER(pipeline_flush_timer,
                    "Mainloop", /* parent */
                    "Material Flush",
                    "The time spent flushing material state",
                    0 /* no application private data */);

    CG_TIMER_START(_cg_uprof_context, pipeline_flush_timer);

#warning "HACK"
    current_pipeline = NULL;

    /* Bail out asap if we've been asked to re-flush the already current
     * pipeline and we can see the pipeline hasn't changed */
    if (current_pipeline == pipeline &&
        dev->current_pipeline_age == pipeline->age &&
        dev->current_pipeline_with_color_attrib == with_color_attrib &&
        dev->current_pipeline_unknown_color_alpha == unknown_color_alpha)
        goto done;
    else {
        /* Update derived state (currently just the 'real_blend_enable'
         * state) and determine a mask of state that differs between the
         * current pipeline and the one we are flushing.
         *
         * Note updating the derived state is done before doing any
         * pipeline comparisons so that we can correctly compare the
         * 'real_blend_enable' state itself.
         */

        if (current_pipeline == pipeline) {
            pipelines_difference = dev->current_pipeline_changes_since_flush;

            if (pipelines_difference & CG_PIPELINE_STATE_AFFECTS_BLENDING ||
                pipeline->unknown_color_alpha != unknown_color_alpha) {
                bool save_real_blend_enable = pipeline->real_blend_enable;

                _cg_pipeline_update_real_blend_enable(pipeline,
                                                      unknown_color_alpha);

                if (save_real_blend_enable != pipeline->real_blend_enable)
                    pipelines_difference |= CG_PIPELINE_STATE_REAL_BLEND_ENABLE;
            }
        } else if (current_pipeline) {
            pipelines_difference = dev->current_pipeline_changes_since_flush;

            _cg_pipeline_update_real_blend_enable(pipeline,
                                                  unknown_color_alpha);

            pipelines_difference |= _cg_pipeline_compare_differences(dev->current_pipeline,
                                                                     pipeline);
        } else {
            _cg_pipeline_update_real_blend_enable(pipeline,
                                                  unknown_color_alpha);

            pipelines_difference = CG_PIPELINE_STATE_ALL;
        }
    }

    /* Get a layer_differences mask for each layer to be flushed */
    n_layers = cg_pipeline_get_n_layers(pipeline);
    if (n_layers) {
        cg_pipeline_compare_layers_state_t state;

        layer_differences = c_alloca(sizeof(unsigned long) * n_layers);
        memset(layer_differences, 0, sizeof(unsigned long) * n_layers);

        state.dev = dev;
        state.i = 0;
        state.layer_differences = layer_differences;

        _cg_pipeline_foreach_layer_internal(pipeline,
                                            compare_layer_differences_cb,
                                            &state);
    } else
        layer_differences = NULL;

    /* First flush everything that's the same regardless of which
     * pipeline backend is being used...
     *
     * 1) top level state:
     *  glColor (or skip if a vertex attribute is being used for color)
     *  blend state
     *  alpha test state (except for GLES 2.0)
     *
     * 2) then foreach layer:
     *  determine gl_target/gl_texture
     *  bind texture
     *
     *  Note: After _cg_pipeline_flush_common_gl_state you can expect
     *  all state of each layer's corresponding texture unit to be
     *  updated.
     */
    _cg_pipeline_flush_common_gl_state(dev,
                                       pipeline,
                                       pipelines_difference,
                                       layer_differences,
                                       with_color_attrib);

    /* Now flush the fragment, vertex and program state according to the
     * current progend backend.
     *
     * Note: Some backends may not support the current pipeline
     * configuration, in which case they will report an error and we
     * will look for a different backend.
     *
     * NB: if pipeline->progend != CG_PIPELINE_PROGEND_UNDEFINED then
     * we have previously managed to successfully flush this pipeline
     * with the given progend so we will simply use that to avoid
     * fallback code paths.
     */
    if (pipeline->progend == CG_PIPELINE_PROGEND_UNDEFINED)
        _cg_pipeline_set_progend(pipeline, CG_PIPELINE_PROGEND_DEFAULT);

    for (i = pipeline->progend; i < CG_PIPELINE_N_PROGENDS;
         i++, _cg_pipeline_set_progend(pipeline, i)) {
        const cg_pipeline_vertend_t *vertend;
        const cg_pipeline_fragend_t *fragend;
        cg_pipeline_add_layer_state_t state;

        progend = _cg_pipeline_progends[i];

        if (C_UNLIKELY(!progend->start(dev, pipeline)))
            continue;

        vertend = _cg_pipeline_vertends[progend->vertend];

        vertend->start(dev, pipeline, n_layers, pipelines_difference);

        state.dev = dev;
        state.framebuffer = framebuffer;
        state.vertend = vertend;
        state.pipeline = pipeline;
        state.layer_differences = layer_differences;
        state.error_adding_layer = false;
        state.added_layer = false;

        _cg_pipeline_foreach_layer_internal(
            pipeline, vertend_add_layer_cb, &state);

        if (C_UNLIKELY(state.error_adding_layer))
            continue;

        if (C_UNLIKELY(!vertend->end(dev, pipeline, pipelines_difference)))
            continue;

        /* Now prepare the fragment processing state (fragend)
         *
         * NB: We can't combine the setup of the vertend and fragend
         * since the backends that do code generation share
         * dev->codegen_source_buffer as a scratch buffer.
         */

        fragend = _cg_pipeline_fragends[progend->fragend];
        state.fragend = fragend;

        fragend->start(dev, pipeline, n_layers, pipelines_difference);

        _cg_pipeline_foreach_layer_internal(
            pipeline, fragend_add_layer_cb, &state);

        if (C_UNLIKELY(state.error_adding_layer))
            continue;

        if (C_UNLIKELY(!fragend->end(dev, pipeline, pipelines_difference)))
            continue;

        if (progend->end)
            progend->end(dev, pipeline, pipelines_difference);
        break;
    }

    /* Since the NOP progend will claim to handle anything we should
     * never fall through without finding a suitable progend */
    c_assert(i != CG_PIPELINE_N_PROGENDS);

    /* FIXME: This reference is actually resulting in lots of
     * copy-on-write reparenting because one-shot pipelines end up
     * living for longer than necessary and so any later modification of
     * the parent will cause a copy-on-write.
     *
     * XXX: The issue should largely go away when we switch to using
     * weak pipelines for overrides.
     */
    cg_object_ref(pipeline);
    if (dev->current_pipeline != NULL)
        cg_object_unref(dev->current_pipeline);
    dev->current_pipeline = pipeline;
    dev->current_pipeline_changes_since_flush = 0;
    dev->current_pipeline_with_color_attrib = with_color_attrib;
    dev->current_pipeline_unknown_color_alpha = unknown_color_alpha;
    dev->current_pipeline_age = pipeline->age;

done:

    progend = _cg_pipeline_progends[pipeline->progend];

    /* We can't assume the color will be retained between flushes when
     * using the glsl progend because the generic attribute values are
     * not stored as part of the program object so they could be
     * overridden by any attribute changes in another program */
    if (pipeline->progend == CG_PIPELINE_PROGEND_GLSL && !with_color_attrib) {
        int attribute;
        cg_pipeline_t *authority =
            _cg_pipeline_get_authority(pipeline, CG_PIPELINE_STATE_COLOR);
        int name_index = CG_ATTRIBUTE_COLOR_NAME_INDEX;

        attribute =
            _cg_pipeline_progend_glsl_get_attrib_location(dev, pipeline,
                                                          name_index);
        if (attribute != -1)
            GE(dev,
               glVertexAttrib4f(attribute,
                                authority->color.red,
                                authority->color.green,
                                authority->color.blue,
                                authority->color.alpha));
    }

    /* Give the progend a chance to update any uniforms that might not
     * depend on the material state. This is used on GLES2 to update the
     * matrices */
    if (progend->pre_paint)
        progend->pre_paint(dev, pipeline, framebuffer);

    /* Handle the fact that OpenGL associates texture filter and wrap
     * modes with the texture objects not the texture units... */
    if (!_cg_has_private_feature(dev, CG_PRIVATE_FEATURE_SAMPLER_OBJECTS))
        foreach_texture_unit_update_filter_and_wrap_modes(dev);

    /* If this pipeline has more than one layer then we always need
     * to make sure we rebind the texture for unit 1.
     *
     * NB: various components of CGlib may temporarily bind arbitrary
     * textures to texture unit 1 so they can query and modify texture
     * object parameters (see _cg_bind_gl_texture_transient in
     * cg-pipeline.c).
     */
    unit1 = _cg_get_texture_unit(dev, 1);
    if (cg_pipeline_get_n_layers(pipeline) > 1 && unit1->dirty_gl_texture) {
        set_active_texture_unit(dev, 1);
        GE(dev, glBindTexture(unit1->gl_target, unit1->gl_texture));
        unit1->dirty_gl_texture = false;
    }

    CG_TIMER_STOP(_cg_uprof_context, pipeline_flush_timer);
}