Example #1
int
yaml_emitter_initialize(yaml_emitter_t *emitter)
{
    assert(emitter);    /* Non-NULL emitter object expected. */

    memset(emitter, 0, sizeof(yaml_emitter_t));
    if (!BUFFER_INIT(emitter, emitter->buffer, OUTPUT_BUFFER_SIZE))
        goto error;
    if (!BUFFER_INIT(emitter, emitter->raw_buffer, OUTPUT_RAW_BUFFER_SIZE))
        goto error;
    if (!STACK_INIT(emitter, emitter->states, INITIAL_STACK_SIZE))
        goto error;
    if (!QUEUE_INIT(emitter, emitter->events, INITIAL_QUEUE_SIZE))
        goto error;
    if (!STACK_INIT(emitter, emitter->indents, INITIAL_STACK_SIZE))
        goto error;
    if (!STACK_INIT(emitter, emitter->tag_directives, INITIAL_STACK_SIZE))
        goto error;

    return 1;

error:

    BUFFER_DEL(emitter, emitter->buffer);
    BUFFER_DEL(emitter, emitter->raw_buffer);
    STACK_DEL(emitter, emitter->states);
    QUEUE_DEL(emitter, emitter->events);
    STACK_DEL(emitter, emitter->indents);
    STACK_DEL(emitter, emitter->tag_directives);

    return 0;
}
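
The initializer above returns 1 on success and 0 on failure. A minimal usage sketch (the helper name is illustrative, not part of libyaml): check the return value and pair the call with yaml_emitter_delete().

#include <yaml.h>

static int emitter_setup_demo(void)
{
    yaml_emitter_t emitter;

    if (!yaml_emitter_initialize(&emitter))
        return 0;   /* one of the buffer/stack/queue allocations failed */

    /* ... set the output and emit events here ... */

    yaml_emitter_delete(&emitter);
    return 1;
}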
Example #2
void
jsp_early_error_init (void)
{
  jsp_early_error_type = JSP_EARLY_ERROR__NO_ERROR;

  STACK_INIT (props);
  STACK_INIT (size_t_stack);
}
Example #3
static void initSystem(LaplacianDeformModifierData *lmd, Object *ob, DerivedMesh *dm,
                       float (*vertexCos)[3], int numVerts)
{
	int i;
	int defgrp_index;
	int total_anchors;
	float wpaint;
	MDeformVert *dvert = NULL;
	MDeformVert *dv = NULL;
	LaplacianSystem *sys;

	if (isValidVertexGroup(lmd, ob, dm)) {
		int *index_anchors = MEM_mallocN(sizeof(int) * numVerts, __func__);  /* over-alloc */
		const MLoopTri *mlooptri;
		const MLoop *mloop;

		STACK_DECLARE(index_anchors);

		STACK_INIT(index_anchors, numVerts);

		modifier_get_vgroup(ob, dm, lmd->anchor_grp_name, &dvert, &defgrp_index);
		BLI_assert(dvert != NULL);
		dv = dvert;
		for (i = 0; i < numVerts; i++) {
			wpaint = defvert_find_weight(dv, defgrp_index);
			dv++;
			if (wpaint > 0.0f) {
				STACK_PUSH(index_anchors, i);
			}
		}
		DM_ensure_looptri(dm);
		total_anchors = STACK_SIZE(index_anchors);
		lmd->cache_system = initLaplacianSystem(numVerts, dm->getNumEdges(dm), dm->getNumLoopTri(dm),
		                                       total_anchors, lmd->anchor_grp_name, lmd->repeat);
		sys = (LaplacianSystem *)lmd->cache_system;
		memcpy(sys->index_anchors, index_anchors, sizeof(int) * total_anchors);
		memcpy(sys->co, vertexCos, sizeof(float[3]) * numVerts);
		MEM_freeN(index_anchors);
		lmd->vertexco = MEM_mallocN(sizeof(float[3]) * numVerts, "ModDeformCoordinates");
		memcpy(lmd->vertexco, vertexCos, sizeof(float[3]) * numVerts);
		lmd->total_verts = numVerts;

		createFaceRingMap(
		            dm->getNumVerts(dm), dm->getLoopTriArray(dm), dm->getNumLoopTri(dm),
		            dm->getLoopArray(dm), &sys->ringf_map, &sys->ringf_indices);
		createVertRingMap(
		            dm->getNumVerts(dm), dm->getEdgeArray(dm), dm->getNumEdges(dm),
		            &sys->ringv_map, &sys->ringv_indices);


		mlooptri = dm->getLoopTriArray(dm);
		mloop = dm->getLoopArray(dm);

		for (i = 0; i < sys->total_tris; i++) {
			sys->tris[i][0] = mloop[mlooptri[i].tri[0]].v;
			sys->tris[i][1] = mloop[mlooptri[i].tri[1]].v;
			sys->tris[i][2] = mloop[mlooptri[i].tri[2]].v;
		}
	}
}
Example #4
extern void __interrupt _c_int00()
{
   int preInitStatus;

   STACK_INIT();
   
   /*------------------------------------------------------------------------*/
   /* Call hook configured into Startup_resetFxn                             */
   /*------------------------------------------------------------------------*/
   if (&xdc_runtime_Startup__RESETFXN__C == (int*)1) {
      xdc_runtime_Startup_reset__I();
   }

   /*------------------------------------------------------------------------*/
   /* Allow for any application-specific low level initialization prior to   */
   /* initializing the C/C++ environment (global variable initialization,    */
   /* constructors).  If _system_pre_init() returns 0, then bypass C/C++     */
   /* initialization.  NOTE: BYPASSING THE CALL TO THE C/C++ INITIALIZATION  */
   /* ROUTINE MAY RESULT IN PROGRAM FAILURE.                                 */
   /*------------------------------------------------------------------------*/
   preInitStatus = _system_pre_init(); /* moved here to allow clear of .bss */

   INIT_EXIT_PTRS();
   INIT_LOCKS();

   if (preInitStatus != 0) _auto_init();

   /*------------------------------------------------------------------------*/
   /* Handle any argc/argv arguments if supported by an MSP430 loader.       */
   /*------------------------------------------------------------------------*/
   _args_main();

   exit(1);
}
Example #5
int
yaml_parser_load(yaml_parser_t *parser, yaml_document_t *document)
{
    yaml_event_t event;

    assert(parser);     /* Non-NULL parser object is expected. */
    assert(document);   /* Non-NULL document object is expected. */

    memset(document, 0, sizeof(yaml_document_t));
    if (!STACK_INIT(parser, document->nodes, INITIAL_STACK_SIZE))
        goto error;

    if (!parser->stream_start_produced) {
        if (!yaml_parser_parse(parser, &event)) goto error;
        assert(event.type == YAML_STREAM_START_EVENT);
                        /* STREAM-START is expected. */
    }

    if (parser->stream_end_produced) {
        return 1;
    }

    if (!yaml_parser_parse(parser, &event)) goto error;
    if (event.type == YAML_STREAM_END_EVENT) {
        return 1;
    }

    if (!STACK_INIT(parser, parser->aliases, INITIAL_STACK_SIZE))
        goto error;

    parser->document = document;

    if (!yaml_parser_load_document(parser, &event)) goto error;

    yaml_parser_delete_aliases(parser);
    parser->document = NULL;

    return 1;

error:

    yaml_parser_delete_aliases(parser);
    yaml_document_delete(document);
    parser->document = NULL;

    return 0;
}
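
A hedged sketch of the caller side of yaml_parser_load(): load documents in a loop and stop at the empty document that marks the end of the stream (the wrapper function and string input are illustrative).

#include <yaml.h>

static void load_all_documents(const unsigned char *buf, size_t len)
{
    yaml_parser_t parser;
    yaml_document_t document;

    if (!yaml_parser_initialize(&parser))
        return;
    yaml_parser_set_input_string(&parser, buf, len);

    while (yaml_parser_load(&parser, &document)) {
        if (!yaml_document_get_root_node(&document)) {
            /* empty document: end of stream */
            yaml_document_delete(&document);
            break;
        }
        /* ... walk the node tree here ... */
        yaml_document_delete(&document);
    }
    yaml_parser_delete(&parser);
}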
Example #6
static int
yaml_parser_load_sequence(yaml_parser_t *parser, yaml_event_t *first_event)
{
    yaml_event_t event;
    yaml_node_t node;
    struct {
        yaml_node_item_t *start;
        yaml_node_item_t *end;
        yaml_node_item_t *top;
    } items = { NULL, NULL, NULL };
    int index, item_index;
    yaml_char_t *tag = first_event->data.sequence_start.tag;

    if (!STACK_LIMIT(parser, parser->document->nodes, INT_MAX-1)) goto error;

    if (!tag || strcmp((char *)tag, "!") == 0) {
        yaml_free(tag);
        tag = yaml_strdup((yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG);
        if (!tag) goto error;
    }

    if (!STACK_INIT(parser, items, INITIAL_STACK_SIZE)) goto error;

    SEQUENCE_NODE_INIT(node, tag, items.start, items.end,
            first_event->data.sequence_start.style,
            first_event->start_mark, first_event->end_mark);

    if (!PUSH(parser, parser->document->nodes, node)) goto error;

    index = (int)(parser->document->nodes.top - parser->document->nodes.start);

    if (!yaml_parser_register_anchor(parser, index,
                first_event->data.sequence_start.anchor)) return 0;

    if (!yaml_parser_parse(parser, &event)) return 0;

    while (event.type != YAML_SEQUENCE_END_EVENT) {
        if (!STACK_LIMIT(parser,
                    parser->document->nodes.start[index-1].data.sequence.items,
                    INT_MAX-1)) return 0;
        item_index = yaml_parser_load_node(parser, &event);
        if (!item_index) return 0;
        if (!PUSH(parser,
                    parser->document->nodes.start[index-1].data.sequence.items,
                    item_index)) return 0;
        if (!yaml_parser_parse(parser, &event)) return 0;
    }

    parser->document->nodes.start[index-1].end_mark = event.end_mark;

    return index;

error:
    yaml_free(tag);
    yaml_free(first_event->data.sequence_start.anchor);
    return 0;
}
Example #7
static int
yaml_parser_load_mapping(yaml_parser_t *parser, yaml_event_t *first_event)
{
    yaml_event_t event;
    yaml_node_t node;
    struct {
        yaml_node_pair_t *start;
        yaml_node_pair_t *end;
        yaml_node_pair_t *top;
    } pairs = { NULL, NULL, NULL };
    int index;
    yaml_node_pair_t pair;
    yaml_char_t *tag = first_event->data.mapping_start.tag;

    if (!tag || strcmp((char *)tag, "!") == 0) {
        yaml_free(tag);
        tag = yaml_strdup((yaml_char_t *)YAML_DEFAULT_MAPPING_TAG);
        if (!tag) goto error;
    }

    if (!STACK_INIT(parser, pairs, INITIAL_STACK_SIZE)) goto error;

    MAPPING_NODE_INIT(node, tag, pairs.start, pairs.end,
            first_event->data.mapping_start.style,
            first_event->start_mark, first_event->end_mark);

    if (!PUSH(parser, parser->document->nodes, node)) goto error;

    index = (int) (parser->document->nodes.top - parser->document->nodes.start);

    if (!yaml_parser_register_anchor(parser, index,
                first_event->data.mapping_start.anchor)) return 0;

    if (!yaml_parser_parse(parser, &event)) return 0;

    while (event.type != YAML_MAPPING_END_EVENT) {
        pair.key = yaml_parser_load_node(parser, &event);
        if (!pair.key) return 0;
        if (!yaml_parser_parse(parser, &event)) return 0;
        pair.value = yaml_parser_load_node(parser, &event);
        if (!pair.value) return 0;
        if (!PUSH(parser,
                    parser->document->nodes.start[index-1].data.mapping.pairs,
                    pair)) return 0;
        if (!yaml_parser_parse(parser, &event)) return 0;
    }

    parser->document->nodes.start[index-1].end_mark = event.end_mark;

    return index;

error:
    yaml_free(tag);
    yaml_free(first_event->data.mapping_start.anchor);
    return 0;
}
Example #8
/*  Api_WMIInitFinish - implements common code for sending default wmi commands
 *                      to the target.
 *      This should be called after Api_InitFinish().
 *      A_VOID *pCxt - the driver context.    
 *****************************************************************************/
A_VOID 
Api_WMIInitFinish(A_VOID *pCxt)
{
    A_DRIVER_CONTEXT *pDCxt = GET_DRIVER_COMMON(pCxt);
    A_STATUS status;    
    WMI_ALLOW_AGGR_CMD allow_aggr_cmd;
        
    if(pDCxt->wmiReady == A_TRUE)
    {
        do{
            status = STACK_INIT(pCxt);
            
            if(status == A_OK){
                CUSTOM_WAIT_FOR_WMI_RESPONSE(pCxt);              
                break;            
            }else if(status == A_NO_MEMORY){
                pDCxt->tx_complete_pend = A_TRUE;
                
                if(A_OK != CUSTOM_DRIVER_WAIT_FOR_CONDITION(pCxt, &(pDCxt->tx_complete_pend), A_FALSE, 5000)){
                    A_ASSERT(0);
                }
            }else{
                A_ASSERT(0);
            }
        }while(1);       

      
      /* issue some default WMI commands appropriate for most systems */
#if WLAN_CONFIG_IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN
        wmi_cmd_process(pCxt, WMI_SET_POWER_PARAMS_CMDID, &default_power_param, sizeof(WMI_POWER_PARAMS_CMD));    
#endif
        wmi_cmd_process(pCxt, WMI_SET_SCAN_PARAMS_CMDID, &default_scan_param, sizeof(WMI_SCAN_PARAMS_CMD));
        
        wmi_cmd_process(pCxt, WMI_STORERECALL_CONFIGURE_CMDID, 
            &default_strrcl_config_cmd, sizeof(WMI_STORERECALL_CONFIGURE_CMD));                     
        pDCxt->strrclState = STRRCL_ST_INIT;
        /* technically this call to wmi_allow_aggr_cmd is not necessary if both 
         * masks are 0 as the firmware has 0,0 as the default. */
        allow_aggr_cmd.tx_allow_aggr = A_CPU2LE16(pDCxt->txAggrTidMask);
        allow_aggr_cmd.rx_allow_aggr = A_CPU2LE16(pDCxt->rxAggrTidMask);
        wmi_cmd_process(pCxt, WMI_ALLOW_AGGR_CMDID, &allow_aggr_cmd, sizeof(WMI_ALLOW_AGGR_CMD));      
#if ENABLE_P2P_MODE
        if(WLAN_NUM_OF_DEVICES == 2) {
            /* Device-0 is P2P Device and Device-1 is Legacy STA.
               Set Default P2P Params */
            wmi_cmd_process(pCxt,WMI_P2P_SET_CONFIG_CMDID,&default_p2p_config,sizeof(WMI_P2P_FW_SET_CONFIG_CMD));                           
        }
#endif
        /* Set the BSS Filter to None. If this is not set, by default the firmware 
       sets to forward the beacons to host. This causes unnecessary BSSINFO events in 
       the host even after connecting to the AP */
        wmi_bssfilter_cmd(pDCxt->pWmiCxt, NONE_BSS_FILTER, 0);
    }
}
Example #9
static void gc_start(void) {
    scm_val v = NIL, *p ;

    STACK_INIT() ;

    for (p = stack_start; p != (scm_val *)&p; p += stack_dir)
        if (IN_RANGE(*p)) GRAY(*p) ;

    FOREACH(v, roots) {
        scm_val r = *(scm_val *)(CAR(v).p) ;
        v.c->flags = FL_GC_BLACK ;
        if (PTR_AND_NO_FLAG(r, FL_GC_GRAY)) GRAY(r) ;
    }

    /* ... remainder of the function omitted in this excerpt ... */
}
Example #10
int
yaml_parser_initialize(yaml_parser_t *parser)
{
    assert(parser);     /* Non-NULL parser object expected. */

    memset(parser, 0, sizeof(yaml_parser_t));
    if (!BUFFER_INIT(parser, parser->raw_buffer, INPUT_RAW_BUFFER_SIZE))
        goto error;
    if (!BUFFER_INIT(parser, parser->buffer, INPUT_BUFFER_SIZE))
        goto error;
    if (!QUEUE_INIT(parser, parser->tokens, INITIAL_QUEUE_SIZE))
        goto error;
    if (!STACK_INIT(parser, parser->indents, INITIAL_STACK_SIZE))
        goto error;
    if (!STACK_INIT(parser, parser->simple_keys, INITIAL_STACK_SIZE))
        goto error;
    if (!STACK_INIT(parser, parser->states, INITIAL_STACK_SIZE))
        goto error;
    if (!STACK_INIT(parser, parser->marks, INITIAL_STACK_SIZE))
        goto error;
    if (!STACK_INIT(parser, parser->tag_directives, INITIAL_STACK_SIZE))
        goto error;

    return 1;

error:

    BUFFER_DEL(parser, parser->raw_buffer);
    BUFFER_DEL(parser, parser->buffer);
    QUEUE_DEL(parser, parser->tokens);
    STACK_DEL(parser, parser->indents);
    STACK_DEL(parser, parser->simple_keys);
    STACK_DEL(parser, parser->states);
    STACK_DEL(parser, parser->marks);
    STACK_DEL(parser, parser->tag_directives);

    return 0;
}
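
For contrast with the document loader above, a sketch of the lower-level event loop that this initializer also enables (wrapper name and string input are illustrative).

#include <yaml.h>

static void parse_events_demo(const unsigned char *buf, size_t len)
{
    yaml_parser_t parser;
    yaml_event_t event;
    int done = 0;

    if (!yaml_parser_initialize(&parser))
        return;
    yaml_parser_set_input_string(&parser, buf, len);

    while (!done) {
        if (!yaml_parser_parse(&parser, &event))
            break;                              /* parse error */
        done = (event.type == YAML_STREAM_END_EVENT);
        /* ... handle the event here ... */
        yaml_event_delete(&event);
    }
    yaml_parser_delete(&parser);
}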
Example #11
extern void __interrupt _c_int00_noinit_noexit()
{
   STACK_INIT();

   /*------------------------------------------------------------------------*/
   /* Call hook configured into Startup_resetFxn                             */
   /*------------------------------------------------------------------------*/
   if (&xdc_runtime_Startup__RESETFXN__C == (int*)1) {
      xdc_runtime_Startup_reset__I();
   }

   _system_pre_init(); /* moved here to allow clear of .bss */

   INIT_LOCKS();
   main(0);
   abort();
}
Example #12
extern void __interrupt _c_int00_noinit_mpu_init()
{
   MPU_INIT();
   STACK_INIT();

   /*------------------------------------------------------------------------*/
   /* Call hook configured into Startup_resetFxn                             */
   /*------------------------------------------------------------------------*/
   if (&xdc_runtime_Startup__RESETFXN__C == (int*)1) {
      xdc_runtime_Startup_reset__I();
   }

   _system_pre_init(); /* moved here to allow clear of .bss */
   
   INIT_EXIT_PTRS();
   INIT_LOCKS();
   _args_main();
   exit(1);
}
Example #13
extern void __interrupt _c_int00_noexit_mpu_init()
{
   int preInitStatus;

   MPU_INIT();
   STACK_INIT();

   /*------------------------------------------------------------------------*/
   /* Call hook configured into Startup_resetFxn                             */
   /*------------------------------------------------------------------------*/
   if (&xdc_runtime_Startup__RESETFXN__C == (int*)1) {
      xdc_runtime_Startup_reset__I();
   }

   preInitStatus = _system_pre_init(); /* moved here to allow clear of .bss */

   INIT_LOCKS();
   if (preInitStatus != 0)  _auto_init();
   main(0);
   abort();
}
Example #14
int
yaml_document_add_mapping(yaml_document_t *document,
        yaml_char_t *tag, yaml_mapping_style_t style)
{
    struct {
        yaml_error_type_t error;
    } context;
    yaml_mark_t mark = { 0, 0, 0 };
    yaml_char_t *tag_copy = NULL;
    struct {
        yaml_node_pair_t *start;
        yaml_node_pair_t *end;
        yaml_node_pair_t *top;
    } pairs = { NULL, NULL, NULL };
    yaml_node_t node;

    assert(document);   /* Non-NULL document object is expected. */

    if (!tag) {
        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG;
    }

    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
    tag_copy = yaml_strdup(tag);
    if (!tag_copy) goto error;

    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;

    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
            style, mark, mark);
    if (!PUSH(&context, document->nodes, node)) goto error;

    return document->nodes.top - document->nodes.start;

error:
    STACK_DEL(&context, pairs);
    yaml_free(tag_copy);

    return 0;
}
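
A hedged sketch of how the returned node index is typically combined with the related document-building calls (the key/value strings and helper name are illustrative).

#include <yaml.h>

static int build_document_demo(yaml_document_t *document)
{
    int map, key, value;

    if (!yaml_document_initialize(document, NULL, NULL, NULL, 1, 1))
        return 0;

    map   = yaml_document_add_mapping(document, NULL, YAML_BLOCK_MAPPING_STYLE);
    key   = yaml_document_add_scalar(document, NULL, (yaml_char_t *)"name", -1,
                                     YAML_PLAIN_SCALAR_STYLE);
    value = yaml_document_add_scalar(document, NULL, (yaml_char_t *)"demo", -1,
                                     YAML_PLAIN_SCALAR_STYLE);
    if (!map || !key || !value)
        return 0;

    return yaml_document_append_mapping_pair(document, map, key, value);
}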
Example #15
extern void __interrupt _c_int00()
{
   STACK_INIT();

 //  INIT_EXIT_PTRS();
  // INIT_LOCKS();

   /*------------------------------------------------------------------------*/
   /* Allow for any application-specific low level initialization prior to   */
   /* initializing the C/C++ environment (global variable initialization,    */
   /* constructors).  If _system_pre_init() returns 0, then bypass C/C++     */
   /* initialization.  NOTE: BYPASSING THE CALL TO THE C/C++ INITIALIZATION  */
   /* ROUTINE MAY RESULT IN PROGRAM FAILURE.                                 */
   /*------------------------------------------------------------------------*/
//   if(_system_pre_init() != 0)  _auto_init();

   /*------------------------------------------------------------------------*/
   /* Handle any argc/argv arguments if supported by an MSP430 loader.       */
   /*------------------------------------------------------------------------*/
   main_boot();

}
Example #16
extern void __interrupt _c_int00_noargs()
{
   int preInitStatus;

   CLINK_DIRECTIVE();
   STACK_INIT();

   /*------------------------------------------------------------------------*/
   /* Call hook configured into Startup_resetFxn                             */
   /*------------------------------------------------------------------------*/
   if (&xdc_runtime_Startup__RESETFXN__C == (int*)1) {
      xdc_runtime_Startup_reset__I();
   }

   preInitStatus = _system_pre_init(); /* moved here to allow clear of .bss */

   INIT_EXIT_PTRS();
   INIT_LOCKS();

   if (preInitStatus != 0) _auto_init();

   xdc_runtime_System_exit__E(main(0));
}
Example #17
static DerivedMesh *applyModifier(
        ModifierData *md, Object *ob,
        DerivedMesh *dm,
        ModifierApplyFlag UNUSED(flag))
{
	DerivedMesh *result;
	const SolidifyModifierData *smd = (SolidifyModifierData *) md;

	MVert *mv, *mvert, *orig_mvert;
	MEdge *ed, *medge, *orig_medge;
	MLoop *ml, *mloop, *orig_mloop;
	MPoly *mp, *mpoly, *orig_mpoly;
	const unsigned int numVerts = (unsigned int)dm->getNumVerts(dm);
	const unsigned int numEdges = (unsigned int)dm->getNumEdges(dm);
	const unsigned int numFaces = (unsigned int)dm->getNumPolys(dm);
	const unsigned int numLoops = (unsigned int)dm->getNumLoops(dm);
	unsigned int newLoops = 0, newFaces = 0, newEdges = 0, newVerts = 0, rimVerts = 0;

	/* only use material offsets if we have 2 or more materials  */
	const short mat_nr_max = ob->totcol > 1 ? ob->totcol - 1 : 0;
	const short mat_ofs = mat_nr_max ? smd->mat_ofs : 0;
	const short mat_ofs_rim = mat_nr_max ? smd->mat_ofs_rim : 0;

	/* use for edges */
	/* over-alloc new_vert_arr, old_vert_arr */
	unsigned int *new_vert_arr = NULL;
	STACK_DECLARE(new_vert_arr);

	unsigned int *new_edge_arr = NULL;
	STACK_DECLARE(new_edge_arr);

	unsigned int *old_vert_arr = MEM_callocN(sizeof(*old_vert_arr) * (size_t)numVerts, "old_vert_arr in solidify");

	unsigned int *edge_users = NULL;
	char *edge_order = NULL;

	float (*vert_nors)[3] = NULL;
	float (*face_nors)[3] = NULL;

	const bool need_face_normals = (smd->flag & MOD_SOLIDIFY_NORMAL_CALC) || (smd->flag & MOD_SOLIDIFY_EVEN);

	const float ofs_orig = -(((-smd->offset_fac + 1.0f) * 0.5f) * smd->offset);
	const float ofs_new  = smd->offset + ofs_orig;
	const float offset_fac_vg = smd->offset_fac_vg;
	const float offset_fac_vg_inv = 1.0f - smd->offset_fac_vg;
	const bool do_flip = (smd->flag & MOD_SOLIDIFY_FLIP) != 0;
	const bool do_clamp = (smd->offset_clamp != 0.0f);
	const bool do_shell = ((smd->flag & MOD_SOLIDIFY_RIM) && (smd->flag & MOD_SOLIDIFY_NOSHELL)) == 0;

	/* weights */
	MDeformVert *dvert;
	const bool defgrp_invert = (smd->flag & MOD_SOLIDIFY_VGROUP_INV) != 0;
	int defgrp_index;

	/* array size is doubled in case of using a shell */
	const unsigned int stride = do_shell ? 2 : 1;

	modifier_get_vgroup(ob, dm, smd->defgrp_name, &dvert, &defgrp_index);

	orig_mvert = dm->getVertArray(dm);
	orig_medge = dm->getEdgeArray(dm);
	orig_mloop = dm->getLoopArray(dm);
	orig_mpoly = dm->getPolyArray(dm);

	if (need_face_normals) {
		/* calculate only face normals */
		face_nors = MEM_mallocN(sizeof(*face_nors) * (size_t)numFaces, __func__);
		BKE_mesh_calc_normals_poly(
		            orig_mvert, NULL, (int)numVerts,
		            orig_mloop, orig_mpoly,
		            (int)numLoops, (int)numFaces,
		            face_nors, true);
	}

	STACK_INIT(new_vert_arr, numVerts * 2);
	STACK_INIT(new_edge_arr, numEdges * 2);

	if (smd->flag & MOD_SOLIDIFY_RIM) {
		BLI_bitmap *orig_mvert_tag = BLI_BITMAP_NEW(numVerts, __func__);
		unsigned int eidx;
		unsigned int i;

#define INVALID_UNUSED ((unsigned int)-1)
#define INVALID_PAIR ((unsigned int)-2)

		new_vert_arr = MEM_mallocN(sizeof(*new_vert_arr) * (size_t)(numVerts * 2), __func__);
		new_edge_arr = MEM_mallocN(sizeof(*new_edge_arr) * (size_t)((numEdges * 2) + numVerts), __func__);

		edge_users = MEM_mallocN(sizeof(*edge_users) * (size_t)numEdges, "solid_mod edges");
		edge_order = MEM_mallocN(sizeof(*edge_order) * (size_t)numEdges, "solid_mod eorder");


		/* save doing 2 loops here... */
#if 0
		copy_vn_i(edge_users, numEdges, INVALID_UNUSED);
#endif

		for (eidx = 0, ed = orig_medge; eidx < numEdges; eidx++, ed++) {
			edge_users[eidx] = INVALID_UNUSED;
		}

		for (i = 0, mp = orig_mpoly; i < numFaces; i++, mp++) {
			MLoop *ml_prev;
			int j;

			ml = orig_mloop + mp->loopstart;
			ml_prev = ml + (mp->totloop - 1);

			for (j = 0; j < mp->totloop; j++, ml++) {
				/* add edge user */
				eidx = ml_prev->e;
				if (edge_users[eidx] == INVALID_UNUSED) {
					ed = orig_medge + eidx;
					BLI_assert(ELEM(ml_prev->v,    ed->v1, ed->v2) &&
					           ELEM(ml->v, ed->v1, ed->v2));
					edge_users[eidx] = (ml_prev->v > ml->v) == (ed->v1 < ed->v2) ? i : (i + numFaces);
					edge_order[eidx] = j;
				}
				else {
					edge_users[eidx] = INVALID_PAIR;
				}
				ml_prev = ml;
			}
		}

		for (eidx = 0, ed = orig_medge; eidx < numEdges; eidx++, ed++) {
			if (!ELEM(edge_users[eidx], INVALID_UNUSED, INVALID_PAIR)) {
				BLI_BITMAP_ENABLE(orig_mvert_tag, ed->v1);
				BLI_BITMAP_ENABLE(orig_mvert_tag, ed->v2);
				STACK_PUSH(new_edge_arr, eidx);
				newFaces++;
				newLoops += 4;
			}
		}

		for (i = 0; i < numVerts; i++) {
			if (BLI_BITMAP_TEST(orig_mvert_tag, i)) {
				old_vert_arr[i] = STACK_SIZE(new_vert_arr);
				STACK_PUSH(new_vert_arr, i);
				rimVerts++;
			}
			else {
				old_vert_arr[i] = INVALID_UNUSED;
			}
		}

		MEM_freeN(orig_mvert_tag);
	}

	if (do_shell == false) {
		/* only add rim vertices */
		newVerts = rimVerts;
		/* each extruded face needs an opposite edge */
		newEdges = newFaces;
	}
	else {
		/* (stride == 2) in this case, so no need to add newVerts/newEdges */
		BLI_assert(newVerts == 0);
		BLI_assert(newEdges == 0);
	}

	if (smd->flag & MOD_SOLIDIFY_NORMAL_CALC) {
		vert_nors = MEM_callocN(sizeof(float) * (size_t)numVerts * 3, "mod_solid_vno_hq");
		dm_calc_normal(dm, face_nors, vert_nors);
	}

	result = CDDM_from_template(dm,
	                            (int)((numVerts * stride) + newVerts),
	                            (int)((numEdges * stride) + newEdges + rimVerts), 0,
	                            (int)((numLoops * stride) + newLoops),
	                            (int)((numFaces * stride) + newFaces));

	mpoly = CDDM_get_polys(result);
	mloop = CDDM_get_loops(result);
	medge = CDDM_get_edges(result);
	mvert = CDDM_get_verts(result);

	if (do_shell) {
		DM_copy_vert_data(dm, result, 0, 0, (int)numVerts);
		DM_copy_vert_data(dm, result, 0, (int)numVerts, (int)numVerts);

		DM_copy_edge_data(dm, result, 0, 0, (int)numEdges);
		DM_copy_edge_data(dm, result, 0, (int)numEdges, (int)numEdges);

		DM_copy_loop_data(dm, result, 0, 0, (int)numLoops);
		DM_copy_loop_data(dm, result, 0, (int)numLoops, (int)numLoops);

		DM_copy_poly_data(dm, result, 0, 0, (int)numFaces);
		DM_copy_poly_data(dm, result, 0, (int)numFaces, (int)numFaces);
	}
	else {
		int i, j;
		DM_copy_vert_data(dm, result, 0, 0, (int)numVerts);
		for (i = 0, j = (int)numVerts; i < numVerts; i++) {
			if (old_vert_arr[i] != INVALID_UNUSED) {
				DM_copy_vert_data(dm, result, i, j, 1);
				j++;
			}
		}

		DM_copy_edge_data(dm, result, 0, 0, (int)numEdges);

		for (i = 0, j = (int)numEdges; i < numEdges; i++) {
			if (!ELEM(edge_users[i], INVALID_UNUSED, INVALID_PAIR)) {
				MEdge *ed_src, *ed_dst;
				DM_copy_edge_data(dm, result, i, j, 1);

				ed_src = &medge[i];
				ed_dst = &medge[j];
				ed_dst->v1 = old_vert_arr[ed_src->v1] + numVerts;
				ed_dst->v2 = old_vert_arr[ed_src->v2] + numVerts;
				j++;
			}
		}

		/* will be created later */
		DM_copy_loop_data(dm, result, 0, 0, (int)numLoops);
		DM_copy_poly_data(dm, result, 0, 0, (int)numFaces);
	}

#undef INVALID_UNUSED
#undef INVALID_PAIR


	/* initializes: (i_end, do_shell_align, mv)  */
#define INIT_VERT_ARRAY_OFFSETS(test) \
	if (((ofs_new >= ofs_orig) == do_flip) == test) { \
		i_end = numVerts; \
		do_shell_align = true; \
		mv = mvert; \
	} \
	else { \
		if (do_shell) { \
			i_end = numVerts; \
			do_shell_align = true; \
		} \
		else { \
			i_end = newVerts ; \
			do_shell_align = false; \
		} \
		mv = &mvert[numVerts]; \
	} (void)0


	/* flip normals */

	if (do_shell) {
		unsigned int i;

		mp = mpoly + numFaces;
		for (i = 0; i < dm->numPolyData; i++, mp++) {
			MLoop *ml2;
			unsigned int e;
			int j;

			/* reverses the loop direction (MLoop.v as well as custom-data)
			 * MLoop.e also needs to be corrected; this is done in a separate loop below. */
			ml2 = mloop + mp->loopstart + dm->numLoopData;
			for (j = 0; j < mp->totloop; j++) {
				CustomData_copy_data(&dm->loopData, &result->loopData, mp->loopstart + j,
				                     mp->loopstart + (mp->totloop - j - 1) + dm->numLoopData, 1);
			}

			if (mat_ofs) {
				mp->mat_nr += mat_ofs;
				CLAMP(mp->mat_nr, 0, mat_nr_max);
			}

			e = ml2[0].e;
			for (j = 0; j < mp->totloop - 1; j++) {
				ml2[j].e = ml2[j + 1].e;
			}
			ml2[mp->totloop - 1].e = e;

			mp->loopstart += dm->numLoopData;

			for (j = 0; j < mp->totloop; j++) {
				ml2[j].e += numEdges;
				ml2[j].v += numVerts;
			}
		}

		for (i = 0, ed = medge + numEdges; i < numEdges; i++, ed++) {
			ed->v1 += numVerts;
			ed->v2 += numVerts;
		}
	}

	/* note, copied vertex layers don't have flipped normals yet. do this after applying offset */
	if ((smd->flag & MOD_SOLIDIFY_EVEN) == 0) {
		/* no even thickness, very simple */
		float scalar_short;
		float scalar_short_vgroup;

		/* for clamping */
		float *vert_lens = NULL;
		const float offset    = fabsf(smd->offset) * smd->offset_clamp;
		const float offset_sq = offset * offset;

		if (do_clamp) {
			unsigned int i;

			vert_lens = MEM_mallocN(sizeof(float) * numVerts, "vert_lens");
			copy_vn_fl(vert_lens, (int)numVerts, FLT_MAX);
			for (i = 0; i < numEdges; i++) {
				const float ed_len_sq = len_squared_v3v3(mvert[medge[i].v1].co, mvert[medge[i].v2].co);
				vert_lens[medge[i].v1] = min_ff(vert_lens[medge[i].v1], ed_len_sq);
				vert_lens[medge[i].v2] = min_ff(vert_lens[medge[i].v2], ed_len_sq);
			}
		}

		if (ofs_new != 0.0f) {
			unsigned int i_orig, i_end;
			bool do_shell_align;

			scalar_short = scalar_short_vgroup = ofs_new / 32767.0f;

			INIT_VERT_ARRAY_OFFSETS(false);

			for (i_orig = 0; i_orig < i_end; i_orig++, mv++) {
				const unsigned int i = do_shell_align ? i_orig : new_vert_arr[i_orig];
				if (dvert) {
					MDeformVert *dv = &dvert[i];
					if (defgrp_invert) scalar_short_vgroup = 1.0f - defvert_find_weight(dv, defgrp_index);
					else scalar_short_vgroup = defvert_find_weight(dv, defgrp_index);
					scalar_short_vgroup = (offset_fac_vg + (scalar_short_vgroup * offset_fac_vg_inv)) * scalar_short;
				}
				if (do_clamp) {
					/* always reset because we may have set it before */
					if (dvert == NULL) {
						scalar_short_vgroup = scalar_short;
					}
					if (vert_lens[i] < offset_sq) {
						float scalar = sqrtf(vert_lens[i]) / offset;
						scalar_short_vgroup *= scalar;
					}
				}
				madd_v3v3short_fl(mv->co, mv->no, scalar_short_vgroup);
			}
		}

		if (ofs_orig != 0.0f) {
			unsigned int i_orig, i_end;
			bool do_shell_align;

			scalar_short = scalar_short_vgroup = ofs_orig / 32767.0f;

			/* as above but swapped */
			INIT_VERT_ARRAY_OFFSETS(true);

			for (i_orig = 0; i_orig < i_end; i_orig++, mv++) {
				const unsigned int i = do_shell_align ? i_orig : new_vert_arr[i_orig];
				if (dvert) {
					MDeformVert *dv = &dvert[i];
					if (defgrp_invert) scalar_short_vgroup = 1.0f - defvert_find_weight(dv, defgrp_index);
					else scalar_short_vgroup = defvert_find_weight(dv, defgrp_index);
					scalar_short_vgroup = (offset_fac_vg + (scalar_short_vgroup * offset_fac_vg_inv)) * scalar_short;
				}
				if (do_clamp) {
					/* always reset because we may have set it before */
					if (dvert == NULL) {
						scalar_short_vgroup = scalar_short;
					}
					if (vert_lens[i] < offset_sq) {
						float scalar = sqrtf(vert_lens[i]) / offset;
						scalar_short_vgroup *= scalar;
					}
				}
				madd_v3v3short_fl(mv->co, mv->no, scalar_short_vgroup);
			}
		}

		if (do_clamp) {
			MEM_freeN(vert_lens);
		}
	}
	else {
#ifdef USE_NONMANIFOLD_WORKAROUND
		const bool check_non_manifold = (smd->flag & MOD_SOLIDIFY_NORMAL_CALC) != 0;
#endif
		/* same as EM_solidify() in editmesh_lib.c */
		float *vert_angles = MEM_callocN(sizeof(float) * numVerts * 2, "mod_solid_pair"); /* 2 in 1 */
		float *vert_accum = vert_angles + numVerts;
		unsigned int vidx;
		unsigned int i;

		if (vert_nors == NULL) {
			vert_nors = MEM_mallocN(sizeof(float) * numVerts * 3, "mod_solid_vno");
			for (i = 0, mv = mvert; i < numVerts; i++, mv++) {
				normal_short_to_float_v3(vert_nors[i], mv->no);
			}
		}

		for (i = 0, mp = mpoly; i < numFaces; i++, mp++) {
			/* #BKE_mesh_calc_poly_angles logic is inlined here */
			float nor_prev[3];
			float nor_next[3];

			int i_curr = mp->totloop - 1;
			int i_next = 0;

			ml = &mloop[mp->loopstart];

			sub_v3_v3v3(nor_prev, mvert[ml[i_curr - 1].v].co, mvert[ml[i_curr].v].co);
			normalize_v3(nor_prev);

			while (i_next < mp->totloop) {
				float angle;
				sub_v3_v3v3(nor_next, mvert[ml[i_curr].v].co, mvert[ml[i_next].v].co);
				normalize_v3(nor_next);
				angle = angle_normalized_v3v3(nor_prev, nor_next);


				/* --- not related to angle calc --- */
				if (angle < FLT_EPSILON) {
					angle = FLT_EPSILON;
				}

				vidx = ml[i_curr].v;
				vert_accum[vidx] += angle;

#ifdef USE_NONMANIFOLD_WORKAROUND
				/* skip 3+ face user edges */
				if ((check_non_manifold == false) ||
				    LIKELY(((orig_medge[ml[i_curr].e].flag & ME_EDGE_TMP_TAG) == 0) &&
				           ((orig_medge[ml[i_next].e].flag & ME_EDGE_TMP_TAG) == 0)))
				{
					vert_angles[vidx] += shell_v3v3_normalized_to_dist(vert_nors[vidx], face_nors[i]) * angle;
				}
				else {
					vert_angles[vidx] += angle;
				}
#else
				vert_angles[vidx] += shell_v3v3_normalized_to_dist(vert_nors[vidx], face_nors[i]) * angle;
#endif
				/* --- end non-angle-calc section --- */


				/* step */
				copy_v3_v3(nor_prev, nor_next);
				i_curr = i_next;
				i_next++;
			}
		}

		/* vertex group support */
		if (dvert) {
			MDeformVert *dv = dvert;
			float scalar;

			if (defgrp_invert) {
				for (i = 0; i < numVerts; i++, dv++) {
					scalar = 1.0f - defvert_find_weight(dv, defgrp_index);
					scalar = offset_fac_vg + (scalar * offset_fac_vg_inv);
					vert_angles[i] *= scalar;
				}
			}
			else {
				for (i = 0; i < numVerts; i++, dv++) {
					scalar = defvert_find_weight(dv, defgrp_index);
					scalar = offset_fac_vg + (scalar * offset_fac_vg_inv);
					vert_angles[i] *= scalar;
				}
			}
		}

		if (do_clamp) {
			float *vert_lens_sq = MEM_mallocN(sizeof(float) * numVerts, "vert_lens");
			const float offset    = fabsf(smd->offset) * smd->offset_clamp;
			const float offset_sq = offset * offset;
			copy_vn_fl(vert_lens_sq, (int)numVerts, FLT_MAX);
			for (i = 0; i < numEdges; i++) {
				const float ed_len = len_squared_v3v3(mvert[medge[i].v1].co, mvert[medge[i].v2].co);
				vert_lens_sq[medge[i].v1] = min_ff(vert_lens_sq[medge[i].v1], ed_len);
				vert_lens_sq[medge[i].v2] = min_ff(vert_lens_sq[medge[i].v2], ed_len);
			}
			for (i = 0; i < numVerts; i++) {
				if (vert_lens_sq[i] < offset_sq) {
					float scalar = sqrtf(vert_lens_sq[i]) / offset;
					vert_angles[i] *= scalar;
				}
			}
			MEM_freeN(vert_lens_sq);
		}

		if (ofs_new != 0.0f) {
			unsigned int i_orig, i_end;
			bool do_shell_align;

			INIT_VERT_ARRAY_OFFSETS(false);

			for (i_orig = 0; i_orig < i_end; i_orig++, mv++) {
				const unsigned int i_other = do_shell_align ? i_orig : new_vert_arr[i_orig];
				if (vert_accum[i_other]) { /* zero if unselected */
					madd_v3_v3fl(mv->co, vert_nors[i_other], ofs_new * (vert_angles[i_other] / vert_accum[i_other]));
				}
			}
		}

		if (ofs_orig != 0.0f) {
			unsigned int i_orig, i_end;
			bool do_shell_align;

			/* same as above but swapped, intentional use of 'ofs_new' */
			INIT_VERT_ARRAY_OFFSETS(true);

			for (i_orig = 0; i_orig < i_end; i_orig++, mv++) {
				const unsigned int i_other = do_shell_align ? i_orig : new_vert_arr[i_orig];
				if (vert_accum[i_other]) { /* zero if unselected */
					madd_v3_v3fl(mv->co, vert_nors[i_other], ofs_orig * (vert_angles[i_other] / vert_accum[i_other]));
				}
			}
		}

		MEM_freeN(vert_angles);
	}

	if (vert_nors)
		MEM_freeN(vert_nors);

	/* must recalculate normals with vgroups since they can displace unevenly [#26888] */
	if ((dm->dirty & DM_DIRTY_NORMALS) || (smd->flag & MOD_SOLIDIFY_RIM) || dvert) {
		result->dirty |= DM_DIRTY_NORMALS;
	}
	else if (do_shell) {
		unsigned int i;
		/* flip vertex normals for copied verts */
		mv = mvert + numVerts;
		for (i = 0; i < numVerts; i++, mv++) {
			negate_v3_short(mv->no);
		}
	}

	if (smd->flag & MOD_SOLIDIFY_RIM) {
		unsigned int i;

		/* bugger, need to re-calculate the normals for the new edge faces.
		 * This could be done in many ways, but probably the quickest way
		 * is to calculate the average normals for side faces only.
		 * Then blend them with the normals of the edge verts.
		 *
		 * at the moment it's easiest to allocate an entire array for every vertex,
		 * even though we only need edge verts - campbell
		 */

#define SOLIDIFY_SIDE_NORMALS

#ifdef SOLIDIFY_SIDE_NORMALS
		const bool do_side_normals = !(result->dirty & DM_DIRTY_NORMALS);
		/* annoying to allocate these since we only need the edge verts, */
		float (*edge_vert_nos)[3] = do_side_normals ? MEM_callocN(sizeof(float) * numVerts * 3, __func__) : NULL;
		float nor[3];
#endif
		const unsigned char crease_rim = smd->crease_rim * 255.0f;
		const unsigned char crease_outer = smd->crease_outer * 255.0f;
		const unsigned char crease_inner = smd->crease_inner * 255.0f;

		int *origindex_edge;
		int *orig_ed;
		unsigned int j;

		if (crease_rim || crease_outer || crease_inner) {
			result->cd_flag |= ME_CDFLAG_EDGE_CREASE;
		}

		/* add faces & edges */
		origindex_edge = result->getEdgeDataArray(result, CD_ORIGINDEX);
		ed = &medge[(numEdges * stride) + newEdges];  /* start after copied edges */
		orig_ed = &origindex_edge[(numEdges * stride) + newEdges];
		for (i = 0; i < rimVerts; i++, ed++, orig_ed++) {
			ed->v1 = new_vert_arr[i];
			ed->v2 = (do_shell ? new_vert_arr[i] : i) + numVerts;
			ed->flag |= ME_EDGEDRAW;

			*orig_ed = ORIGINDEX_NONE;

			if (crease_rim) {
				ed->crease = crease_rim;
			}
		}

		/* faces */
		mp = mpoly + (numFaces * stride);
		ml = mloop + (numLoops * stride);
		j = 0;
		for (i = 0; i < newFaces; i++, mp++) {
			unsigned int eidx = new_edge_arr[i];
			unsigned int fidx = edge_users[eidx];
			int k1, k2;
			bool flip;

			if (fidx >= numFaces) {
				fidx -= numFaces;
				flip = true;
			}
			else {
				flip = false;
			}

			ed = medge + eidx;

			/* copy most of the face settings */
			DM_copy_poly_data(dm, result, (int)fidx, (int)((numFaces * stride) + i), 1);
			mp->loopstart = (int)(j + (numLoops * stride));
			mp->flag = mpoly[fidx].flag;

			/* notice we use 'mp->totloop' which is later overwritten,
			 * we could look up the original face but there's no point since this is a copy
			 * and will have the same value, just take care when changing order of assignment */
			k1 = mpoly[fidx].loopstart + (((edge_order[eidx] - 1) + mp->totloop) % mp->totloop);  /* prev loop */
			k2 = mpoly[fidx].loopstart +   (edge_order[eidx]);

			mp->totloop = 4;

			CustomData_copy_data(&dm->loopData, &result->loopData, k2, (int)((numLoops * stride) + j + 0), 1);
			CustomData_copy_data(&dm->loopData, &result->loopData, k1, (int)((numLoops * stride) + j + 1), 1);
			CustomData_copy_data(&dm->loopData, &result->loopData, k1, (int)((numLoops * stride) + j + 2), 1);
			CustomData_copy_data(&dm->loopData, &result->loopData, k2, (int)((numLoops * stride) + j + 3), 1);

			if (flip == false) {
				ml[j].v = ed->v1;
				ml[j++].e = eidx;

				ml[j].v = ed->v2;
				ml[j++].e = (numEdges * stride) + old_vert_arr[ed->v2] + newEdges;

				ml[j].v = (do_shell ? ed->v2 : old_vert_arr[ed->v2]) + numVerts;
				ml[j++].e = (do_shell ? eidx : i) + numEdges;

				ml[j].v = (do_shell ? ed->v1 : old_vert_arr[ed->v1]) + numVerts;
				ml[j++].e = (numEdges * stride) + old_vert_arr[ed->v1] + newEdges;
			}
			else {
				ml[j].v = ed->v2;
				ml[j++].e = eidx;

				ml[j].v = ed->v1;
				ml[j++].e = (numEdges * stride) + old_vert_arr[ed->v1] + newEdges;

				ml[j].v = (do_shell ? ed->v1 : old_vert_arr[ed->v1]) + numVerts;
				ml[j++].e = (do_shell ? eidx : i) + numEdges;

				ml[j].v = (do_shell ? ed->v2 : old_vert_arr[ed->v2]) + numVerts;
				ml[j++].e = (numEdges * stride) + old_vert_arr[ed->v2] + newEdges;
			}

			origindex_edge[ml[j - 3].e] = ORIGINDEX_NONE;
			origindex_edge[ml[j - 1].e] = ORIGINDEX_NONE;

			/* use the next material index if option enabled */
			if (mat_ofs_rim) {
				mp->mat_nr += mat_ofs_rim;
				CLAMP(mp->mat_nr, 0, mat_nr_max);
			}
			if (crease_outer) {
				/* crease += crease_outer; without wrapping */
				char *cr = &(ed->crease);
				int tcr = *cr + crease_outer;
				*cr = tcr > 255 ? 255 : tcr;
			}

			if (crease_inner) {
				/* crease += crease_inner; without wrapping */
				char *cr = &(medge[numEdges + (do_shell ? eidx : i)].crease);
				int tcr = *cr + crease_inner;
				*cr = tcr > 255 ? 255 : tcr;
			}

#ifdef SOLIDIFY_SIDE_NORMALS
			if (do_side_normals) {
				normal_quad_v3(nor,
				               mvert[ml[j - 4].v].co,
				               mvert[ml[j - 3].v].co,
				               mvert[ml[j - 2].v].co,
				               mvert[ml[j - 1].v].co);

				add_v3_v3(edge_vert_nos[ed->v1], nor);
				add_v3_v3(edge_vert_nos[ed->v2], nor);
			}
#endif
		}

#ifdef SOLIDIFY_SIDE_NORMALS
		if (do_side_normals) {
			ed = medge + (numEdges * stride);
			for (i = 0; i < rimVerts; i++, ed++) {
				float nor_cpy[3];
				short *nor_short;
				int k;

				/* note, only the first vertex (lower half of the index) is calculated */
				normalize_v3_v3(nor_cpy, edge_vert_nos[ed->v1]);

				for (k = 0; k < 2; k++) { /* loop over both verts of the edge */
					nor_short = mvert[*(&ed->v1 + k)].no;
					normal_short_to_float_v3(nor, nor_short);
					add_v3_v3(nor, nor_cpy);
					normalize_v3(nor);
					normal_float_to_short_v3(nor_short, nor);
				}
			}

			MEM_freeN(edge_vert_nos);
		}
#endif

		MEM_freeN(new_vert_arr);
		MEM_freeN(new_edge_arr);

		MEM_freeN(edge_users);
		MEM_freeN(edge_order);
	}

	if (old_vert_arr)
		MEM_freeN(old_vert_arr);

	if (face_nors)
		MEM_freeN(face_nors);

	if (numFaces == 0 && numEdges != 0) {
		modifier_setError(md, "Faces needed for useful output");
	}

	return result;
}
Example #18
static int
yaml_parser_process_directives(yaml_parser_t *parser,
                               yaml_version_directive_t **version_directive_ref,
                               yaml_tag_directive_t **tag_directives_start_ref,
                               yaml_tag_directive_t **tag_directives_end_ref)
{
    yaml_tag_directive_t default_tag_directives[] = {
        {(yaml_char_t *)"!", (yaml_char_t *)"!"},
        {(yaml_char_t *)"!!", (yaml_char_t *)"tag:yaml.org,2002:"},
        {NULL, NULL}
    };
    yaml_tag_directive_t *default_tag_directive;
    yaml_version_directive_t *version_directive = NULL;
    struct {
        yaml_tag_directive_t *start;
        yaml_tag_directive_t *end;
        yaml_tag_directive_t *top;
    } tag_directives = { NULL, NULL, NULL };
    yaml_token_t *token;

    if (!STACK_INIT(parser, tag_directives, INITIAL_STACK_SIZE))
        goto error;

    token = PEEK_TOKEN(parser);
    if (!token) goto error;

    while (token->type == YAML_VERSION_DIRECTIVE_TOKEN ||
            token->type == YAML_TAG_DIRECTIVE_TOKEN)
    {
        if (token->type == YAML_VERSION_DIRECTIVE_TOKEN) {
            if (version_directive) {
                yaml_parser_set_parser_error(parser,
                                             "found duplicate %YAML directive", token->start_mark);
                goto error;
            }
            if (token->data.version_directive.major != 1
                    || token->data.version_directive.minor != 1) {
                yaml_parser_set_parser_error(parser,
                                             "found incompatible YAML document", token->start_mark);
                goto error;
            }
            version_directive = yaml_malloc(sizeof(yaml_version_directive_t));
            if (!version_directive) {
                parser->error = YAML_MEMORY_ERROR;
                goto error;
            }
            version_directive->major = token->data.version_directive.major;
            version_directive->minor = token->data.version_directive.minor;
        }

        else if (token->type == YAML_TAG_DIRECTIVE_TOKEN) {
            yaml_tag_directive_t value;
            value.handle = token->data.tag_directive.handle;
            value.prefix = token->data.tag_directive.prefix;

            if (!yaml_parser_append_tag_directive(parser, value, 0,
                                                  token->start_mark))
                goto error;
            if (!PUSH(parser, tag_directives, value))
                goto error;
        }

        SKIP_TOKEN(parser);
        token = PEEK_TOKEN(parser);
        if (!token) goto error;
    }

    for (default_tag_directive = default_tag_directives;
            default_tag_directive->handle; default_tag_directive++) {
        if (!yaml_parser_append_tag_directive(parser, *default_tag_directive, 1,
                                              token->start_mark))
            goto error;
    }

    if (version_directive_ref) {
        *version_directive_ref = version_directive;
    }
    if (tag_directives_start_ref) {
        if (STACK_EMPTY(parser, tag_directives)) {
            *tag_directives_start_ref = *tag_directives_end_ref = NULL;
            STACK_DEL(parser, tag_directives);
        }
        else {
            *tag_directives_start_ref = tag_directives.start;
            *tag_directives_end_ref = tag_directives.top;
        }
    }
    else {
        STACK_DEL(parser, tag_directives);
    }

    return 1;

error:
    yaml_free(version_directive);
    while (!STACK_EMPTY(parser, tag_directives)) {
        yaml_tag_directive_t tag_directive = POP(parser, tag_directives);
        yaml_free(tag_directive.handle);
        yaml_free(tag_directive.prefix);
    }
    STACK_DEL(parser, tag_directives);
    return 0;
}
Example #19
static void run_loaded_program(size_t argc, char** argv, uintptr_t kstack_top)
{
  // copy phdrs to user stack
  size_t stack_top = current.stack_top - current.phdr_size;
  memcpy((void*)stack_top, (void*)current.phdr, current.phdr_size);
  current.phdr = stack_top;

  // copy argv to user stack
  for (size_t i = 0; i < argc; i++) {
    size_t len = strlen((char*)(uintptr_t)argv[i])+1;
    stack_top -= len;
    memcpy((void*)stack_top, (void*)(uintptr_t)argv[i], len);
    argv[i] = (void*)stack_top;
  }

  // copy envp to user stack
  const char* envp[] = {
    // environment goes here
  };
  size_t envc = sizeof(envp) / sizeof(envp[0]);
  for (size_t i = 0; i < envc; i++) {
    size_t len = strlen(envp[i]) + 1;
    stack_top -= len;
    memcpy((void*)stack_top, envp[i], len);
    envp[i] = (void*)stack_top;
  }

  // align stack
  stack_top &= -sizeof(void*);

  struct {
    long key;
    long value;
  } aux[] = {
    {AT_ENTRY, current.entry},
    {AT_PHNUM, current.phnum},
    {AT_PHENT, current.phent},
    {AT_PHDR, current.phdr},
    {AT_PAGESZ, RISCV_PGSIZE},
    {AT_SECURE, 0},
    {AT_RANDOM, stack_top},
    {AT_NULL, 0}
  };

  // place argc, argv, envp, auxp on stack
  #define PUSH_ARG(type, value) do { \
    *((type*)sp) = (type)value; \
    sp += sizeof(type); \
  } while (0)

  #define STACK_INIT(type) do { \
    unsigned naux = sizeof(aux)/sizeof(aux[0]); \
    stack_top -= (1 + argc + 1 + envc + 1 + 2*naux) * sizeof(type); \
    stack_top &= -16; \
    long sp = stack_top; \
    PUSH_ARG(type, argc); \
    for (unsigned i = 0; i < argc; i++) \
      PUSH_ARG(type, argv[i]); \
    PUSH_ARG(type, 0); /* argv[argc] = NULL */ \
    for (unsigned i = 0; i < envc; i++) \
      PUSH_ARG(type, envp[i]); \
    PUSH_ARG(type, 0); /* envp[envc] = NULL */ \
    for (unsigned i = 0; i < naux; i++) { \
      PUSH_ARG(type, aux[i].key); \
      PUSH_ARG(type, aux[i].value); \
    } \
  } while (0)

  STACK_INIT(uintptr_t);

  if (current.cycle0) { // start timer if so requested
    current.time0 = rdtime();
    current.cycle0 = rdcycle();
    current.instret0 = rdinstret();
  }

  trapframe_t tf;
  init_tf(&tf, current.entry, stack_top);
  __clear_cache(0, 0);
  write_csr(sscratch, kstack_top);
  start_user(&tf);
}
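
For reference, a sketch of the stack image that the STACK_INIT(uintptr_t) invocation above builds (derived only from the macro body; nothing here is project-specific beyond what the code shows):

/*
 *   stack_top -> argc
 *                argv[0] .. argv[argc-1], NULL
 *                envp[0] .. envp[envc-1], NULL
 *                { AT_ENTRY, ... } .. { AT_RANDOM, ... } { AT_NULL, 0 }
 *
 * stack_top ends up 16-byte aligned and is later passed to init_tf() as the
 * user stack pointer, which is the usual ELF/System V startup layout.
 */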
Example #20
/*  Strrcl_ChipUpFinish - Utility function to complete the chip_up process.
 *	 When WMI_READY is received from the chip this function clears the chipDown
 * 	 boolean and sends the wmi store recall command to the chip so that its
 *	 associated State can be refreshed.
 *      void *pCxt - the driver context.
 *****************************************************************************/
static A_STATUS Strrcl_ChipUpFinish(void *pCxt)
{
    A_DRIVER_CONTEXT *pDCxt = GET_DRIVER_COMMON(pCxt);
    WMI_STORERECALL_RECALL_CMD *pDsetInfo;
    uint32_t dset_info_len;

    if (pDCxt->wmiReady == true)
    {
        switch (pDCxt->strrclState)
        {
            case STRRCL_ST_ACTIVE:
                /* clear the chipDown boolean to allow outside activity to resume */
                pDCxt->chipDown = false;

                if (A_OK != STACK_INIT(pCxt))
                {
                    break;
                }

                pDCxt->strrclState = STRRCL_ST_ACTIVE_B;
            // INTENTIONAL FALL_THRU
            case STRRCL_ST_ACTIVE_B:
/* send recall */
#if 0
		    pEv = (WMI_STORERECALL_STORE_EVENT*)pDCxt->strrclCxt;

            if(A_OK != wmi_storerecall_recall_cmd(pDCxt->pWmiCxt, A_LE2CPU32(pEv->length), &pEv->data[0])){
                break; //try again later this is likely because a previous wmi cmd has not finished.
            }
#elif !defined(ENABLE_LARGE_DSET)

		    setup_host_dset(pCxt);

		    pDsetData = pDCxt->tempStorage;

            if(A_OK != wmi_storerecall_recall_cmd(pDCxt->pWmiCxt, A_LE2CPU32(pDCxt->strrclCxtLen), pDsetData)){
                break; //try again later this is likely because a previous wmi cmd has not finished.
            }
#else
                setup_host_dset(pCxt);

                pDsetInfo = (WMI_STORERECALL_RECALL_CMD *)pDCxt->tempStorage;
                if (pDsetInfo->length == 0)
                {
                    dset_info_len = sizeof(WMI_STORERECALL_RECALL_CMD);
                }
                else
                {
                    dset_info_len = sizeof(WMI_STORERECALL_RECALL_CMD) - sizeof(uint8_t);
                    dset_info_len += sizeof(WMI_STORERECALL_RECALL_DSET) * pDsetInfo->length;
                }

                if (A_OK != wmi_storerecall_recall_cmd(pDCxt->pWmiCxt, dset_info_len, pDsetInfo))
                {
                    break; // try again later this is likely because a previous wmi cmd has not finished.
                }
#endif

                pDCxt->strrclCxt = NULL;
                pDCxt->strrclCxtLen = 0;

                //            pDCxt->strrclState = STRRCL_ST_AWAIT_FW;

                pDCxt->strrclState = STRRCL_ST_INIT;
                pDCxt->strrclBlock = false;

                /* Indicate completion to the custom layer */
                API_STORE_RECALL_EVENT(pCxt);

                /* clear this function from the driver's temporary calls */
                pDCxt->asynchRequest = NULL;
                /* Since we are setting MAX_PERF before going to store-recall we need to set the state
                   after we wakes up */
                restore_power_state(pDCxt, 1); /* POWER_STATE_MOVED_FOR_STRRCL */

                break; // done!!
            default:
                A_ASSERT(0); // should never happen
#if DRIVER_CONFIG_DISABLE_ASSERT
                break;
#endif
        }
    }

    return A_OK;
}
Example #21
/**
//
// Handle thread creation when the address space is available
// and we can recover from faults (from bad user pointers...)
//*/
void rdecl
thread_specret(THREAD *thp) {
	struct _thread_local_storage	*tsp;
	const struct _thread_attr		*attr;
	void							*init_cc;
	uintptr_t						 stack_top;
	uintptr_t						 new_sp;
	int								 verify;

	thp->status = (void *)EFAULT;
	if((attr = thp->args.wa.attr)) {
		//RD_VERIFY_PTR(act, attr, sizeof(*attr));
		//RD_PROBE_INT(act, attr, sizeof(*attr) / sizeof(int));

		// Check for attributes which we do not support.
		// If there is a stack addr there must be a stack size.
		// If there is a stack size it must be at least PTHREAD_STACK_MIN.
		// If EXPLICIT sched, make sure policy and priority are valid.
		//                    add validation of the sporadic server attributes
		if(attr->__flags & PTHREAD_SCOPE_PROCESS) {
			verify = ENOTSUP;
		} else if((attr->__stackaddr || attr->__stacksize) && attr->__stacksize < PTHREAD_STACK_MIN) {
			verify = EINVAL;
		} else if(attr->__flags & PTHREAD_EXPLICIT_SCHED) {
			verify = kerschedok(thp, attr->__policy, (struct sched_param *)&attr->__param);
		} else {
			verify = EOK;
		}

		if(verify != EOK) {
			lock_kernel();
			thp->status = (void *)verify;
			thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
			return;
			// RUSH3: this comes out in loader_exit() but EINTR overridden
		}
	}

	// Check if we need to allocate a stack
	if(!(thp->flags & _NTO_TF_ALLOCED_STACK)) {
		uintptr_t					guardsize = 0;
		unsigned					lazystate = 0;
		unsigned					prealloc  = 0;

		if(attr) {
			// Get the user requested values.
			thp->un.lcl.stackaddr = attr->__stackaddr;
			thp->un.lcl.stacksize = attr->__stacksize;
			if(attr->__stackaddr != NULL &&
			  !WR_PROBE_PTR(thp, thp->un.lcl.stackaddr, thp->un.lcl.stacksize)) {
				lock_kernel();
				thp->status = (void *)EINVAL;
				thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
				return;
			}
			guardsize = attr->__guardsize;
			prealloc = attr->__prealloc;
			lazystate = attr->__flags & PTHREAD_NOTLAZYSTACK_MASK;
		}
		if(thp->un.lcl.stacksize == 0) {
			if(__cpu_flags & CPU_FLAG_MMU) {
				thp->un.lcl.stacksize = DEF_VIRTUAL_THREAD_STACKSIZE;
			} else {
				thp->un.lcl.stacksize = DEF_PHYSICAL_THREAD_STACKSIZE;
			}
		}
		if(!thp->un.lcl.stackaddr) {
			lock_kernel();

			if(thp->process->pid != PROCMGR_PID && procmgr.process_stack_code) {
				unspecret_kernel();

				if(thp->state != STATE_STACK) {
					// Must do modification of user address spaces at process time
					struct sigevent		event;

					CRASHCHECK(thp != actives[KERNCPU]);

					event.sigev_notify = SIGEV_PULSE;
					event.sigev_coid = PROCMGR_COID;
					event.sigev_value.sival_int = SYNC_OWNER(thp);
					event.sigev_priority = thp->priority;
					event.sigev_code = procmgr.process_stack_code;

					if(sigevent_proc(&event)) {
						// Pulse failed...
						thp->status = (void *)EAGAIN;
						thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
						return;
					}

					// we may not be running after sigevent_proc()
					unready(thp, STATE_STACK);
					thp->prev.thread = (void *)guardsize;
					thp->next.thread = (void *)lazystate;
					thp->status = (void *)prealloc;
				}
				return;
			}

			guardsize = 0;
			if(procmgr_stack_alloc(thp) != EOK) {
				thp->status = (void *)EAGAIN;
				thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
				return;
			}
			thp->flags |= _NTO_TF_ALLOCED_STACK;
			unlock_kernel();
			SPECRET_PREEMPT(thp);
		}
	}

	// Inherit or assign a scheduling policy and params.
	if(attr) {
		if(attr->__flags & PTHREAD_MULTISIG_DISALLOW) {
			thp->flags |= _NTO_TF_NOMULTISIG;
		}
		thp->args.wa.exitfunc = attr->__exitfunc;
	}

	// Clear detach state if there is a parent

	// Get the *real* attribute structure pointer - we may have
	// NULL'd out thp->args.wa.attr and then been preempted
	attr = thp->args.wa.real_attr;
	if(thp->join && (!attr || !(attr->__flags & PTHREAD_CREATE_DETACHED))) {
		thp->flags &= ~_NTO_TF_DETACHED;
	}

	// Make thread lookups valid
	lock_kernel();
	vector_flag(&thp->process->threads, thp->tid, 0);
	thp->args.wa.attr = 0;

	if(actives[KERNCPU] != thp) {
		return;
	}

	// Load the necessary registers for the thread to start execution.
	stack_top = STACK_INIT((uintptr_t)thp->un.lcl.stackaddr, thp->un.lcl.stacksize);
	STACK_ALLOC(thp->un.lcl.tls, new_sp, stack_top, sizeof *thp->un.lcl.tls);
	STACK_ALLOC(init_cc, new_sp, new_sp, STACK_INITIAL_CALL_CONVENTION_USAGE);
	SETKSP(thp, new_sp);

	// Could fault again while setting tls in stack...
	unlock_kernel();
	SPECRET_PREEMPT(thp);

	SET_XFER_HANDLER(&threadstack_fault_handlers);

	tsp = thp->un.lcl.tls;
	memset(tsp, 0, sizeof(*tsp));
	// Set the initial calling convention usage section to zero - will
	// help any stack traceback code to determine when it has hit the
	// top of the stack.
	memset(init_cc, 0, STACK_INITIAL_CALL_CONVENTION_USAGE);

	if(attr) {
		tsp->__flags = attr->__flags & (PTHREAD_CSTATE_MASK|PTHREAD_CTYPE_MASK);
	}
	tsp->__arg = thp->args.wa.arg;
	tsp->__exitfunc = thp->args.wa.exitfunc;
	if(tsp->__exitfunc == NULL && thp->process->valid_thp != NULL) {
		/*
			We don't have thread termination (exitfunc) for this thread.
			Likely it was created with SIGEV_THREAD. Use the same one
			as for the valid_thp's. This mostly works since all threads
			created via pthread_create have the same exit function.
		*/
		tsp->__exitfunc = thp->process->valid_thp->un.lcl.tls->__exitfunc;
	}

	tsp->__errptr = &tsp->__errval;
	if(thp->process->pid == PROCMGR_PID) {
		tsp->__stackaddr = (uint8_t *)thp->un.lcl.stackaddr;
	} else {
		tsp->__stackaddr = (uint8_t *)thp->un.lcl.stackaddr + ((attr == NULL) ? 0 : attr->__guardsize);
	}
	tsp->__pid = thp->process->pid;
	tsp->__tid = thp->tid + 1;
	tsp->__owner = SYNC_OWNER(thp);

	// Touch additional stack if requested in attr
	// @@@ NYI
	// if(attr->guaranteedstacksize) ...

	SET_XFER_HANDLER(NULL);

	cpu_thread_waaa(thp);

	// Let the parent continue. The tid was stuffed during thread_create().
	if(thp->join && thp->join->state == STATE_WAITTHREAD) {
		lock_kernel();
		ready(thp->join);
		thp->join = NULL;
	}

	//
	// Don't change priority until parent thread freed to run again
	// - we might get a priority inversion otherwise.
	//
	if((attr != NULL) && (attr->__flags & PTHREAD_EXPLICIT_SCHED)) {
		lock_kernel();

		if(sched_thread(thp, attr->__policy, (struct sched_param *)&attr->__param) != EOK) {
			/* We should have some error handling if sched_thread() fails ...
			thp->status = (void *)EAGAIN;
			thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
			return;
			*/
		}
	}

	/* Only done once for the first thread running */
	if(thp->process->process_priority == 0) {
		thp->process->process_priority = thp->priority;
	}

	/* a thread is born unto a STOPPED process - make sure it stops too! */
	if ( thp->process->flags & (_NTO_PF_DEBUG_STOPPED|_NTO_PF_STOPPED) ) {
		thp->flags |= _NTO_TF_TO_BE_STOPPED;
	}
	thp->flags &= ~_NTO_TF_WAAA;
}
Example #22
void
syntax_init (void)
{
  STACK_INIT (props);
  STACK_INIT (U8);
}
Example #23
int
yaml_document_start_event_initialize(yaml_event_t *event,
        yaml_version_directive_t *version_directive,
        yaml_tag_directive_t *tag_directives_start,
        yaml_tag_directive_t *tag_directives_end,
        int implicit)
{
    struct {
        yaml_error_type_t error;
    } context;
    yaml_mark_t mark = { 0, 0, 0 };
    yaml_version_directive_t *version_directive_copy = NULL;
    struct {
        yaml_tag_directive_t *start;
        yaml_tag_directive_t *end;
        yaml_tag_directive_t *top;
    } tag_directives_copy = { NULL, NULL, NULL };
    yaml_tag_directive_t value = { NULL, NULL };

    assert(event);          /* Non-NULL event object is expected. */
    assert((tag_directives_start && tag_directives_end) ||
            (tag_directives_start == tag_directives_end));
                            /* Valid tag directives are expected. */

    if (version_directive) {
        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
        if (!version_directive_copy) goto error;
        version_directive_copy->major = version_directive->major;
        version_directive_copy->minor = version_directive->minor;
    }

    if (tag_directives_start != tag_directives_end) {
        yaml_tag_directive_t *tag_directive;
        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
            goto error;
        for (tag_directive = tag_directives_start;
                tag_directive != tag_directives_end; tag_directive ++) {
            assert(tag_directive->handle);
            assert(tag_directive->prefix);
            if (!yaml_check_utf8(tag_directive->handle,
                        strlen((char *)tag_directive->handle)))
                goto error;
            if (!yaml_check_utf8(tag_directive->prefix,
                        strlen((char *)tag_directive->prefix)))
                goto error;
            value.handle = yaml_strdup(tag_directive->handle);
            value.prefix = yaml_strdup(tag_directive->prefix);
            if (!value.handle || !value.prefix) goto error;
            if (!PUSH(&context, tag_directives_copy, value))
                goto error;
            value.handle = NULL;
            value.prefix = NULL;
        }
    }

    DOCUMENT_START_EVENT_INIT(*event, version_directive_copy,
            tag_directives_copy.start, tag_directives_copy.top,
            implicit, mark, mark);

    return 1;

error:
    yaml_free(version_directive_copy);
    while (!STACK_EMPTY(context, tag_directives_copy)) {
        yaml_tag_directive_t value = POP(context, tag_directives_copy);
        yaml_free(value.handle);
        yaml_free(value.prefix);
    }
    STACK_DEL(context, tag_directives_copy);
    yaml_free(value.handle);
    yaml_free(value.prefix);

    return 0;
}
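
A hedged sketch of feeding the initialized event to an emitter; yaml_emitter_emit() takes ownership of the event, so no explicit yaml_event_delete() is needed here (the helper name is illustrative).

#include <yaml.h>

static int emit_document_start_demo(yaml_emitter_t *emitter)
{
    yaml_event_t event;

    if (!yaml_stream_start_event_initialize(&event, YAML_UTF8_ENCODING) ||
        !yaml_emitter_emit(emitter, &event))
        return 0;

    /* no %YAML/%TAG directives, implicit document start */
    if (!yaml_document_start_event_initialize(&event, NULL, NULL, NULL, 1) ||
        !yaml_emitter_emit(emitter, &event))
        return 0;

    return 1;
}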
Example #24
/*
 * main() - 
 *
 * The entry point to the program.
 *
 */
int main(int argc, char *argv[]) {
  	// set maximum array size
	LEAVES = (argc <= 1 ? 10 : atoi( argv[1] ) ) ;
  	MAX = (argc <= 1 ? 10 : 2 * LEAVES );

	printf ("\n**** beginning pointer tests ** \n" ) ; 
	enum TreeType treetype = 0 ;
	for ( treetype = 0 ; treetype < TTNUM ; treetype++ ) { 
		printf ( "** testing treetype %s\n" , TTLabel[treetype] ) ;
		MaxErrCode = STACK_INIT ( &randomForest , MAX ) ;
		if ( MaxErrCode ) {
			MaxErrCode = 99 ;
			printf ( "ERROR Forest could not be initialized (%d)\n", MaxErrCode ) ;
			exit ( MaxErrCode ) ;
		} ;
					if ( DEBUG ) { STACK_DUMP ( &randomForest ) ; }
		TreeHarnessTest ( treetype ) ; 
		STACK_DUMP ( &randomForest ) ; 
		}

	printf ("**** beginning STACK tests ** \n" ) ; 
	// *randomForest is now a stack of full trees
	int retCode = 0 ; 
	TYPE_PTRTREE LeftTree, RightTree = NULL; 	

	printf ( "** beginning POP test ** \n" ) ; 
					if ( DEBUG ) { 
						STACK_DUMP ( &randomForest ) ;
						}
	LeftTree = STACK_POP ( &randomForest ) ; 
	STACK_DUMP ( &randomForest ) ; 
					if ( DEBUG ) {
						STACK_DUMP ( &randomForest ) ;
						printf ( "..after STACK_POP, STACK_DUMP returns %p\n" , LeftTree ) ; 
						}

	printf ("** beginning PUSH test ** \n" ) ; 
					if ( DEBUG ) { 
						STACK_DUMP ( &randomForest ) ;
						}
	retCode = STACK_PUSH ( &randomForest, LeftTree ) ; 
	STACK_DUMP ( &randomForest ) ; 
					if ( DEBUG ) {
						STACK_DUMP ( &randomForest ) ;
						printf ( "..after STACK_PUSH returns %d\n" , retCode ) ; 
						}

	printf ("** beginning LIFT test ** \n" ) ;
					if ( DEBUG ) { 
						STACK_DUMP ( &randomForest ) ;
						}
	retCode = STACK_LIFT ( &randomForest ) ; 
	STACK_DUMP ( &randomForest ) ; 
					if ( DEBUG ) {
						STACK_DUMP ( &randomForest ) ;
						printf ( "..after STACK_LIFT returns %d\n" , retCode ) ; 
						}

	printf ("** beginning SWAP test ** \n" ) ; 
					if ( DEBUG ) { 
						STACK_DUMP ( &randomForest ) ;
						}
	retCode = STACK_SWAP ( &randomForest , 
		STACK_TOP_IS ( &randomForest ) , 
		STACK_BOTTOM_IS ( &randomForest ) 
		) ;
					if ( DEBUG ) {
						STACK_DUMP ( &randomForest ) ;
						printf ( 
							"..after STACK_SWAP ( %u, %u ) returning %d\n" , 
							STACK_TOP_IS ( &randomForest ) , 
							STACK_BOTTOM_IS ( &randomForest ) ,
							retCode 
							) ; 
						}

	}  // main()