/*
 * Split a '/'-separated key path (e.g. "/a/b/c") into its segments,
 * append each heap-allocated segment to the given list and report the
 * number of segments through key_num.
 */
static BOOL tg_shared_preferences_parse_keypath(const CHAR* key_path, struct array_list* list, INT32* key_num)
{
    UINT16 path_len = 0;
    CHAR* key = NULL;
    const CHAR* itor1 = key_path;
    const CHAR* itor2 = key_path;
    INT32 counter = 0;

    return_val_if_fail(key_path, FALSE);
    return_val_if_fail(list, FALSE);
    return_val_if_fail(key_num, FALSE);

    path_len = strlen(key_path);
    return_val_if_fail((path_len > 0), FALSE);

    /* skip a leading '/' */
    if (key_path[0] == '/')
    {
        itor1++;
        itor2++;
    }

    while (1)
    {
        if (*itor1 == '\0')
        {
            INT32 len = (INT32)(itor1 - itor2);
            if (len == 0) break;            /* trailing '/' or empty path: nothing left to copy */
            key = TG_CALLOC(len + 1, 1);
            if (key == NULL) break;
            strncpy(key, itor2, len);
            array_list_add(list, (void*)key);
            counter++;
            break;
        }
        else if (*itor1 == '/')
        {
            INT32 len = (INT32)(itor1 - itor2);
            if (len == 0) break;            /* empty segment ("//") terminates parsing */
            key = TG_CALLOC(len + 1, 1);
            if (key == NULL) break;
            strncpy(key, itor2, len);
            array_list_add(list, (void*)key);
            counter++;
            itor2 = itor1 + 1;
        }
        itor1++;
    }

    *key_num = counter;
    return (counter > 0);   /* the function returns BOOL, not the raw segment count */
}
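A minimal usage sketch for the parser above, assuming <stdio.h> and the project's basic types (CHAR, INT32) are available. The constructor, accessor, destructor and free names here (array_list_create, array_list_get, array_list_destroy, TG_FREE) are placeholders for whatever the surrounding module actually provides; the parser itself only requires that array_list_add append a pointer.
/* Hypothetical driver: split "/settings/display/brightness" into its segments. */
static void dump_keypath_example(void)
{
    struct array_list* keys = array_list_create();          /* placeholder constructor */
    INT32 key_num = 0;
    INT32 i;

    if (tg_shared_preferences_parse_keypath("/settings/display/brightness", keys, &key_num))
    {
        for (i = 0; i < key_num; i++)
        {
            CHAR* key = (CHAR*)array_list_get(keys, i);     /* placeholder accessor */
            printf("key[%d] = %s\n", i, key);               /* -> settings, display, brightness */
            TG_FREE(key);                                   /* segments were TG_CALLOC'ed by the parser */
        }
    }
    array_list_destroy(keys);                               /* placeholder destructor */
}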
void test_array_list_add_and_remove ()
{
  array_list *list = create_array_list_simple ();
  TEST_ASSERT_NOT_NULL (list);

  // try to append an item and get it back later
  char *str1 = "hello";
  array_list_append (list, str1);
  TEST_ASSERT_EQUAL_INT (list->item_count, 1);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 0), str1);

  // try to append multiple items and get one back later
  char *strs1[] = {"world", "!"};
  array_list_append_all (list, (void **) strs1, 2);
  TEST_ASSERT_EQUAL_INT (list->item_count, 3);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 1), strs1[0]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 2), strs1[1]);

  // try to add an item at a specific position
  char *str2 = " ";
  array_list_add (list, 1, str2);
  TEST_ASSERT_EQUAL_INT (list->item_count, 4);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 0), str1);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 1), str2);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 2), strs1[0]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 3), strs1[1]);
  
  // try to add multiple items at a specific position
  char *strs2[] = {"WORLD", "?", "\n", "HELLO "};
  array_list_add_all (list, 2, (void **) strs2, 4);
  TEST_ASSERT_EQUAL_INT (list->item_count, 8);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 0), str1);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 1), str2);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 2), strs2[0]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 3), strs2[1]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 4), strs2[2]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 5), strs2[3]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 6), strs1[0]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 7), strs1[1]);

  // try to remove an item
  array_list_remove (list, 6);
  TEST_ASSERT_EQUAL_INT (list->item_count, 7);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 0), str1);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 1), str2);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 2), strs2[0]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 3), strs2[1]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 4), strs2[2]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 5), strs2[3]);
  TEST_ASSERT_EQUAL_STR (array_list_get (list, 6), strs1[1]);

  delete_array_list (list);
}
Example #3
/*
 * get the next match from the current position,
 *		through the dictionary.
 *	this will return all the matches.
 *
 * @return friso_array_t that contains all the matches.
 */
__STATIC_API__ friso_array_t get_next_match( friso_t friso, friso_task_t task, uint_t idx ) {

	register uint_t t;
	string_buffer_t sb = new_string_buffer_with_string( task->buffer );

	//create a match dynamic array.
	friso_array_t match = new_array_list_with_opacity( friso->max_len );
	array_list_add( match, friso_dic_get( friso->dic, __LEX_CJK_WORDS__, task->buffer ) );

	for ( t = 1; t < friso->max_len
			&& ( task->bytes = read_next_word( task, &idx, task->buffer ) ) != 0; t++ ) {

		task->unicode = get_utf8_unicode( task->buffer );
		if ( utf8_whitespace( task->unicode ) ) 	break;
		if ( ! utf8_cjk_string( task->unicode ) ) break;

		//append task->buffer to the string buffer.
		string_buffer_append( sb, task->buffer );

		//check the CJK dictionary.
		if ( friso_dic_match( friso->dic, __LEX_CJK_WORDS__, sb->buffer ) ) {
			/*
			 * add the lex_entry_t to the match list.
			 * key point here:
			 *		we use friso_dic_get to get the address of the lex_entry_cdt
			 *		stored in the dictionary instead of creating a new one.
			 * so:
			 *		1. we never have to manage the allocation of a new lex_entry_cdt.
			 *		2. it is more efficient, of course.
			 */
			array_list_add( match, friso_dic_get( friso->dic, __LEX_CJK_WORDS__, sb->buffer ) );
		}
	}

	/* release the temporary string buffer */
	free_string_buffer( sb );
	//array_list_trim( match );
	
	return match;
}
Example #4
// NOTE: The queue functions are not multi-thread safe; the caller must acquire the lock before calling.
// Add a message to the queue.
// RETURN VALUE:    TRUE if the message was queued, FALSE if it could not be queued
static BOOLEAN ipc_enqueue_message(IPC_CPU_CONTEXT *ipc, IPC_MESSAGE_TYPE type, IPC_HANDLER_FN handler, void* arg,
                                   volatile UINT32 *before_handler_ack, volatile UINT32 *after_handler_ack)
{
    IPC_MESSAGE  msg;
    CPU_ID       cpu_id = IPC_CPU_ID();

    VMM_ASSERT(ipc != NULL);
    VMM_ASSERT(handler != NULL);
    msg.type = type;
    msg.from = cpu_id;
    msg.handler = handler;
    msg.arg = arg;
    msg.before_handler_ack = before_handler_ack;
    msg.after_handler_ack = after_handler_ack;
    return array_list_add(ipc->message_queue, &msg);
}
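Note that msg is a stack local, so array_list_add here must copy the element by value into the queue's own storage rather than retaining the pointer. Below is a minimal sender-side sketch; IPC_TYPE_NORMAL_HYP and the lock helpers are hypothetical placeholders for whatever the surrounding IPC module actually provides, not names from the original sources, and the assumption is that the target CPU sets *after_handler_ack once the handler has run.
// Hypothetical usage sketch: queue a message for another CPU under the lock and
// spin until the receiving side acknowledges that the handler has run.
static BOOLEAN ipc_send_message_hyp(IPC_CPU_CONTEXT *dst, IPC_HANDLER_FN handler, void *arg)
{
    volatile UINT32 done = 0;
    BOOLEAN queued;

    ipc_lock_acquire_hyp(dst);                      // placeholder: queue ops require the caller to hold the lock
    queued = ipc_enqueue_message(dst, IPC_TYPE_NORMAL_HYP, handler, arg,
                                 NULL /* before-handler ack */, &done /* after-handler ack */);
    ipc_lock_release_hyp(dst);                      // placeholder lock release

    if (queued)
    {
        while (done == 0)                           // wait for the target CPU to run the handler
        {
            ;                                       // a real implementation would pause or yield here
        }
    }
    return queued;
}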
Example #5
int main( int argc, char **args ) {
    
    //create a new array list.
    friso_array_t array = new_array_list();    
    fstring keys[] = {
        "chenmanwen", "yangqinghua",
        "chenxin", "luojiangyan", "xiaoyanzi", "bibi",
        "zhangrenfang", "yangjian",
        "liuxiao", "pankai",
        "chenpei", "liheng", "zhangzhigang", "zhgangyishao", "yangjiangbo",
        "caizaili", "panpan", "xiaolude", "yintanwen"
    };
    int j, idx = 2, len = sizeof( keys ) / sizeof( fstring );

    for ( j = 0; j < len; j++ ) {
        array_list_add( array, keys[j] );
    }

    printf("length=%d, allocations=%d\n", array->length, array->allocs );
    array_list_trim( array );
    printf("after tirm length=%d, allocations=%d\n", array->length, array->allocs );
    printf("idx=%d, value=%s\n", idx, ( fstring ) array_list_get( array, idx ) );

    printf("\nAfter set %dth item.\n", idx );
    array_list_set( array, idx, "chenxin__" );
    printf("idx=%d, value=%s\n", idx, ( fstring ) array_list_get( array, idx ) );

    printf("\nAfter remove %dth item.\n", idx );
    array_list_remove( array, idx );
    printf("length=%d, allocations=%d\n", array->length, array->allocs );
    printf("idx=%d, value=%s\n", idx, ( fstring ) array_list_get( array, idx ) );

    printf("\nInsert a item at %dth\n", idx );
    array_list_insert( array, idx, "*chenxin*" );
    printf("idx=%d, value=%s\n", idx, ( fstring ) array_list_get( array, idx ) );

    free_array_list( array );

    return 0;
}
Example #6
bool jstruct_allocated_add(struct array_list *arr, enum jstruct_allocated_type type, void *data) {
    struct jstruct_allocated *allocated = malloc(sizeof(struct jstruct_allocated));
    if (allocated == NULL)
        return false;
    allocated->type = type;
    allocated->data = data;
    /* array_list_add returns 0 on success */
    return array_list_add(arr, allocated) == 0;
}
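Entries tracked this way eventually have to be released. Below is a minimal cleanup sketch, not part of the original jstruct API: it assumes a json-c style struct array_list whose length and array fields are directly readable, and it frees the tracked pointer the same way regardless of type, where a real implementation would likely dispatch on allocated->type.
/* Hypothetical cleanup helper: release every tracked allocation and its wrapper. */
void jstruct_allocated_free_all(struct array_list *arr) {
    size_t i;
    for (i = 0; i < (size_t) arr->length; i++) {
        struct jstruct_allocated *allocated = arr->array[i];
        free(allocated->data);   /* assumes the tracked data is a plain heap block */
        free(allocated);
    }
}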
Example #7
/**
 * load all the valid words from a specified lexicon file.
 *
 * @param friso      friso instance that holds the dictionary (a hash array)
 * @param config     friso configuration instance
 * @param lex        the lexicon type
 * @param lex_file   the path of the lexicon file
 * @param length     the maximum length of a word item
 */
FRISO_API void friso_dic_load( 
        friso_t friso,
        friso_config_t config,
        friso_lex_t lex,
        fstring lex_file,
        uint_t length ) 
{

    FILE * _stream;
    char __char[1024], _buffer[512];
    fstring _line;
    string_split_entry sse;

    fstring _word;
    char _sbuffer[512];
    fstring _syn;
    friso_array_t sywords;
    uint_t _fre;

    if ( ( _stream = fopen( lex_file, "rb" ) ) != NULL ) 
    {
        while ( ( _line = file_get_line( __char, _stream ) ) != NULL ) 
        {
            //skip comment lines, but only when the line is longer than a single
            //character, so the lone '#' entry in the stopwords dictionary is kept.
            if ( _line[0] == '#' && strlen(_line) > 1 ) continue;

            //handle the stopwords.
            if ( lex == __LEX_STOPWORDS__ )
            {
                //skip chinese words whose length is greater than the max length.
                if ( ((int)_line[0]) < 0 && strlen( _line ) > length ) continue;
                friso_dic_add( friso->dic, __LEX_STOPWORDS__, 
                        string_copy_heap( _line, strlen(_line) ), NULL ); 
                continue;
            }

            //split the fstring with '/'.
            string_split_reset( &sse, "/", _line); 
            if ( string_split_next( &sse, _buffer ) == NULL ) continue;

            //1. get the word.
            _word = string_copy_heap( _buffer, strlen(_buffer) );

            if ( string_split_next( &sse, _buffer ) == NULL ) 
            {
                //normal lexicon type, 
                //add them to the dictionary directly
                friso_dic_add( friso->dic, lex, _word, NULL ); 
                continue;
            }

            /*
             * filter out the words whose length is larger
             *     than the specified limit,
             * except for __LEX_ECM_WORDS__ and __LEX_CEM_WORDS__
             *     (english __LEX_STOPWORDS__ were already handled above).
             */
            if ( ! ( lex == __LEX_ECM_WORDS__ || lex == __LEX_CEM_WORDS__ )
                    && strlen( _word ) > length ) 
            {
                FRISO_FREE(_word);
                continue;
            }

            //2. get the synonym words.
            _syn = NULL;
            if ( strcmp( _buffer, "null" ) != 0 )
                _syn = string_copy( _buffer, _sbuffer, strlen(_buffer) );

            //3. get the word frequency if it is available.
            _fre = 0;
            if ( string_split_next( &sse, _buffer ) != NULL )
                _fre = atoi( _buffer );

            /*
             * split the synonym words on the "," mark
             *     and put them into an array list if the synonyms are not NULL.
             */
            sywords = NULL;
            if ( config->add_syn && _syn != NULL ) 
            {
                string_split_reset( &sse, ",", _sbuffer );
                sywords = new_array_list_with_opacity(5);
                while ( string_split_next( &sse, _buffer ) != NULL ) 
                {
                    if ( strlen(_buffer) > length ) continue;
                    array_list_add( sywords, 
                            string_copy_heap(_buffer, strlen(_buffer)) );
                }
                sywords = array_list_trim( sywords );
            }

            //4. add the word item
            friso_dic_add_with_fre( 
                    friso->dic, lex, _word, sywords, _fre );
        } 

        fclose( _stream );
    } else {
        printf("Warning: Fail to open lexicon file %s\n", lex_file);
    } 
}
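For reference, this is the line format the loader above expects: fields split on '/', with the word first, then a comma-separated synonym list (or the literal null), then an optional frequency; a line starting with '#' is skipped unless '#' is the only character. The concrete entries below are only an illustration inferred from the parsing code, not lines taken from a real friso lexicon file:
# comment line, skipped by the loader
中国/中华,华夏/1680
研究/null/50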
Example #8
/*
 * get the next cjk word from the current position, with complex mode.
 *	this is the core of the mmseg chinese word segmentation algorithm.
 *	we use four rules to filter the matched chunks and pick the best one
 *		as the final result.
 *
 * @see mmseg_core_invoke( chunks );
 */
__STATIC_API__ friso_hits_t next_complex_cjk( friso_t friso, friso_task_t task ) {

	register uint_t x, y, z;
	/*back up the task->bytes here*/
	uint_t __idx__ = task->bytes;
	lex_entry_t fe, se, te;
	friso_chunk_t e;
	friso_array_t words, chunks;
	friso_array_t smatch, tmatch, fmatch = get_next_match( friso, task, task->idx );

	/*
	 * here:
	 *		if the length of fmatch is 1, we don't have to
	 *	continue with the following work (we would get the same result anyway).
	 */
	if ( fmatch->length == 1 ) {
		task->hits->word =  ( ( lex_entry_t ) fmatch->items[0] )->word;
		task->hits->type = __FRISO_SYS_WORDS__;
		free_array_list( fmatch );
		
		return task->hits;
	}

	chunks = new_array_list();
	task->idx -= __idx__;
	

	for ( x = 0; x < fmatch->length; x++ ) 
	{
		/*get the word and try the second layer match*/
		fe = ( lex_entry_t ) array_list_get( fmatch, x );
		__idx__ = task->idx + fe->length;
		read_next_word( task, &__idx__, task->buffer );

		if ( task->bytes != 0 
				&& utf8_cjk_string( get_utf8_unicode( task->buffer ) ) 
				&& friso_dic_match( friso->dic, __LEX_CJK_WORDS__, task->buffer ) ) {

			//get the next matches
			smatch = get_next_match( friso, task, __idx__ );
			for ( y = 0; y < smatch->length; y++ ) 
			{
				/*get the word and try the third layer match*/
				se = ( lex_entry_t ) array_list_get( smatch, y );
				__idx__ = task->idx + fe->length + se->length;
				read_next_word( task, &__idx__, task->buffer );

				if ( task->bytes != 0 
						&& utf8_cjk_string( get_utf8_unicode( task->buffer ) )
						&& friso_dic_match( friso->dic, __LEX_CJK_WORDS__, task->buffer ) ) {

					//get the matches.
					tmatch = get_next_match( friso, task, __idx__ );
					for ( z = 0; z < tmatch->length; z++ ) 
					{
						te = ( lex_entry_t ) array_list_get( tmatch, z );
						words = new_array_list_with_opacity(3);
						array_list_add( words, fe );
						array_list_add( words, se );
						array_list_add( words, te );
						array_list_add( chunks, 
								new_chunk( words, fe->length + se->length + te->length ) );
					}
					free_array_list( tmatch );
				} else {
					words = new_array_list_with_opacity(2);
					array_list_add( words, fe );
					array_list_add( words, se );
					//add the chunk
					array_list_add( chunks,
							new_chunk( words, fe->length + se->length ) );
				}
			}
			free_array_list( smatch );
		} else {
			words = new_array_list_with_opacity(1);
			array_list_add( words, fe );
			array_list_add( chunks, new_chunk( words, fe->length ) );
		}
	}
	free_array_list( fmatch );

	/*
	 * filter the chunks with the four rules of the mmseg algorithm
	 *		and get the best chunk as the final result.
	 * @see mmseg_core_invoke( chunks );
	 * @date 2012-12-13
	 */
	if ( chunks->length > 1 ) {
		e = mmseg_core_invoke( chunks );
	} else {
		e = ( friso_chunk_t ) chunks->items[0];
	}
	fe = ( lex_entry_t ) e->words->items[0];
	task->hits->word = fe->word;
	task->hits->type = __FRISO_SYS_WORDS__;
	task->idx += fe->length;						//reset the idx of the task.
	free_chunk( e->words );
	free_chunk( e );
	
	return task->hits;
}
Example #9
/*
 * here,
 * we use four rules to filter all the chunks and get the best chunk,
 *		and this is the core of the mmseg algorithm.
 * 1. maximum match word length.
 * 2. largest average word length.
 * 3. smallest word length variance.
 * 4. largest single word morpheme degrees of freedom.
 */
__STATIC_API__ friso_chunk_t mmseg_core_invoke( friso_array_t chunks ) {
	
	register uint_t t/*, j*/;
	float max;
	friso_chunk_t e;
	friso_array_t __res__, __tmp__;
	__res__ = new_array_list_with_opacity( chunks->length );


	//1.get the maximum matched chunks.
		//count the maximum length
	max = ( float ) ( ( friso_chunk_t ) chunks->items[0] )->length;
	for ( t = 1; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( e->length > max )
			max = ( float ) e->length;
	}
		//get the chunk items that owns the maximum length.
	for ( t = 0; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( e->length >= max ) {
			array_list_add( __res__, e );
		} else {
			free_array_list( e->words );
			free_chunk( e );
		}
	}
		//check the remaining chunks
	if ( __res__->length == 1 ) {
		e = ( friso_chunk_t ) __res__->items[0];
		free_array_list( __res__ );
		free_array_list( chunks );
		return e;
	} else {
		__tmp__ = array_list_clear( chunks );
		chunks = __res__;
		__res__ = __tmp__;
	}


	//2.get the largest average word length chunks.
		//count the maximum average word length.
	max = count_chunk_avl( ( friso_chunk_t ) chunks->items[0] );
	for ( t = 1; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( count_chunk_avl( e ) > max ) {
			max = e->average_word_length;
		}
	}
		//get the chunks items that own the largest average word length.
	for ( t = 0; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( e->average_word_length >= max ) {
			array_list_add( __res__, e );
		} else {
			free_array_list( e->words );
			free_chunk( e );
		}
	}
		//check the remaining chunks
	if ( __res__->length == 1 ) {
		e = ( friso_chunk_t ) __res__->items[0];
		free_array_list( chunks );
		free_array_list( __res__ );
		return e;
	} else {
		__tmp__ = array_list_clear( chunks );
		chunks = __res__;
		__res__ = __tmp__;
	}


	//3.get the smallest word length variance chunks
		//count the smallest word length variance
	max = count_chunk_var( ( friso_chunk_t ) chunks->items[0] );
	for ( t = 1; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( count_chunk_var( e ) < max ) {
			max = e->word_length_variance;
		}
	}
		//get the chunks that own the smallest word length variance.
	for ( t = 0; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( e->word_length_variance <= max ) {
			array_list_add( __res__, e );
		} else {
			free_array_list( e->words );
			free_chunk( e );
		}
	}
		//check the remaining chunks
	if ( __res__->length == 1 ) {
		e = ( friso_chunk_t ) __res__->items[0];
		free_array_list( chunks );
		free_array_list( __res__ );
		return e;
	} else {
		__tmp__ = array_list_clear( chunks );
		chunks = __res__;
		__res__ = __tmp__;
	}

	//4.get the largest single word morpheme degrees of freedom.
		//count the maximum single word morpheme degrees of freedom
	max = count_chunk_mdf( ( friso_chunk_t ) chunks->items[0] );
	for ( t = 1; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( count_chunk_mdf( e ) > max ) {
			max = e->single_word_dmf;
		}
	} 
		//get the chunks that own the largest single word morpheme degrees of freedom.
	for ( t = 0; t < chunks->length; t++ ) {
		e = ( friso_chunk_t ) chunks->items[t];
		if ( e->single_word_dmf >= max ) {
			array_list_add( __res__, e );
		} else {
			free_array_list( e->words );
			free_chunk( e );
		}
	}

	/*
	 * still more than one chunk left?
	 *		well, this rarely happens, but there is still a chance.
	 * here we simply return the first chunk as the final result
	 *		and free the rest.
	 */
	for ( t = 1; t < __res__->length; t++ ) {
		e = ( friso_chunk_t ) __res__->items[t];
		free_array_list( e->words );
		free_chunk( e );
	}

	e = ( friso_chunk_t ) __res__->items[0];
	free_array_list( chunks );
	free_array_list( __res__ );

	return e;
}
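The count_chunk_avl, count_chunk_var and count_chunk_mdf helpers used by rules 2-4 are not part of this listing. The sketch below shows the conventional mmseg definitions they are assumed to implement; friso's real helpers may differ in detail (for instance whether length counts bytes or characters), and count_chunk_mdf, which sums the logarithm of the dictionary frequency of each single-character word, is only described in a comment because it depends on a frequency lookup not visible here.
/*
 * Sketch (assumed definitions, not friso's actual implementation):
 *	count_chunk_avl: average word length = total matched length / word count.
 *	count_chunk_var: mean squared deviation of each word length from that average.
 *	count_chunk_mdf (not sketched): sum of log(frequency) over single-character words.
 */
__STATIC_API__ float count_chunk_avl( friso_chunk_t chunk ) {
	chunk->average_word_length =
		( float ) chunk->length / chunk->words->length;
	return chunk->average_word_length;
}

__STATIC_API__ float count_chunk_var( friso_chunk_t chunk ) {
	register uint_t t;
	float var = 0, tmp;
	lex_entry_t e;

	//assumes count_chunk_avl ran first (rule 2 is applied before rule 3).
	for ( t = 0; t < chunk->words->length; t++ ) {
		e = ( lex_entry_t ) chunk->words->items[t];
		tmp = ( float ) e->length - chunk->average_word_length;
		var += tmp * tmp;
	}
	chunk->word_length_variance = var / chunk->words->length;
	return chunk->word_length_variance;
}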