Code Example #1
File: argument_parser.hpp  Project: OSLL/ns3psm
	// Argument parsing
	void add_all_parameters(ParamSource& args) {
		// first parameter is the graph
		graph = args.get_matrix_argument();
		// then come the optional parameters
		while (!args.end()) {
			string key = args.get_parameter_name();
			normalize_key(key);
			add_parameter(key, args);
		}
	}
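The normalize_key helper used above is not shown in this snippet. As a rough, hypothetical sketch (an illustration only, not the actual OSLL/ns3psm implementation), a parameter-name normalizer might lower-case the key and map separators to underscores:

#include <ctype.h>

/* Hypothetical sketch only; the project's real normalize_key() may differ. */
static void normalize_key(char *key) {
  for (char *p = key; *p != '\0'; ++p) {
    if (*p == '-' || *p == ' ')
      *p = '_';                               /* unify separators */
    else
      *p = (char) tolower((unsigned char) *p);
  }
}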
Code Example #2
File: ggd-plugin.c  Project: DaveMDS/geany-plugins
static gboolean
load_configuration (void)
{
  gboolean  success = FALSE;
  gchar    *conffile;
  GError   *err = NULL;
  guint     i;
  
  /* default options that need to be set dynamically */
  GGD_OPT_doctype[0] = g_strdup ("doxygen");
  
  plugin->config = ggd_opt_group_new ("General");
  ggd_opt_group_add_string (plugin->config, &GGD_OPT_doctype[0], "doctype");
  for (i = 1; i < GEANY_MAX_BUILT_IN_FILETYPES; i++) {
    gchar *name;
    gchar *normal_ftname;
    
    normal_ftname = normalize_key (filetypes[i]->name);
    name = g_strconcat ("doctype_", normal_ftname, NULL);
    ggd_opt_group_add_string (plugin->config, &GGD_OPT_doctype[i], name);
    g_free (name);
    g_free (normal_ftname);
  }
  ggd_opt_group_add_boolean (plugin->config, &GGD_OPT_save_to_refresh, "save_to_refresh");
  ggd_opt_group_add_boolean (plugin->config, &GGD_OPT_indent, "indent");
  ggd_opt_group_add_string (plugin->config, &GGD_OPT_environ, "environ");
  conffile = ggd_get_config_file ("ggd.conf", NULL, GGD_PERM_R, &err);
  if (conffile) {
    success = ggd_opt_group_load_from_file (plugin->config, conffile, &err);
  }
  if (err) {
    GLogLevelFlags level = G_LOG_LEVEL_WARNING;
    
    if (err->domain == G_FILE_ERROR && err->code == G_FILE_ERROR_NOENT) {
      level = G_LOG_LEVEL_INFO;
    }
    g_log (G_LOG_DOMAIN, level,
           _("Failed to load configuration: %s"), err->message);
    g_error_free (err);
  }
  g_free (conffile);
  /* init filetype manager */
  ggd_file_type_manager_init ();
  
  return success;
}
Code Example #3
File: hmacsha256.c  Project: megrimm/pd-hmacsha256
static void hmacsha256_key(t_hmacsha256 *x, int argc, t_atom *argv) // int main( int argc, char ** argv )
{
  if( argc != 2 )
    usage( argv[ 0 ] );

  uint8_t key[ BLOCK_LENGTH ];
  normalize_key( key, argv[ 1 ] );

  struct sha256 inner_s;
  sha256_init( &inner_s );

  uint8_t inner_key[ BLOCK_LENGTH ];
  uint8_t outer_key[ BLOCK_LENGTH ];
  for( size_t i = 0; i < BLOCK_LENGTH; i++ ) {
    inner_key[ i ] = key[ i ] ^ INNER_PADDING;
    outer_key[ i ] = key[ i ] ^ OUTER_PADDING;
  }

  sha256_update( &inner_s, inner_key, BLOCK_LENGTH );

  uint8_t buf[ BUFSIZ ];
  size_t n;
  while( ( n = fread( buf, 1, sizeof( buf ), stdin ) ) > 0 )
    sha256_update( &inner_s, buf, n );
  if( ferror( stdin ) )
    err( 1, "error reading stdin" );

  uint8_t inner_hash[ SHA256_DIGEST_LENGTH ];
  sha256_sum( &inner_s, inner_hash );

  struct sha256 outer_s;
  sha256_init( &outer_s );
  sha256_update( &outer_s, outer_key, BLOCK_LENGTH );
  sha256_update( &outer_s, inner_hash, SHA256_DIGEST_LENGTH );

  uint8_t hmac[ SHA256_DIGEST_LENGTH ];
  sha256_sum( &outer_s, hmac );
  for( size_t i = 0; i < SHA256_DIGEST_LENGTH; i++ )
    printf( "%02x", hmac[ i ] );
  putchar( '\n' );

  //return 0;
}
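The normalize_key routine called above is not part of this snippet. In the standard HMAC construction (RFC 2104) the key is first brought to exactly BLOCK_LENGTH bytes: a key longer than the block size is hashed down, a shorter one is zero-padded. A sketch along those lines, reusing the sha256 helpers visible in the snippet (an assumption for illustration, not necessarily the project's actual normalize_key), could look like this:

#include <stdint.h>
#include <string.h>

/* Sketch only (RFC 2104 key preparation); megrimm/pd-hmacsha256's real
 * normalize_key() may differ. */
static void normalize_key(uint8_t key[BLOCK_LENGTH], const char *input)
{
  size_t len = strlen(input);

  memset(key, 0, BLOCK_LENGTH);              /* zero-pad short keys */
  if (len > BLOCK_LENGTH) {
    struct sha256 s;                         /* over-long keys are hashed down */
    sha256_init(&s);
    sha256_update(&s, (uint8_t *) input, len);
    sha256_sum(&s, key);                     /* the digest fits within the block */
  } else {
    memcpy(key, input, len);
  }
}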
Code Example #4
weighted_result *se_tfidf_lucene (char *filename, char *indexname, char *insearchstring)
{

  char *word_ptr, *norm_word_ptr;

  float sumwt;
  float ndocs;
  int i, exists;
  int  tot_docs;
  int tot_qterms;
  int q_term_freq;
  int dummy = 0;
  int termidlist[100]; 
  int proxtermidlist[100]; 
  int proxtermlength[100];
  int num_found_terms = 0;
  char *searchstring = NULL;

  Tcl_HashTable *hash_tab, hash_tab_data;
  Tcl_HashEntry *entry;
  Tcl_HashSearch hash_search;

  Tcl_HashTable *q_hash_tab, q_hash_tab_data;
  Tcl_HashEntry *q_entry;
  Tcl_HashSearch q_hash_search;

  Tcl_HashTable *stoplist_hash;

  idx_result *index_result;
  DB *index_db;
  idx_list_entry *idx, *cf_getidx_entry();

  long docid;
  float docwt;
  double log_odds;
  int total_q_terms = 0;
  int sum_q_terms = 0;
  int index_type, morphed;
  char *breakletters;
  char *keywordletters = " \t\n`~!@$%^&*()_=|\\{[]};:'\",<>?/";
  char *urlletters = " \t\n<>";
  char *filenameletters = " \t\n<>";
  int dist_ndocs = 0; /* see search_stat.h for more details on these */
  int min_cf = 0;
  int max_cf = 0;
  int sum_entries = 0;
  int min_entries = 0;
  int max_entries = 0;
  int n_stopwords = 0;


  struct docsum{
    int num_qterms;
    double sum_wts;
    double sum_IDF;
    double avg_doclen;
    double sum_tf;
    double dnorm;
    int doclen;
    int min_tf;
    int max_tf;
  } *doc_wts;

  double avg_doclen;
  double avg_q_tf;
  double avg_d_tf;
  double qnorm;
  double qtweight;
  double dtweight;

  weighted_result *wt_res, *ranked_res, *se_rank_docs();

  char *lastptr;
  double p1, p2, p3, p4, p5;
  ranking_parameters *rank_parm;

  /* these aren't really used at the present time... */
  p1 = 0.0;
  p2 = 0.0;
  p3 = 0.0;
  p4 = 0.0;
  p5 = 0.0;


  if (insearchstring == NULL)
    return NULL;
  else
    searchstring = strdup(insearchstring);

  /* Init the hash table for collecting query terms */
  q_hash_tab = &q_hash_tab_data;
  Tcl_InitHashTable(q_hash_tab,TCL_STRING_KEYS);

  /* Init the hash table for collecting weights for each document */
  hash_tab = &hash_tab_data;
  Tcl_InitHashTable(hash_tab,TCL_ONE_WORD_KEYS);

  index_db = 
    (DB *) cf_index_open(filename, indexname, INDEXFL);

  if (index_db == NULL)
    return (NULL);

  idx = cf_getidx_entry(filename, indexname);

  stoplist_hash = &idx->stopwords_hash;
  index_type = idx->type;

  for (rank_parm = idx->ranking_parameters; rank_parm != NULL; rank_parm = rank_parm->next_parm) {
    if (rank_parm->type == 3) {
      /* it is an Okapi parameter */
      switch(rank_parm->id) {
      case 1:
	p1 = rank_parm->val;
	break;
      case 2:
	p2 = rank_parm->val;
	break;
      case 3:
	p3 = rank_parm->val;
	break;
      case 4:
	p4 = rank_parm->val;
	break;
      case 5:
	p5 = rank_parm->val;
	break;

	/* getting to here means that the defaults above are used */
      }
    }
  }



  if (index_type & URL_KEY) breakletters = urlletters;
  else if (index_type & FILENAME_KEY) breakletters = filenameletters;
  else breakletters = keywordletters; 

  /* get the total number of "documents": components or records indexed */
  if (index_type & COMPONENT_INDEX) {
    if (idx->comp_parent->comp_db == NULL) {
    /* open up the component info index */
      idx->comp_parent->comp_db = cf_component_open(idx->comp_parent->config_info->nickname, 
						    idx->comp_parent->name);
    }
    ndocs = idx->comp_parent->max_component_id;
    /* this is currently done VERY EXPENSIVELY and should be changed */
    avg_doclen = se_get_avg_component_len(idx->comp_parent);
  }
  else {
    ndocs = (float) cf_getnumdocs(filename);
    /* get the average doclength for this database  */
    avg_doclen = se_get_avg_document_len(filename);
  }

  /* find first token */
  word_ptr = strtok_r (searchstring, breakletters, &lastptr);
  do { /* build the query elements */

    norm_word_ptr = 
      normalize_key(word_ptr, idx, &dummy, 1);
    
    if (norm_word_ptr != NULL) {
	q_entry = Tcl_FindHashEntry(q_hash_tab,norm_word_ptr);
	
	if (q_entry == NULL){
	  total_q_terms++;
	  sum_q_terms++;
	  Tcl_SetHashValue(
			   Tcl_CreateHashEntry(
					       q_hash_tab,
					       norm_word_ptr,
					       &exists),
			   (ClientData)1);
	  FREE(norm_word_ptr); /* this was allocated in normalize_key */
	}
	else {
	  q_term_freq = (int) Tcl_GetHashValue(q_entry);
	  q_term_freq++;
	  sum_q_terms++;
	  /* total_q_terms++; count only unique terms */
	  Tcl_SetHashValue(q_entry,
			   (ClientData)q_term_freq);
	  FREE(norm_word_ptr); /* this was allocated in normalize_key */
	}
    }

    /* get the next word */
    word_ptr = strtok_r (NULL, breakletters, &lastptr);
  } while (word_ptr != NULL);

  qnorm = (double)total_q_terms/(avg_doclen/10.0);
  avg_q_tf = ((double)sum_q_terms + 1.0)/((double)total_q_terms + 1.0);
  avg_d_tf = ((double)idx->GlobalData->tot_occur + 1.0) / ((double)idx->GlobalData->recptr + 1.0);
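
  /*
   * Scoring in the loop that follows (as implemented in this code): each
   * query term found in the index adds IDF * dtweight * qtweight to a
   * document's sum_wts, where
   *   IDF      = log(ndocs / (num_entries + 1)),
   *   qtweight = log(q_term_freq + 1) / log(avg_q_tf + 1),
   * and dtweight is the analogous log-scaled within-document term frequency.
   */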

  for (q_entry = Tcl_FirstHashEntry(q_hash_tab,&q_hash_search);
       q_entry != NULL; q_entry = Tcl_NextHashEntry(&q_hash_search)) {

    /* get the word/stem and its frequency in the query from the hash tab */
    word_ptr = (char *)Tcl_GetHashKey(q_hash_tab,q_entry); 
    q_term_freq = (int) Tcl_GetHashValue(q_entry);
    qtweight = log((double)q_term_freq+1.0)/log(avg_q_tf+1.0);

    /* find it in the index */
    if (strchr(word_ptr,'#') != NULL)
      index_result = se_gettrunc_idx(idx,word_ptr);
    else
      index_result = se_getterm_idx(idx,
				    word_ptr, 0 /* don't normalize twice.*/,
				    &n_stopwords);

    if (index_result != NULL) {
      double IDF;

      if (num_found_terms < 100)
	termidlist[num_found_terms++] = index_result->termid;

      /* have the frequency information for this term, so we'll */
      /* figure out the term weight                             */
      sumwt = (float) index_result->tot_freq;

      IDF = log((double)ndocs/((double)index_result->num_entries + 1.0));

      if (min_cf == 0 || min_cf > index_result->tot_freq) 
	min_cf = index_result->tot_freq;
      if (max_cf == 0 || max_cf < index_result->tot_freq)
	max_cf = index_result->tot_freq;

      sum_entries += index_result->num_entries;
	
      if (min_entries == 0 || min_entries > index_result->num_entries) 
	min_entries = index_result->num_entries;
      if (max_entries == 0 || max_entries < index_result->num_entries)
	max_entries = index_result->num_entries;

      for (i = 0; i < index_result->num_entries; i++) {

	if (index_result->entries[i].record_no > 0) { /* forget deleted ones */
	  entry = Tcl_FindHashEntry(hash_tab, 
				    (void *)index_result->entries[i].record_no);

	  if (entry == NULL){
	    doc_wts = CALLOC(struct docsum,1);
	    
	    doc_wts->num_qterms = 1;
	    doc_wts->doclen = se_getdoclen(index_result->entries[i].record_no,
					   filename, idx);
	    
	    doc_wts->dnorm = ((double)doc_wts->doclen/10.0)/(avg_doclen/10.0);
	    
	    dtweight = log((double)index_result->entries[i].term_freq+1.0)/log(avg_d_tf + 1.0);

	    doc_wts->sum_wts = IDF * dtweight * qtweight;

	    doc_wts->min_tf = doc_wts->max_tf = 
	      index_result->entries[i].term_freq;
	    
	    doc_wts->sum_IDF = (double)IDF;
	    doc_wts->sum_tf = (double)index_result->entries[i].term_freq;
	    doc_wts->avg_doclen = (double)avg_doclen;

	    Tcl_SetHashValue(
			     Tcl_CreateHashEntry(
						 hash_tab,
						 (void *)index_result->entries[i].record_no,
						 &exists),
			     (ClientData)doc_wts);
	  }
	  else {
	    /* add to an existing doc entry */
	    doc_wts = (struct docsum *) Tcl_GetHashValue(entry);
	    doc_wts->num_qterms++;

	    dtweight = log((double)index_result->entries[i].term_freq)/log(avg_d_tf);

	    doc_wts->sum_wts += IDF * dtweight * qtweight;
	      
	    if (doc_wts->min_tf > index_result->entries[i].term_freq) 
	      doc_wts->min_tf = index_result->entries[i].term_freq;
	    if (doc_wts->max_tf < index_result->entries[i].term_freq) 
	      doc_wts->max_tf = index_result->entries[i].term_freq;
	    
	  }    
	}