static void AddGames(DBProvider *pdb, int session_id, int player_id0, int player_id1)
{
	int gamenum = 0;
	listOLD *plGame, *pl = lMatch.plNext;
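	/* Walk each game in the match list, inserting a game row and per-player game statistics */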
	while ((plGame = pl->p) != NULL)
	{
		int game_id = GetNextId(pdb, "game");
		moverecord *pmr = plGame->plNext->p;
		xmovegameinfo *pmgi = &pmr->g;
		char *buf = g_strdup_printf("INSERT INTO game(game_id, session_id, player_id0, player_id1, "
				"score_0, score_1, result, added, game_number, crawford) "
				"VALUES (%d, %d, %d, %d, %d, %d, %d, CURRENT_TIME, %d, %d )",
				game_id, session_id, player_id0, player_id1,
				pmgi->anScore[0], pmgi->anScore[1], pmgi->nPoints, ++gamenum, pmr->g.fCrawfordGame);

		if (pdb->UpdateCommand(buf))
		{
			AddStats(pdb, game_id, player_id0, 0, "gamestat", ms.nMatchTo, &(pmgi->sc));
			AddStats(pdb, game_id, player_id1, 1, "gamestat", ms.nMatchTo, &(pmgi->sc));
		}
		g_free(buf);
		pl = pl->plNext;
	}
}
extern void CommandRelationalAddMatch(char *sz)
{
	DBProvider *pdb;
	char *buf, *buf2, *date;
	char warnings[1024] = "";
	int session_id, existing_id, player_id0, player_id1;
	char *arg = NULL;
	gboolean quiet = FALSE;

	arg = NextToken(&sz);
	if (arg)
		quiet = !strcmp(arg, "quiet");

	if (ListEmpty(&lMatch))
	{
		outputl( _("No match is being played.") );
		return;
	}

	/* Warn if match is not finished or fully analyzed */
	if (!quiet && !GameOver())
		strcat(warnings, _("The match is not finished\n"));
	if (!quiet && !MatchAnalysed())
		strcat(warnings, _("All of the match is not analyzed\n"));

	if (*warnings)
	{
		strcat(warnings, _("\nAdd match anyway?"));
		if (!GetInputYN(warnings))
			return;
	}

	if ((pdb = ConnectToDB(dbProviderType)) == NULL)
		return;

	existing_id = RelationalMatchExists(pdb);
	if (existing_id != -1)
	{
		if (!quiet && !GetInputYN(_("Match exists, overwrite?")))
			return;

		/* Remove any game stats and games */
		buf2 = g_strdup_printf("FROM game WHERE session_id = %d", existing_id);
		buf = g_strdup_printf("DELETE FROM gamestat WHERE game_id in (SELECT game_id %s)", buf2);
		pdb->UpdateCommand(buf);
		g_free(buf);
		buf = g_strdup_printf("DELETE %s", buf2);
		pdb->UpdateCommand(buf);
		g_free(buf);
		g_free(buf2);

		/* Remove any match stats and session */
		buf = g_strdup_printf("DELETE FROM matchstat WHERE session_id = %d", existing_id);
		pdb->UpdateCommand(buf);
		g_free(buf);
		buf = g_strdup_printf("DELETE FROM session WHERE session_id = %d", existing_id);
		pdb->UpdateCommand(buf);
		g_free(buf);
	}

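	/* Allocate the next session id and add both players to the player table */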
	session_id = GetNextId(pdb, "session");
	player_id0 = AddPlayer(pdb, ap[0].szName);
	player_id1 = AddPlayer(pdb, ap[1].szName);
	if (session_id == -1 || player_id0 == -1 || player_id1 == -1)
	{
		outputl( _("Error adding match.") );
		pdb->Disconnect();
		return;
	}

	if( mi.nYear )
		date = g_strdup_printf("%04d-%02d-%02d", mi.nYear, mi.nMonth, mi.nDay);
	else
		date = NULL;

	buf = g_strdup_printf("INSERT INTO session(session_id, checksum, player_id0, player_id1, "
              "result, length, added, rating0, rating1, event, round, place, annotator, comment, date) "
              "VALUES (%d, '%s', %d, %d, %d, %d, CURRENT_TIMESTAMP, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')",
				session_id, GetMatchCheckSum(), player_id0, player_id1,
				MatchResult(ms.nMatchTo), ms.nMatchTo, NS(mi.pchRating[0]), NS(mi.pchRating[1]),
				NS(mi.pchEvent), NS(mi.pchRound), NS(mi.pchPlace), NS(mi.pchAnnotator), NS(mi.pchComment), NS(date));

	updateStatisticsMatch ( &lMatch );

	if (pdb->UpdateCommand(buf))
	{
		if (AddStats(pdb, session_id, player_id0, 0, "matchstat", ms.nMatchTo, &scMatch) &&
			AddStats(pdb, session_id, player_id1, 1, "matchstat", ms.nMatchTo, &scMatch))
		{
			if (storeGameStats)
				AddGames(pdb, session_id, player_id0, player_id1);
			pdb->Commit();
		}
	}
	g_free(buf);
	g_free(date);
	pdb->Disconnect();
}
Example #3
LOCAL_C TInt testAsyncAccess(TChar dc1, TChar dc2)
//
// Test one drive against the other.
//
    {
	TFileOps f1;
	TFileOps f2;

	f1.Open(dc1, 1);
	if (dc1 != dc2)
		f2.Open(dc2, 2);

	TInt   op1 = 0;
	TInt   op2 = 0;
	RTimer timer;
	TRequestStatus tstat;
	TTime startTime;
	TTime endTime;
	TTimeIntervalMicroSeconds timeTaken;

	timer.CreateLocal();

	timer.After(tstat, KTimeBM * KSecond);

	startTime.HomeTime();

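	// Write continuously until the benchmark timer expires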
	while (tstat == KRequestPending)
		{
		TInt num = f1.Write();
		num += f2.Write();
		if (num == 0)
			User::WaitForAnyRequest();
		}

	op1 = f1.End();
	op2 = f2.End();

	endTime.HomeTime();
	timeTaken=endTime.MicroSecondsFrom(startTime);

	TInt64 dtime = timeTaken.Int64();

	TTest::Printf(_L("%c: %8d writes in %6d mS = %8d bytes per second\n"),
				  (TUint)dc1, op1, I64LOW(dtime)/1000, GetSpeed(op1, dtime));

	if (dc1 != dc2)
		TTest::Printf(_L("%c: %8d writes in %6d mS = %8d bytes per second\n"),
					  (TUint)dc2, op2, I64LOW(dtime)/1000, GetSpeed(op2, dtime));

	AddStats(gWrStats, MAKE_TINT64(0, op1 + op2) * MAKE_TINT64(0, KBufLen) * MAKE_TINT64(0, KSecond), dtime);

	// now the reads!

	f1.Reset();
	f2.Reset();

	timer.After(tstat, KTimeBM * KSecond);

	startTime.HomeTime();

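	// Read continuously until the benchmark timer expires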
	while (tstat == KRequestPending)
		{
		f1.Read();
		f2.Read();
		User::WaitForAnyRequest();
		}

	op1 = f1.End();
	op2 = f2.End();

	endTime.HomeTime();
	timeTaken=endTime.MicroSecondsFrom(startTime);

	dtime = timeTaken.Int64();

	TTest::Printf(_L("%c: %8d reads  in %6d mS = %8d bytes per second\n"),
				  (TUint)dc1, op1, I64LOW(dtime)/1000, GetSpeed(op1, dtime));

	if (dc1 != dc2)
		TTest::Printf(_L("%c: %8d reads  in %6d mS = %8d bytes per second\n"),
					  (TUint)dc2, op2, I64LOW(dtime)/1000, GetSpeed(op2, dtime));

	AddStats(gRdStats, MAKE_TINT64(0, op1 + op2) * MAKE_TINT64(0, KBufLen) * MAKE_TINT64(0, KSecond), dtime);

	test.Printf(_L("\n"));
	test.Printf(_L("average write throughput = %d bytes/sec\n"), GetSpeed(gWrStats));
	test.Printf(_L("average read  throughput = %d bytes/sec\n"), GetSpeed(gRdStats));
	test.Printf(_L("\n"));
	gWrStats.Init();
	gRdStats.Init();

	timer.Cancel();
	timer.Close();
	f1.Close();
	f2.Close();
	// delay for a second to allow the close to complete before dismounting.
	User::After(1000000);
	return KErrNone;
    }
Example #4
LOCAL_C TInt testAsyncAccess(TAny* aData)
//
/// Test read file handling.
///
/// @param aData pointer to the thread data area
    {
	TThreadData& data = *(TThreadData*)aData;
	TFileName fileName = data.iFile;
	TBool     dowrite  = (data.iData != NULL);
	TBuf8<KBufLen>* buffer = gBufferArr[data.iNum];
	TRequestStatus* status = gStatusArr[data.iNum];

	RFs   myFs;
	TInt r = myFs.Connect();
	TEST(r==KErrNone);

	r = myFs.SetSessionPath(gSessionPath);
	if (r != KErrNone)
		TTest::Fail(HERE, _L("SetSessionPath returned %d"), r);

	TVolumeInfo vol;
	TInt        drv;
	r = myFs.CharToDrive(fileName[0], drv);
	if (r != KErrNone)
		TTest::Fail(HERE, _L("CharToDrive(%c) returned %d"), fileName[0], r);
	r = myFs.Volume(vol, drv);
	if (r != KErrNone)
		TTest::Fail(HERE, _L("Volume() returned %d"), r);

	TInt64 maxwrite = vol.iFree / 2 - KBufLen;
	if (maxwrite < KBufLen*2)
		TTest::Fail(HERE, _L("Not enough space to do test, only %d KB available"),
					 TInt(vol.iFree/1024));

    RFile f;
	RTimer timer;
	TRequestStatus tstat;
	TTime startTime;
	TTime endTime;
	TTimeIntervalMicroSeconds timeTaken;

	TInt wrnum = 0;
	TInt rdnum = 0;
	TInt opnum = 0;
	TInt opfin = 0;
	TInt i;

	timer.CreateLocal();

	if (dowrite)
		{
		r = f.Replace(myFs, fileName, EFileStreamText | EFileWrite);
		TEST(r==KErrNone);

		// wait for both tasks to have a chance to complete opening the files
		User::After(1000);

		for (i = 0; i < KNumBuf; i++)
			buffer[i].Fill('_', KBufLen);

		timer.After(tstat, KTimeBM * KSecond);

		startTime.HomeTime();

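		// Issue asynchronous writes, waiting when more than KMaxLag requests are outstanding, until the timer expires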
		while (tstat == KRequestPending)
			{
			TInt pos = TInt((wrnum * KBufLen) % maxwrite);
			TInt bnum = opnum++ % KNumBuf;
			f.Write(pos, buffer[bnum], status[bnum]);
			if (opnum - opfin > KMaxLag)
				{
				while (status[opfin % KNumBuf] == KRequestPending)
					User::WaitForRequest(status[opfin % KNumBuf]);
				opfin++;
				}
			++wrnum;
			}

		while (opfin < opnum)
			{
			while (status[opfin % KNumBuf] == KRequestPending)
				User::WaitForRequest(status[opfin % KNumBuf]);
			opfin++;
			}

		endTime.HomeTime();
		timeTaken = endTime.MicroSecondsFrom(startTime);

		TInt64 dtime = timeTaken.Int64();
		TInt64 dsize = wrnum * KBufLen * TInt64(KSecond);
		TInt32 speed = TInt32((dsize + dtime/2) / dtime);
		AddStats(gWrStats, dsize, dtime);

		TTest::Printf(_L("%8d writes in %6d mS = %8d bytes per second\n"),
					  wrnum, TInt32(dtime)/1000, speed);
		}
	else
		{
		r = f.Open(myFs, fileName, EFileStreamText);
		TEST(r==KErrNone);

		timer.After(tstat, KTimeBM * KSecond);

		startTime.HomeTime();

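		// Issue asynchronous reads, waiting when more than KMaxLag requests are outstanding, until the timer expires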
		while (tstat == KRequestPending)
			{
			TInt pos = TInt((rdnum * KBufLen) % maxwrite);
			TInt bnum = opnum++ % KNumBuf;
			f.Read(pos, buffer[bnum], status[bnum]);
			if (opnum - opfin > KMaxLag)
				{
				User::WaitForRequest(status[opfin++ % KNumBuf]);
				}
			++rdnum;
			}

		while (opfin < opnum)
			{
			if (status[opfin % KNumBuf] == KRequestPending)
				User::WaitForRequest(status[opfin % KNumBuf]);
			opfin++;
			}

		endTime.HomeTime();
		timeTaken=endTime.MicroSecondsFrom(startTime);
		TInt64 dtime = timeTaken.Int64();
		TInt64 dsize = rdnum * KBufLen * TInt64(KSecond);
		TInt32 speed = TInt32((dsize + dtime/2) / dtime);
		AddStats(gRdStats, dsize, dtime);

		// wait to allow the dust to settle
		User::After(KSecond);

		TTest::Printf(_L("%8d reads  in %6d mS = %8d bytes per second\n"),
					  rdnum, TInt32(dtime)/1000, speed);

		myFs.Delete(fileName);
		}

	timer.Cancel();
	timer.Close();
	f.Close();
	myFs.Close();
	return r;
    }
Example #5
/* PPlexStream: compute perplexity and related statistics */
static void ProcessTextStream(char *fn, int nSize)
{
   int i;
   FILE *f;
   LabId lab=0;
   double ppl;
   int numPLabs;
   Boolean isPipe;
   char word[256];

   if (fn!=NULL) {
      if ((f=FOpen(fn, LMTextFilter, &isPipe))==NULL)
	 HError(16610,"ProcessTextStream: unable to open file %s", fn);
   } else {
      f = stdin;
   }
   if (trace>0) {
      printf("Processing text stream: %s\n", (fn==NULL)?"<stdin>":fn);
      fflush(stdout);
   }
   numPLabs = 0;
   ZeroStats(&sent);
   sent.nUtt = 1; sent.nTok = 0;
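   /* read words one at a time, computing perplexity at each sentence end */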
   while ((fscanf(f, "%200s", word))==1) {
      if (strlen(word)>=200)
	 HError(-16640, "ProcessTextStream: word too long, will be split: %s\n", word);
      lab = GetEQLab(GetLabId(word, TRUE));

      if (IS_SST(lab)) {
	 numPLabs = 0;
	 for (i=0; i<(nSize-1); i++) pLab[numPLabs++] = sstId;
	 ZeroStats(&sent);
	 sent.nUtt = 1; sent.nTok = 1;
	 continue;
      }
      if (IS_UNK(lab)) {
	 if (trace&T_OOV)
	    printf("mapping OOV: %s\n", lab->name);
	 StoreOOV(&sent,lab,1); lab = unkId;
      }
      pLab[numPLabs++] = lab; sent.nTok++;
      if (numPLabs>=LBUF_SIZE) {
         HError(-16645,"ProcessTextStream: word buffer size exceeded - too many words without a sentence end (%d)",LBUF_SIZE);
	 CalcPerplexity(&sent,pLab,numPLabs,nSize);
	 numPLabs = 0;
      }
      if (IS_SEN(lab)) {
	 CalcPerplexity(&sent,pLab,numPLabs,nSize);
	 AddStats(&sent, &totl);

	 if (trace&T_SEL) {     /* compact info for sentence selection */
	   ppl = exp(-(sent.logpp)/(double) (sent.nWrd));
	   printf("#! %.4f", ppl);
	   for (i=nSize-1; i<numPLabs; i++)
	     printf(" %s", pLab[i]->name);
	   printf("\n"); fflush(stdout);
	 }

	 ZeroStats(&sent);
      }
   }
   AddStats(&sent,&totl);

   if (fn!=NULL)
      FClose(f,isPipe);
}
Example #6
/* ProcessLabelFile: compute perplexity and related statistics from labels */
static void ProcessLabelFile(char *fn, int nSize)
{
   LLink ll;
   double ppl;
   LabList *ref;
   LabId lab;
   Transcription *tr;
   int i,numPLabs,nLabel;

   tr = LOpen(&tempHeap, fn, lff);
   if (tr->numLists < 1) {
      HError(-16635,"ProcessLabelFile: transcription file %s is Empty",fn);
      return;
   }
   ref = GetLabelList(tr, 1);
   if (ref->head->succ == ref->tail) {
      HError(-16635,"ProcessLabelFile: transcription file %s is Empty",fn);
      return;
   }
   if (trace>0) {
      printf("Processing label file: %s\n", fn);
      fflush(stdout);
   }

   nLabel = CountLabs(ref);
   ZeroStats(&sent);
   sent.nTok = nLabel + 2; sent.nUtt = 1;

   /* copy labels into pLab, mapping OOVs */
   numPLabs = 0;
   if (sstId!=NULL)             /* add sentence start marker(s) */
      for (i=0; i<(nSize-1); i++) pLab[numPLabs++] = sstId;
   for (i=0,ll=ref->head->succ; i<nLabel; i++,ll=ll->succ) {
      lab = GetEQLab(ll->labid);
      if ((i==0) && IS_SST(lab)) {
	sent.nTok--; continue;
      }
      if ((i==(nLabel-1)) && IS_SEN(lab)) {
	 sent.nTok--; continue;
      }
      if (IS_UNK(lab)) {
	 if (trace&T_OOV)
	    printf("mapping OOV: %s\n", lab->name);
	 StoreOOV(&sent,lab,1); lab = unkId;
      }
      pLab[numPLabs++] = lab;
      if (numPLabs>=LBUF_SIZE) {
         HError(16650, "Maximum utterance length in a label file exceeded (limit is compiled to be %d tokens)",
                LBUF_SIZE);
      }
   }
   if (senId!=NULL)             /* add sentence end marker */
     pLab[numPLabs++] = senId;

   CalcPerplexity(&sent, pLab, numPLabs, nSize);
   AddStats(&sent, &totl);

   if (trace&T_SEL) {     /* compact info for sentence selection */
      ppl = exp(-(sent.logpp)/(double) (sent.nWrd));
      printf("#! %.4f", ppl);
      for (i=0, ll=ref->head->succ; i<nLabel; i++, ll=ll->succ)
	 printf(" %s", ll->labid->name);
      printf("\n"); fflush(stdout);
   }
}
Example #7
  // update the tree, do pruning
  virtual void Update(const std::vector<bst_gpair> &gpair,
                      IFMatrix *p_fmat,
                      const BoosterInfo &info,
                      const std::vector<RegTree*> &trees) {
    if (trees.size() == 0) return;
    // thread temporary space for per-node statistics and feature vectors
    std::vector< std::vector<TStats> > stemp;
    std::vector<RegTree::FVec> fvec_temp;
    // setup temp space for each thread
    int nthread;  // number of OpenMP threads
    #pragma omp parallel
    {
      nthread = omp_get_num_threads();
    }
    fvec_temp.resize(nthread, RegTree::FVec());
    stemp.resize(nthread, std::vector<TStats>());
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      int num_nodes = 0;
      for (size_t i = 0; i < trees.size(); ++i) {
        num_nodes += trees[i]->param.num_nodes;
      }
      stemp[tid].resize(num_nodes, TStats(param));
      std::fill(stemp[tid].begin(), stemp[tid].end(), TStats(param));
      fvec_temp[tid].Init(trees[0]->param.num_feature);
    }
    // if it is C++11, use lazy evaluation for Allreduce,
    // to gain speedup in recovery
#if __cplusplus >= 201103L
    auto lazy_get_stats = [&]()
#endif
    {
      // start accumulating statistics
      utils::IIterator<RowBatch> *iter = p_fmat->RowIterator();
      iter->BeforeFirst();
      while (iter->Next()) {
        const RowBatch &batch = iter->Value();
        utils::Check(batch.size < std::numeric_limits<unsigned>::max(),
                     "too large batch size ");
        const bst_omp_uint nbatch = static_cast<bst_omp_uint>(batch.size);
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint i = 0; i < nbatch; ++i) {
          RowBatch::Inst inst = batch[i];
          const int tid = omp_get_thread_num();
          const bst_uint ridx = static_cast<bst_uint>(batch.base_rowid + i);
          RegTree::FVec &feats = fvec_temp[tid];
          feats.Fill(inst);
          int offset = 0;
          for (size_t j = 0; j < trees.size(); ++j) {
            AddStats(*trees[j], feats, gpair, info, ridx,
                     BeginPtr(stemp[tid]) + offset);
            offset += trees[j]->param.num_nodes;
          }
          feats.Drop(inst);
        }
      }
      // aggregate the statistics
      int num_nodes = static_cast<int>(stemp[0].size());
      #pragma omp parallel for schedule(static)
      for (int nid = 0; nid < num_nodes; ++nid) {
        for (int tid = 1; tid < nthread; ++tid) {
          stemp[0][nid].Add(stemp[tid][nid]);
        }
      }
    };
#if __cplusplus >= 201103L
    reducer.Allreduce(BeginPtr(stemp[0]), stemp[0].size(), lazy_get_stats);
#else
    reducer.Allreduce(BeginPtr(stemp[0]), stemp[0].size());
#endif
    // rescale learning rate according to size of trees
    float lr = param.learning_rate;
    param.learning_rate = lr / trees.size();
    int offset = 0;
    for (size_t i = 0; i < trees.size(); ++i) {
      for (int rid = 0; rid < trees[i]->param.num_roots; ++rid) {
        this->Refresh(BeginPtr(stemp[0]) + offset, rid, trees[i]);
      }
      offset += trees[i]->param.num_nodes;
    }
    // set learning rate back
    param.learning_rate = lr;
  }