Example 1
int main()
{
   //----- Deprecated since PDQ 6 -----
   int              nodes;
   int              streams;
   
   //----- Model specific variables -----
   double           arrivRate    = 0.75;
   double           service_time = 1.0;

   //----- Initialize the model & Give it a name ------
   PDQ_Init("OpenCenter");
   PDQ_SetComment("This is just a simple M/M/1 queue.");
   

   //----- Define the queueing center -----
   nodes = PDQ_CreateNode("server", CEN, FCFS);

   //----- Define the workload and circuit type -----
   streams = PDQ_CreateOpen("work", arrivRate);

   //----- Define service demand due to workload on the queueing center ------
   PDQ_SetDemand("server", "work", service_time);
   
   //----- Change unit labels -----
   PDQ_SetWUnit("Customers");
   PDQ_SetTUnit("Seconds");


   //----- Solve the model -----
   //  Must use the CANONical method for an open circuit
   PDQ_Solve(CANON);

   //----- Generate a report -----
   PDQ_Report();
   
}
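Because this model is a plain M/M/1 queue, the PDQ report can be sanity-checked against the textbook closed forms rho = lambda * S, R = S / (1 - rho), and N = rho / (1 - rho). The stand-alone sketch below is not part of the PDQ example; it simply reuses the same arrival rate and service time to print the analytic values for comparison.

/* Analytic M/M/1 cross-check using the same parameters as the PDQ model above. */
#include <stdio.h>

int main(void)
{
   double lambda = 0.75;              /* arrival rate (arrivRate above)    */
   double S      = 1.0;               /* service time (service_time above) */
   double rho    = lambda * S;        /* server utilization                */
   double R      = S / (1.0 - rho);   /* mean residence (response) time    */
   double N      = rho / (1.0 - rho); /* mean number in system             */

   printf("Utilization     : %.4f\n", rho);
   printf("Residence time  : %.4f Seconds\n", R);
   printf("Number in system: %.4f Customers\n", N);
   return 0;
}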
Example 2
int main()
{
   void            namex();
   int             intwt();
   void            itoa();

   char            cname[10];	/* cache id */
   char            wname[10];	/* workload */
   int             i;

   /* per-CPU instruction stream intensity */

   double          Prhit = (RD * HT);
   double          Pwhit = (WR * HT * (1 - WUMD)) + (WR * (1 - HT) * (1 - MD));
   double          Prdop = RD * (1 - HT);
   double          Pwbop = WR * (1 - HT) * MD;
   double          Pwthr = WR;
   double          Pinvl = WR * HT * WUMD;

   double          Nrwht = 0.8075 * MAXCPU;
   double          Nrdop = 0.0850 * MAXCPU;
   double          Nwthr = 0.15 * MAXCPU;

   double          Nwbop = 0.0003 * MAXCPU * 100;
   double          Ninvl = 0.015 * MAXCPU;

   double          Srdop = (20.0);
   double          Swthr = (25.0);
   double          Swbop = (20.0);

   double          Wrwht;
   double          Wrdop;
   double          Wwthr;
   double          Wwbop;
   double          Winvl;

   double          Zrwht = ZX;
   double          Zrdop = ZX;
   double          Zwbop = ZX;
   double          Zinvl = ZX;
   double          Zwthr = ZX;

   double          Xcpu = 0.0;
   double          Pcpu = 0.0;
   double          Ubrd = 0.0;
   double          Ubwr = 0.0;
   double          Ubin = 0.0;
   double          Ucht = 0.0;
   double          Ucrd = 0.0;
   double          Ucwr = 0.0;
   double          Ucin = 0.0;

   char            *model = "ABC Model";

   PDQ_Init(model);

   /* create single bus queueing center */

   PDQ_CreateNode(BUS, CEN, FCFS);

   /* create per CPU cache queueing centers */

   for (i = 0; i < MAXCPU; i++) {
      namex(i, L2C, cname);
      PDQ_CreateNode(cname, CEN, FCFS);
   }

   /* create CPU nodes, workloads, and demands */

   for (i = 0; i < intwt(Nrwht, &Wrwht); i++) {
      namex(i, RWHT, wname);
      PDQ_CreateClosed(wname, TERM, Nrwht, Zrwht);
      namex(i, L2C, cname);
      PDQ_SetDemand(cname, wname, 1.0);
      PDQ_SetDemand(BUS, wname, 0.0);	/* no bus activity */
   }

   for (i = 0; i < intwt(Nrdop, &Wrdop); i++) {
      namex(i, RDOP, wname);
      PDQ_CreateClosed(wname, TERM, Nrdop, Zrdop);
      namex(i, L2C, cname);
      PDQ_SetDemand(cname, wname, gen);	/* generate bus request */
      PDQ_SetDemand(BUS, wname, Srdop);	/* req + async data return */
   }

   if (WBACK) {
      for (i = 0; i < intwt(Nwbop, &Wwbop); i++) {
         namex(i, WROP, wname);
         PDQ_CreateClosed(wname, TERM, Nwbop, Zwbop);
         namex(i, L2C, cname);
         PDQ_SetDemand(cname, wname, gen);
         PDQ_SetDemand(BUS, wname, Swbop);	/* async write to memory? */
      }
   } else {			/* write-thru */
      for (i = 0; i < intwt(Nwthr, &Wwthr); i++) {
         namex(i, WROP, wname);
         PDQ_CreateClosed(wname, TERM, Nwthr, Zwthr);
         namex(i, L2C, cname);
         PDQ_SetDemand(cname, wname, gen);
         PDQ_SetDemand(BUS, wname, Swthr);
      }
   }

   if (WBACK) {
      for (i = 0; i < intwt(Ninvl, &Winvl); i++) {
         namex(i, INVL, wname);
         PDQ_CreateClosed(wname, TERM, Ninvl, Zinvl);
         namex(i, L2C, cname);
         PDQ_SetDemand(cname, wname, gen);	/* gen + intervene */
         PDQ_SetDemand(BUS, wname, 1.0);
      }
   }
   
   
   PDQ_SetWUnit("Reqs");
   PDQ_SetTUnit("Cycs");

   PDQ_Solve(APPROX);

   /* bus utilizations */

   for (i = 0; i < intwt(Nrdop, &Wrdop); i++) {
      namex(i, RDOP, wname);
      Ubrd += PDQ_GetUtilization(BUS, wname, TERM);
   }
   Ubrd *= Wrdop;

   if (WBACK) {
      for (i = 0; i < intwt(Nwbop, &Wwbop); i++) {
         namex(i, WROP, wname);
         Ubwr += PDQ_GetUtilization(BUS, wname, TERM);
      }
      Ubwr *= Wwbop;

      for (i = 0; i < intwt(Ninvl, &Winvl); i++) {
         namex(i, INVL, wname);
         Ubin += PDQ_GetUtilization(BUS, wname, TERM);
      }
      Ubin *= Winvl;

   } else {			/* write-thru */
      for (i = 0; i < intwt(Nwthr, &Wwthr); i++) {
         namex(i, WROP, wname);
         Ubwr += PDQ_GetUtilization(BUS, wname, TERM);
      }
      Ubwr *= Wwthr;
   }

   /* cache measures at CPU[0] only */

   i = 0;
   namex(i, L2C, cname);

   namex(i, RWHT, wname);
   Xcpu = PDQ_GetThruput(TERM, wname) * Wrwht;
   Pcpu += Xcpu * Zrwht;
   Ucht = PDQ_GetUtilization(cname, wname, TERM) * Wrwht;

   namex(i, RDOP, wname);
   Xcpu = PDQ_GetThruput(TERM, wname) * Wrdop;
   Pcpu += Xcpu * Zrdop;
   Ucrd = PDQ_GetUtilization(cname, wname, TERM) * Wrdop;

   Pcpu *= 1.88;

   if (WBACK) {
      namex(i, WROP, wname);
      Ucwr = PDQ_GetUtilization(cname, wname, TERM) * Wwbop;
      namex(i, INVL, wname);
      Ucin = PDQ_GetUtilization(cname, wname, TERM) * Winvl;
   } else {			/* write-thru */
      namex(i, WROP, wname);
      Ucwr = PDQ_GetUtilization(cname, wname, TERM) * Wwthr;
   }

   printf("\n**** %s Results ****\n", model);
   printf("PDQ nodes: %d  PDQ streams: %d\n", PDQ_GetNodesCount(), PDQ_GetStreamsCount());
   printf("Memory Mode: %s\n", WBACK ? "WriteBack" : "WriteThru");
   printf("Ncpu:  %2d\n", MAXCPU);
   printf("Nrwht: %5.2f (N:%2d  W:%5.2f)\n",
	  Nrwht, intwt(Nrwht, &Wrwht), Wrwht);
   printf("Nrdop: %5.2f (N:%2d  W:%5.2f)\n",
	  Nrdop, intwt(Nrdop, &Wrdop), Wrdop);

   if (WBACK) {
      printf("Nwbop: %5.2f (N:%2d  W:%5.2f)\n",
	     Nwbop, intwt(Nwbop, &Wwbop), Wwbop);
      printf("Ninvl: %5.2f (N:%2d  W:%5.2f)\n",
	     Ninvl, intwt(Ninvl, &Winvl), Winvl);
   } else {
      printf("Nwthr: %5.2f (N:%2d  W:%5.2f)\n",
	     Nwthr, intwt(Nwthr, &Wwthr), Wwthr);
   }

   printf("\n");
   printf("Hit Ratio:   %5.2f %%\n", HT * 100.0);
   printf("Read Miss:   %5.2f %%\n", RD * (1 - HT) * 100.0);
   printf("WriteMiss:   %5.2f %%\n", WR * (1 - HT) * 100.0);
   printf("Ucpu:        %5.2f %%\n", Pcpu * 100.0 / MAXCPU);
   printf("Pcpu:        %5.2f\n", Pcpu);
   printf("\n");
   printf("Ubus[reads]: %5.2f %%\n", Ubrd * 100.0);
   printf("Ubus[write]: %5.2f %%\n", Ubwr * 100.0);
   printf("Ubus[inval]: %5.2f %%\n", Ubin * 100.0);
   printf("Ubus[total]: %5.2f %%\n", (Ubrd + Ubwr + Ubin) * 100.0);
   printf("\n");
   printf("Uca%d[hits]:  %5.2f %%\n", i, Ucht * 100.0);
   printf("Uca%d[reads]: %5.2f %%\n", i, Ucrd * 100.0);
   printf("Uca%d[write]: %5.2f %%\n", i, Ucwr * 100.0);
   printf("Uca%d[inval]: %5.2f %%\n", i, Ucin * 100.0);
   printf("Uca%d[total]: %5.2f %%\n", i, (Ucht + Ucrd + Ucwr + Ucin) * 100.0);

}
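This listing declares namex(), intwt(), and itoa() but leaves their bodies, along with macros such as MAXCPU, BUS, L2C, RWHT, RDOP, WROP, INVL, WBACK, ZX, and gen, to the surrounding model source. Purely as an illustration, a minimal hypothetical sketch of the two helpers that are actually called, inferred from their call sites rather than taken from the original code, could look like this:

/* Hypothetical helper sketches, inferred only from how the listing calls them. */
#include <stdio.h>

/* Build an indexed name such as "L2C3", assuming the base labels
 * (L2C, RWHT, RDOP, ...) are plain strings. */
void namex(int indx, char *base, char *result)
{
   sprintf(result, "%s%d", base, indx);
}

/* One plausible reading of intwt(): round a fractional population N to a
 * whole number of workload streams (the return value) and report the
 * residual scale factor through *w, which the listing later uses to
 * rescale per-stream utilizations and throughputs. */
int intwt(double N, double *w)
{
   int n = (N < 1.0) ? 1 : (int) (N + 0.5);

   *w = N / (double) n;
   return n;
}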
Example 3
int main(void)
{
   extern int      nodes, streams;
   extern JOB_TYPE *job;
   extern NODE_TYPE *node;
   extern char     s1[];
   char            transCD[MAXCHARS], transRQ[MAXCHARS], transSU[MAXCHARS];
   char            dummyCD[MAXCHARS], dummyRQ[MAXCHARS], dummySU[MAXCHARS];
   char            nodePC[MAXCHARS], nodeFS[MAXCHARS], nodeGW[MAXCHARS];
   char            nodeMF[MAXCHARS], nodeTR[MAXCHARS];
   double          demand[MAXPROC][MAXDEV], util[MAXDEV], udsk[MAXDEV],
                   udasd[MAXDEV], RTexpect[MAXPROC];
   double          fsd, RTmean, ulan, ufs, uws, ugw, umf;
   int             work, dev, i, j;

   /*
   Disk-array data structures probably should go into PDQ_Build.c one
   day.
   */

   devarray_type  *FDarray;
   devarray_type  *MDarray;

   if ((FDarray = (devarray_type *) calloc(sizeof(devarray_type), 10)) == NULL)
      errmsg("", "FDarray allocation failed!\n");

   if ((MDarray = (devarray_type *) calloc(sizeof(devarray_type), 10)) == NULL)
      errmsg("", "MDarray allocation failed!\n");

   for (i = 0; i < FS_DISKS; i++) {
      FDarray[i].id = FD + i;
      resets(s1);
      sprintf(s1, "FSDK%d", i);
      strcpy(FDarray[i].label, s1);
   }

   for (i = 0; i < MF_DISKS; i++) {
      MDarray[i].id = MD + i;
      resets(s1);
      sprintf(s1, "MFDK%d", i);
      strcpy(MDarray[i].label, s1);
   }

   /*
   CPU service times are calculated from instruction counts tabulated
   in original 1993 CMG paper.
   */

   demand[CD_Req][PC] = 200 * k / PC_MIPS;
   demand[CD_Rpy][PC] = 100 * k / PC_MIPS;
   demand[RQ_Req][PC] = 150 * k / PC_MIPS;
   demand[RQ_Rpy][PC] = 200 * k / PC_MIPS;
   demand[SU_Req][PC] = 300 * k / PC_MIPS;
   demand[SU_Rpy][PC] = 300 * k / PC_MIPS;
   demand[Req_CD][FS] = 50 * k / FS_MIPS;
   demand[Req_RQ][FS] = 70 * k / FS_MIPS;
   demand[Req_SU][FS] = 10 * k / FS_MIPS;
   demand[CD_Msg][FS] = 35 * k / FS_MIPS;
   demand[RQ_Msg][FS] = 35 * k / FS_MIPS;
   demand[SU_Msg][FS] = 35 * k / FS_MIPS;
   demand[GT_Snd][GW] = 50 * k / GW_MIPS;
   demand[GT_Rcv][GW] = 50 * k / GW_MIPS;
   demand[MF_CD][MF] = 50 * k / MF_MIPS;
   demand[MF_RQ][MF] = 150 * k / MF_MIPS;
   demand[MF_SU][MF] = 20 * k / MF_MIPS;

   /*
    Service time on the LAN to send and receive packets from any of the PC
    desktop, the file server, or the SNA gateway. 8 bits per byte.
    */
   
   demand[LAN_TX][PC] = (double) TR_Bytes * 8 / TR_Mbps;
   demand[LAN_TX][FS] = (double) TR_Bytes * 8 / TR_Mbps;
   demand[LAN_TX][GW] = (double) TR_Bytes * 8 / TR_Mbps;

   /*
    * File server disk IOs = number of accesses x caching / (max IOs / Sec)
    */

   for (i = 0; i < FS_DISKS; i++) {
      demand[Req_CD][FDarray[i].id] = (1.0 * 0.5 / 128.9) / FS_DISKS;
      demand[Req_RQ][FDarray[i].id] = (1.5 * 0.5 / 128.9) / FS_DISKS;
      demand[Req_SU][FDarray[i].id] = (0.2 * 0.5 / 128.9) / FS_DISKS;
      demand[CD_Msg][FDarray[i].id] = (1.0 * 0.5 / 128.9) / FS_DISKS;
      demand[RQ_Msg][FDarray[i].id] = (1.5 * 0.5 / 128.9) / FS_DISKS;
      demand[SU_Msg][FDarray[i].id] = (0.5 * 0.5 / 128.9) / FS_DISKS;
   }


   /* Mainframe DASD IOs = (#accesses / (max IOs/Sec)) / #disks */
   for (i = 0; i < MF_DISKS; i++) {
      demand[MF_CD][MDarray[i].id] = (2.0 / 60.24) / MF_DISKS;
      demand[MF_RQ][MDarray[i].id] = (4.0 / 60.24) / MF_DISKS;
      demand[MF_SU][MDarray[i].id] = (1.0 / 60.24) / MF_DISKS;
   }

   /* Now, start building the PDQ model... */

   PDQ_Init(scenario);

   /* Define physical resources as PDQ queueing nodes. */
   strcpy(nodePC, "PCDESK");
   strcpy(nodeFS, "FSERVR");
   strcpy(nodeGW, "GATWAY");
   strcpy(nodeMF, "MFRAME");
   strcpy(nodeTR, "TRLAN");

   PDQ_CreateNode(nodePC, CEN, FCFS);
   PDQ_CreateNode(nodeFS, CEN, FCFS);
   PDQ_CreateNode(nodeGW, CEN, FCFS);
   PDQ_CreateNode(nodeMF, CEN, FCFS);

   for (i = 0; i < FS_DISKS; i++) {
      PDQ_CreateNode(FDarray[i].label, CEN, FCFS);
   }

   for (i = 0; i < MF_DISKS; i++) {
      PDQ_CreateNode(MDarray[i].label, CEN, FCFS);
   }

   /*
    * NOTE: Although the token ring LAN is a passive transmission medium
    * rather than a computational device, it is treated as a separate queueing
    * node so as to agree with the results presented in the original CMG 1993
    * paper.
    */

   PDQ_CreateNode(nodeTR, CEN, FCFS);

   /*
    * Because the desktop PCs are all of the same type and emitting the same
    * homogeneous transaction workload, the focus can be placed on the
    * response time performance of a single PC workstation and generalized to
    * the others. Rather than having N * 3 workload streams or classes, we
    * simply model 2 PC desktops: the "real" one of interest and a dummy PC
    * representing the remaining (N-1) * 3 streams.
    */

   strcpy(transCD, "CatDisplay");
   strcpy(transRQ, "RemotQuote");
   strcpy(transSU, "StatUpdate");

   /* Aggregate transactions */
   strcpy(dummyCD, "CatDispAgg");
   strcpy(dummyRQ, "RemQuotAgg");
   strcpy(dummySU, "StatUpdAgg");

   PDQ_CreateOpen(transCD, 1 * 4.0 * TPS);
   PDQ_CreateOpen(transRQ, 1 * 8.0 * TPS);
   PDQ_CreateOpen(transSU, 1 * 1.0 * TPS);
   PDQ_CreateOpen(dummyCD, (USERS - 1) * 4.0 * TPS);
   PDQ_CreateOpen(dummyRQ, (USERS - 1) * 8.0 * TPS);
   PDQ_CreateOpen(dummySU, (USERS - 1) * 1.0 * TPS);

   /*
   Define the service demands on each physical resource, following the
   CD request + reply chain in the workflow diagram.
   Note that demands are set only for the "real" PC of interest and for the
   aggregated streams representing the remaining (N-1) PCs.
   */

   /******************* CD request + reply chain ... *******************/
   PDQ_SetDemand(nodePC, transCD, demand[CD_Req][PC] + (5 * demand[CD_Rpy][PC]));
   PDQ_SetDemand(nodeFS, transCD, demand[Req_CD][FS] + (5 * demand[CD_Msg][FS]));
   PDQ_SetDemand(nodeFS, dummyCD, demand[Req_CD][FS] + (5 * demand[CD_Msg][FS]));
   PDQ_SetDemand(nodeGW, transCD, demand[GT_Snd][GW] + (5 * demand[GT_Rcv][GW]));
   PDQ_SetDemand(nodeGW, dummyCD, demand[GT_Snd][GW] + (5 * demand[GT_Rcv][GW]));
   PDQ_SetDemand(nodeMF, transCD, demand[MF_CD][MF]);
   PDQ_SetDemand(nodeMF, dummyCD, demand[MF_CD][MF]);
   
   for (i = 0; i < FS_DISKS; i++) {
      fsd = demand[Req_CD][FDarray[i].id] + (5 * demand[CD_Msg][FDarray[i].id]);
      PDQ_SetDemand(FDarray[i].label, transCD, fsd);
      PDQ_SetDemand(FDarray[i].label, dummyCD, fsd);
   }

   for (i = 0; i < MF_DISKS; i++) {
      PDQ_SetDemand(MDarray[i].label, transCD, demand[MF_CD][MDarray[i].id]);
      PDQ_SetDemand(MDarray[i].label, dummyCD, demand[MF_CD][MDarray[i].id]);
   }


   /*
   NOTE: Synchronous process execution causes data for the CD transaction to
   cross the LAN 12 times, as reflected in the following parameterization of
   PDQ_SetDemand.
   */

   PDQ_SetDemand(nodeTR, transCD,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (5 * demand[LAN_TX][GW]) +
		 (5 * demand[LAN_TX][FS]) +
		 (5 * demand[LAN_TX][PC]));

   PDQ_SetDemand(nodeTR, dummyCD,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (5 * demand[LAN_TX][GW]) +
		 (5 * demand[LAN_TX][FS]) +
		 (5 * demand[LAN_TX][PC]));



   /******************* RQ request + reply chain ... *******************/
   PDQ_SetDemand(nodePC, transRQ, demand[RQ_Req][PC] + (3 * demand[RQ_Rpy][PC]));
   PDQ_SetDemand(nodeFS, transRQ, demand[Req_RQ][FS] + (3 * demand[RQ_Msg][FS]));
   PDQ_SetDemand(nodeFS, dummyRQ, demand[Req_RQ][FS] + (3 * demand[RQ_Msg][FS]));

   for (i = 0; i < FS_DISKS; i++) {
      PDQ_SetDemand(FDarray[i].label, transRQ,
		    demand[Req_RQ][FDarray[i].id] +
		    (3 * demand[RQ_Msg][FDarray[i].id]));
      PDQ_SetDemand(FDarray[i].label, dummyRQ,
		    demand[Req_RQ][FDarray[i].id] +
		    (3 * demand[RQ_Msg][FDarray[i].id]));
   }

   PDQ_SetDemand(nodeGW, transRQ, demand[GT_Snd][GW] + (3 * demand[GT_Rcv][GW]));
   PDQ_SetDemand(nodeGW, dummyRQ, demand[GT_Snd][GW] + (3 * demand[GT_Rcv][GW]));
   PDQ_SetDemand(nodeMF, transRQ, demand[MF_RQ][MF]);
   PDQ_SetDemand(nodeMF, dummyRQ, demand[MF_RQ][MF]);

   for (i = 0; i < MF_DISKS; i++) {
      PDQ_SetDemand(MDarray[i].label, transRQ,
		    demand[MF_RQ][MDarray[i].id]);
      PDQ_SetDemand(MDarray[i].label, dummyRQ,
		    demand[MF_RQ][MDarray[i].id]);
   }

   PDQ_SetDemand(nodeTR, transRQ,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (3 * demand[LAN_TX][GW]) +
		 (3 * demand[LAN_TX][FS]) +
		 (3 * demand[LAN_TX][PC]));
   PDQ_SetDemand(nodeTR, dummyRQ,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (3 * demand[LAN_TX][GW]) +
		 (3 * demand[LAN_TX][FS]) +
		 (3 * demand[LAN_TX][PC]));




   /******************* SU request + reply chain *******************/
   PDQ_SetDemand(nodePC, transSU, demand[SU_Req][PC] + demand[SU_Rpy][PC]);
   PDQ_SetDemand(nodeFS, transSU, demand[Req_SU][FS] + demand[SU_Msg][FS]);
   PDQ_SetDemand(nodeFS, dummySU, demand[Req_SU][FS] + demand[SU_Msg][FS]);

   for (i = 0; i < FS_DISKS; i++) {
      PDQ_SetDemand(FDarray[i].label, transSU,
		    demand[Req_SU][FDarray[i].id] +
		    demand[SU_Msg][FDarray[i].id]);
      PDQ_SetDemand(FDarray[i].label, dummySU,
		    demand[Req_SU][FDarray[i].id] +
		    demand[SU_Msg][FDarray[i].id]);
   }

   PDQ_SetDemand(nodeGW, transSU, demand[GT_Snd][GW] + demand[GT_Rcv][GW]);
   PDQ_SetDemand(nodeGW, dummySU, demand[GT_Snd][GW] + demand[GT_Rcv][GW]);
   PDQ_SetDemand(nodeMF, transSU, demand[MF_SU][MF]);
   PDQ_SetDemand(nodeMF, dummySU, demand[MF_SU][MF]);

   for (i = 0; i < MF_DISKS; i++) {
      PDQ_SetDemand(MDarray[i].label, transSU,
		    demand[MF_SU][MDarray[i].id]);
      PDQ_SetDemand(MDarray[i].label, dummySU,
		    demand[MF_SU][MDarray[i].id]);
   }

   PDQ_SetDemand(nodeTR, transSU,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (1 * demand[LAN_TX][GW]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][PC]));
   PDQ_SetDemand(nodeTR, dummySU,
		 (1 * demand[LAN_TX][PC]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][GW]) +
		 (1 * demand[LAN_TX][GW]) +
		 (1 * demand[LAN_TX][FS]) +
		 (1 * demand[LAN_TX][PC]));

   PDQ_SetDebug(FALSE);
   PDQ_SetWUnit("Trans");
   PDQ_Solve(CANON);
   if (PRINT_REPORT) {
      PDQ_Report();
   }
   
   /*
    Break out each tx response time together with resource utilizations.
    The order of print out is the same as the 1993 CMG paper.
    */

   /* Mean response times reported in the CMG93 paper */
   RTexpect[0] = 0.2754;
   RTexpect[1] = 0.2625;
   RTexpect[2] = 0.1252;
   RTexpect[3] = 0.2624;
   RTexpect[4] = 0.2470;
   RTexpect[5] = 0.1120;

   printf("*** Metric breakout for \"%s\" with %d clients ***\n\n",
	  scenario, USERS);
   printf("Transaction\t    R (Sec)\t  CMG paper\n");
   printf("-----------\t    -------\t  ---------\n");

   for (work = 0; work < streams; work++) {
      resets(s1);
      strcpy(s1, job[work].trans->name);
      RTmean = PDQ_GetResponse(TRANS, s1);
      printf("%-15s\t%10.4f\t%10.4f\n", s1, RTmean, RTexpect[work]);
   }

   printf("\n\n");

   /*
    * Get node utilizations. This is a bit of a hack and should be written as
    * a subroutine.
    */
   for (dev = 0; dev < nodes; dev++) {
      util[dev] = 0.0;		/* reset array */
      for (work = 0; work < streams; work++) {
          util[dev] += 100 * PDQ_GetUtilization(node[dev].devname, job[work].trans->name, TRANS);
      }
   }

   for (dev = 0; dev < nodes; dev++) {
       for (i = 0; i < MF_DISKS; i++) {
           if (strcmp(node[dev].devname, MDarray[i].label) == 0) {
               udasd[i] = util[dev];
           }
       }
       for (i = 0; i < FS_DISKS; i++) {
           if (strcmp(node[dev].devname, FDarray[i].label) == 0) {
               udsk[i] = util[dev];
           }
       }       
       if (strcmp(node[dev].devname, nodePC) == 0) {
           uws = util[dev];
       }
       if (strcmp(node[dev].devname, nodeGW) == 0) {
           ugw = util[dev];
       }
       if (strcmp(node[dev].devname, nodeFS) == 0) {
           ufs = util[dev];
       }
       if (strcmp(node[dev].devname, nodeMF) == 0) {
           umf = util[dev];
       }
       if (strcmp(node[dev].devname, nodeTR) == 0) {
           ulan = util[dev];
       }
   }
   
   printf("PDQ Node       \t    %% Busy\t  CMG paper\n");
   printf("--------       \t    -------\t  ---------\n");
   printf("%-15s\t%10.4f\t%10.4f\n", "Token ring", ulan, 49.3333);
   printf("%-15s\t%10.4f\t%10.4f\n", "PC Desktop", uws,  0.5802);
   printf("%-15s\t%10.4f\t%10.4f\n", "File server", ufs, 11.9157);
   printf("%-15s\t%10.4f\t%10.4f\n", "Gateway CPU", ugw, 60.4167);
   printf("%-15s\t%10.4f\t%10.4f\n", "Mainframe", umf,   14.0873);


   for (i = 0; i < FS_DISKS; i++) {
      printf("%s%d\t%10.4f\t%10.4f\n", "FS disks",
	     FDarray[i].id, udsk[i], 59.0028);
   }

   for (i = 0; i < MF_DISKS; i++) {
      printf("%s%d\t%10.4f\t%10.4f\n", "DASD disk",
	     MDarray[i].id, udasd[i], 35.5502);
   }

}				/* main */
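This listing likewise depends on declarations supplied elsewhere: the devarray_type record, the resets() and errmsg() utilities, and model constants such as MAXCHARS, MAXPROC, MAXDEV, FS_DISKS, MF_DISKS, the workload and device indices, k, the MIPS ratings, TR_Bytes, TR_Mbps, TPS, USERS, scenario, and PRINT_REPORT. A minimal hypothetical version of the pieces whose shape can be inferred from the listing itself (assumptions for illustration, not the original header) might be:

/* Hypothetical supporting declarations, inferred from usage in the listing. */
#include <stdio.h>
#include <stdlib.h>

#define MAXCHARS 32                /* assumed length of the name buffers     */

typedef struct {
   int  id;                        /* device index of this disk, e.g. FD + i */
   char label[MAXCHARS];           /* PDQ node name, e.g. "FSDK0" or "MFDK0" */
} devarray_type;

/* Reset a scratch string buffer before it is reused. */
void resets(char *s)
{
   s[0] = '\0';
}

/* Report a fatal model-construction error and stop. */
void errmsg(char *where, char *msg)
{
   fprintf(stderr, "%s%s", where, msg);
   exit(1);
}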
Example 4
int main(void) {

	int              nodes;
	int              streams;
	
	// Mean service times from G&H
	double           stimeSelect 		=  0.5; 	// mins 
	double           stimeClaims 		=  6.0; 	// mins 
	double           stimePolicy 		= 20.0; 	// mins 

	double           callRateIncoming 	=  35.0/60;	// per min 	
	double           callRateClaim;		// compute from traffic eqns below
	double           callRatePolicy; 	// compute from traffic eqns below
	
	// Routing probabilities from G&H
	double           routeSelectClaim 	= 0.55;
	double           routeSelectPolicy 	= 0.45;
	double           routePolicyClaim 	= 0.01;
	double           routeClaimPolicy 	= 0.02;
	
	// Visit ratios: lambda_k / lambda 
	double           vSelect;   	// no branching
	double           vClaims;		// compute from traffic eqns below
	double           vPolicy;		// compute from traffic eqns below


	/*** Solve the traffic equations first ***/
	
	callRateClaim	= 
		(routeSelectClaim + routePolicyClaim * routeSelectPolicy) * callRateIncoming / 
		(1 - routePolicyClaim * routeClaimPolicy);
	
	callRatePolicy	= routeSelectPolicy * callRateIncoming + 
		routeClaimPolicy * callRateClaim;
	
	vSelect = 1.0;	// no branching
	vClaims = callRateClaim / callRateIncoming;
	vPolicy = callRatePolicy / callRateIncoming;
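	/*
	 The call rates computed above follow from the flow-balance (traffic)
	 equations, with lambda = callRateIncoming:

	    lambda_Claims = routeSelectClaim  * lambda + routePolicyClaim * lambda_Policy
	    lambda_Policy = routeSelectPolicy * lambda + routeClaimPolicy * lambda_Claims

	 Substituting the second equation into the first and collecting the
	 lambda_Claims terms gives

	    lambda_Claims = (routeSelectClaim + routePolicyClaim * routeSelectPolicy) * lambda
	                    / (1 - routePolicyClaim * routeClaimPolicy)

	 and lambda_Policy then follows directly, as coded above.
	*/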


	/*** Now set up and solve the PDQ model using these visit ratios ***/
	
	PDQ_Init("G&H Example 4.2");
	
	streams = PDQ_CreateOpen("Customers", callRateIncoming);	
	PDQ_SetWUnit("Calls");
	PDQ_SetTUnit("Mins");	// timebase for PDQ report
	

	// Use a standard PDQ node as a test case
	nodes = PDQ_CreateNode("Select", CEN, FCFS); 
	
	// Multiserver nodes
	nodes = PDQ_CreateMultiNode(3, "Claims", CEN, FCFS); 
	nodes = PDQ_CreateMultiNode(7, "Policy", CEN, FCFS);
	
	// In PDQ the computed visit ratios multiply the service times
	PDQ_SetDemand("Select", "Customers", vSelect * stimeSelect);
	PDQ_SetDemand("Claims", "Customers", vClaims * stimeClaims); 
	PDQ_SetDemand("Policy", "Customers", vPolicy * stimePolicy); 

	PDQ_Solve(CANON);
	PDQ_Report();
   
}  // main