Example #1
void GpuRule::generateCallCode(const std::string& name,
                               Transform& trans,
                               CodeGenerator& o,
                               const SimpleRegionPtr& region,
                               RuleFlavor flavor,
                               bool,
                               std::vector<RegionNodeGroup>& regionNodesGroups,
                               int nodeID,
                               int gpuCopyOut,
                               SpatialCallType)
{
  o.comment("from GpuRule::generateCallCode():");
  switch(flavor) {
  case RuleFlavor::SEQUENTIAL:
    o.callSpatial(_rule->trampcodename(trans)+TX_OPENCL_POSTFIX, region);
    break;
  case RuleFlavor::WORKSTEALING:
    o.mkCreateGpuSpatialMethodCallTask(trans.name(), name,
                                       trans.instClassName() + "_workstealing",
                                       _rule->trampcodename(trans), region,
                                       regionNodesGroups, nodeID, gpuCopyOut,
                                       _rule->getToRegions(), _rule->isDivisible());
    break;
  case RuleFlavor::DISTRIBUTED:
    o.comment("flavor distributed");
    UNIMPLEMENTED();
    break;
  default:
    UNIMPLEMENTED();
  }
}
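
For orientation, a hedged sketch of what the SEQUENTIAL branch ultimately emits, assuming TX_OPENCL_POSTFIX expands to "_opencl", that callSpatial() wraps the invocation in the same _tmp_begin/_tmp_end idiom visible in Example #7, and a hypothetical trampoline named foo_trampoline over a 2-D region:

// from GpuRule::generateCallCode():
{
  IndexT _tmp_begin[] = {x_begin, y_begin};
  IndexT _tmp_end[]   = {x_end, y_end};
  foo_trampoline_opencl(_tmp_begin, _tmp_end);
}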
Example #2
void petabricks::IterationDefinition::unpackargs(CodeGenerator& o) const {
  o.comment("Define iteration bounds");
  for(size_t i=0; i<_var.size(); ++i){
    o.write("const IndexT "+_begin[i]->toString()+" = "COORD_BEGIN_STR"["+jalib::XToString(i)+"];");
    o.write("const IndexT "+_end[i]->toString()+" = "COORD_END_STR"["+jalib::XToString(i)+"];");
  }
}
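
As a reading aid, a minimal sketch of the code this emits for a hypothetical 2-D iteration, assuming COORD_BEGIN_STR and COORD_END_STR expand to parameter names such as _iter_begin and _iter_end (assumed here, not confirmed by the source):

// Define iteration bounds
const IndexT x_begin = _iter_begin[0];
const IndexT x_end   = _iter_end[0];
const IndexT y_begin = _iter_begin[1];
const IndexT y_end   = _iter_end[1];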
Example #3
void petabricks::IterationDefinition::genScratchRegionLoopBegin(CodeGenerator& o){
  if(isSingleCall()){
    genLoopBegin(o);
  }else{
    o.comment("Iterate along all the directions");

    // Compute size
    for(size_t i=0; i<_size.size(); ++i){
      o.write("int " + _size[i]->toString() + " = " + _end[i]->toString() +
              " - " + _begin[i]->toString() + ";");
    }

    for(size_t i=0; i<_var.size(); ++i){
      FormulaPtr b = new FormulaLiteral<int>(0);
      FormulaPtr e = _size[i];
      FormulaPtr s = _step[i];
      FormulaPtr v = _var[i];
      //TODO: expand to reorder dimensions
      if(_order.canIterateForward(i) || !_order.canIterateBackward(i)){
        JWARNING(_order.canIterateForward(i))(_order).Text("couldn't find valid iteration order, assuming forward");
        o.beginFor(v->toString(), b, e, s);
      } else {
        o.beginReverseFor(v->toString(), b, e, s);
      }
    }
  }
}
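
A minimal sketch of the non-single-call output for a hypothetical 2-D region with unit steps, assuming beginFor() emits an ordinary counted for loop (a sketch, not the generator's verbatim output). Note the scratch loops are rebased to zero, iterating over the region's extent rather than its absolute coordinates:

// Iterate along all directions
int x_size = x_end - x_begin;
int y_size = y_end - y_begin;
for(IndexT x = 0; x < x_size; x += 1){
for(IndexT y = 0; y < y_size; y += 1){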
Example #4
void petabricks::IterationDefinition::genLoopBegin(CodeGenerator& o){
  if(isSingleCall()){
    o.write("{");
    for(size_t i=0; i<_var.size(); ++i){
      o.varDecl("const IndexT "+_var[i]->toString()+" = "+_begin[i]->toString());
    }
  }else{
    o.comment("Iterate along all the directions");
    for(size_t i=0; i<_var.size(); ++i){
      FormulaPtr b = _begin[i];
      FormulaPtr e = _end[i];
      FormulaPtr s = _step[i];
      FormulaPtr v = _var[i];
      //TODO: expand to reorder dimensions
      if(_order.canIterateForward(i) || !_order.canIterateBackward(i)){
        JWARNING(_order.canIterateForward(i))(_order).Text("couldn't find valid iteration order, assuming forward");
        o.beginFor(v->toString(), b, e, s);
      } else {
        o.beginReverseFor(v->toString(), b, e, s);
      }
    }
  }
}
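
Under the same hypothetical 2-D naming as above, a sketch of the two shapes this emits. Both are scope/loop openers; their closing braces come from a matching loop-end generator (not shown here). Unlike the zero-rebased scratch loops of Example #3, the general path iterates the absolute [begin, end) range:

// single-call path: the "loop" degenerates to one point
{
  const IndexT x = x_begin;
  const IndexT y = y_begin;

// general path
// Iterate along all directions
for(IndexT x = x_begin; x < x_end; x += 1){
for(IndexT y = y_begin; y < y_end; y += 1){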
Example #5
void
GpuRule::generateCallTaskCode(const std::string& name,
                              Transform& trans,
                              CodeGenerator& o,
                              const SimpleRegionPtr& region)
{
  o.comment( "GENERATECALLTASKCODE" );
  o.mkSpatialTask(name, trans.instClassName(), codename(), region, RuleFlavor::WORKSTEALING);
}
Example #6
void
GpuRule::generateCallCodeSimple(Transform& /*trans*/, CodeGenerator& o, const SimpleRegionPtr& region)
{
  o.comment( "GENERATECALLCODESIMPLE" );
  o.callSpatial(codename(), region);
}
Example #7
void petabricks::CodeGenerator::mkCreateGpuSpatialMethodCallTask(
    const std::string& transname,
    const std::string& taskname, 
    const std::string& objname, 
    const std::string& methodname, 
    const SimpleRegion& region, 
    std::vector<RegionNodeGroup>& regionNodesGroups, 
    int nodeID, 
    int gpuCopyOut, 
    RegionList to, 
    bool divisible) {
  std::string taskclass;
  int dim_int = region.totalDimensions();
  std::string lastdim = jalib::XToString(dim_int - 1);
  std::string max = region.maxCoord()[dim_int - 1]->toString();
  std::string min = region.minCoord()[dim_int - 1]->toString();

  if(!divisible) {
    taskclass = "petabricks::CreateGpuSpatialMethodCallTask<"+objname
      + ", " + jalib::XToString(region.totalDimensions())
      + ", &" + objname + "::" + methodname + TX_OPENCL_POSTFIX + "_createtasks"
      + ">";
    //beginIf(min+"<"+max);
    comment("MARKER 6");
    write("IndexT _tmp_begin[] = {" + region.getIterationLowerBounds() + "};");
    write("IndexT _tmp_end[] = {"   + region.getIterationUpperBounds() + "};");
    write("RegionNodeGroupMapPtr groups = new RegionNodeGroupMap();");
    for(std::vector<RegionNodeGroup>::iterator group = regionNodesGroups.begin(); group != regionNodesGroups.end(); ++group){
      write("{");
      incIndent();
      write("std::set<int> ids;");
      for(std::vector<int>::iterator id = group->nodeIDs().begin(); id != group->nodeIDs().end(); ++id){
        write("ids.insert("+jalib::XToString(*id)+");");
      }
      write("groups->insert(RegionNodeGroup(\""+group->matrixName()+"\",ids));");
      decIndent();
      write("}");
    }
    write(taskname+" = new "+taskclass+"(this,_tmp_begin, _tmp_end, "+jalib::XToString(nodeID)+", groups, "+jalib::XToString(gpuCopyOut)+");");
    //endIf();

    return;
  }

  std::string n_div = "cont_" + jalib::XToString(_contCounter++);
  write(taskname + " = new petabricks::MethodCallTask<"+_curClass+", &"+_curClass+"::"+n_div+">( this );");

  // Add divider function
  CodeGenerator helper = forkhelper();
  helper.beginFunc("DynamicTaskPtr", n_div);
  helper.write("DynamicTaskPtr _fini = new NullDynamicTask();");

  // Assign the gpu-cpu division point.
  helper.write("ElementT gpu_ratio = "+transname+"_gpuratio/8.0;");

  std::string div = "div";
  RegionPtr proxy = to.front();
  helper.write("IndexT totalRow = "+proxy->matrix()->name()+".size("+jalib::XToString(dim_int - 1)+");");
  helper.write("IndexT div = ceil(gpu_ratio * totalRow);");
  helper.beginIf("div > " + max);
  helper.write("div = "+max+";");
  helper.endIf();

  // GPU

  taskclass = "petabricks::CreateGpuSpatialMethodCallTask<"+objname
              + ", " + jalib::XToString(dim_int)
              + ", &" + objname + "::" + methodname + TX_OPENCL_POSTFIX + "_createtasks"
              + ">";
  helper.comment("MARKER 6");
  
  //helper.beginIf(min+" < div");
  helper.write("IndexT _gpu_begin[] = {" + region.getIterationLowerBounds() + "};");
  helper.write("IndexT _gpu_end[] = {" + region.getIterationMiddleEnd(div) + "};");
  helper.write("RegionNodeGroupMapPtr groups = new RegionNodeGroupMap();");

  for(std::vector<RegionNodeGroup>::iterator group = regionNodesGroups.begin(); group != regionNodesGroups.end(); ++group){
    helper.write("{");
    helper.incIndent();
    helper.write("std::set<int> ids;");
    for(std::vector<int>::iterator id = group->nodeIDs().begin(); id != group->nodeIDs().end(); ++id){
      helper.write("ids.insert("+jalib::XToString(*id)+");");
    }
    helper.write("groups->insert(RegionNodeGroup(\""+group->matrixName()+"\",ids));");
    helper.decIndent();
    helper.write("}");
  }

  helper.write("DynamicTaskPtr gpu_task = new "+taskclass+"(this,_gpu_begin, _gpu_end, "+jalib::XToString(nodeID)+", groups, "+jalib::XToString(gpuCopyOut)+");");
  helper.write("gpu_task->enqueue();");
  helper.write("_fini->dependsOn(gpu_task);");
  //helper.endIf();

  // CPU
  taskclass = "petabricks::SpatialMethodCallTask<CLASS"
              ", " + jalib::XToString(dim_int)
              + ", &CLASS::" + methodname + "_workstealing_wrap"
    //+ ", &CLASS::" + methodname + "_workstealing"
              + ">";
  helper.beginIf("div < " + max);
  helper.beginIf("div < " + min);
  helper.write("div = "+min+";");
  helper.endIf();
  helper.comment("MARKER 6");
  helper.write("IndexT _cpu_begin[] = {" + region.getIterationMiddleBegin(div) + "};");
  helper.write("IndexT _cpu_end[] = {"   + region.getIterationUpperBounds() + "};");
  helper.write("DynamicTaskPtr cpu_task = new "+taskclass+"(this,_cpu_begin, _cpu_end);");
  helper.write("cpu_task->enqueue();");
  helper.write("_fini->dependsOn(cpu_task);");
  helper.endIf();
  
  helper.write("return _fini;");
  helper.endFunc();
}
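
Tying Example #7 together: the divisible path generates a continuation method that splits the last dimension at a tunable ratio, enqueues a GPU task for the lower rows and a work-stealing CPU task for the remainder, and joins both on a null task. Below is a hedged sketch of the emitted shape for a 2-D region; the names foo, out, rule0, foo_instance, nodeID 0, and gpuCopyOut 1 are all hypothetical stand-ins, and TX_OPENCL_POSTFIX is assumed to expand to "_opencl":

DynamicTaskPtr cont_0() {
  DynamicTaskPtr _fini = new NullDynamicTask();
  ElementT gpu_ratio = foo_gpuratio/8.0;
  IndexT totalRow = out.size(1);
  IndexT div = ceil(gpu_ratio * totalRow);
  if(div > y_end){
    div = y_end;
  }
  // GPU task covers rows [y_begin, div) of the last dimension
  IndexT _gpu_begin[] = {x_begin, y_begin};
  IndexT _gpu_end[]   = {x_end, div};
  // (the per-RegionNodeGroup inserts emitted by the loops above are elided)
  RegionNodeGroupMapPtr groups = new RegionNodeGroupMap();
  DynamicTaskPtr gpu_task = new petabricks::CreateGpuSpatialMethodCallTask<
      foo_instance, 2, &foo_instance::rule0_opencl_createtasks>(
      this, _gpu_begin, _gpu_end, 0, groups, 1);
  gpu_task->enqueue();
  _fini->dependsOn(gpu_task);
  // CPU task covers the remaining rows [div, y_end)
  if(div < y_end){
    if(div < y_begin){
      div = y_begin;
    }
    IndexT _cpu_begin[] = {x_begin, div};
    IndexT _cpu_end[]   = {x_end, y_end};
    DynamicTaskPtr cpu_task = new petabricks::SpatialMethodCallTask<
        CLASS, 2, &CLASS::rule0_workstealing_wrap>(this, _cpu_begin, _cpu_end);
    cpu_task->enqueue();
    _fini->dependsOn(cpu_task);
  }
  return _fini;
}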