int TSch::AddTask( const TStrV& DimObjNmV, const TSecTm& StartTm, const TSecTm& EndTm){ IAssert(DimObjNmV.Len()==GetDims()); int TaskId=GetNewTaskId(); PSchTask Task=PSchTask(new TSchTask(TaskId, DimObjNmV, StartTm, EndTm)); IdToTaskH.AddDat(TaskId, Task); for (int DimN=0; DimN<GetDims(); DimN++){ DimV[DimN]->AddTask(Task);} return TaskId; }
bool TSch::IsTaskOk(const PSchTask& Task, TSchTaskV& DimIcTaskV) const { DimIcTaskV.Gen(GetDims(), GetDims()); bool Ok=true; for (int DimN=0; DimN<GetDims(); DimN++){ PSchTask IcTask; if (!DimV[DimN]->IsTaskOk(Task, IcTask)){ DimIcTaskV[DimN]=IcTask; Ok=false; } } return Ok; }
// Sparse-dense multiply: this = alpha * op(A) * B + beta * this, where A is
// a CSR sparse matrix and B is dense. Only transb == Trans::N is supported
// (asserted). The destination is resized to the logical result shape
// (m x n) computed by GetDims before the MKL call.
// NOTE(review): the MKL CSRMM interface takes A's *stored* dimensions
// (A.rows, A.cols) regardless of transa, which is why m/k from GetDims are
// not passed through — confirm against MKLHelper_CSRMM's wrapping of
// mkl_?csrmm. "GLNC" is the matdescra string: 'G' = general matrix,
// 'C' = zero-based indexing; the middle characters are ignored for
// general matrices. A.data->ptr / ptr+1 are the CSR 3-array-variation
// row-start / row-end pointer arrays.
void DenseMat<CPU, Dtype>::SparseMM(SparseMat<CPU, Dtype>& A, DenseMat<CPU, Dtype>& B, Trans transa, Trans transb, Dtype alpha, Dtype beta) { assert(transb == Trans::N); size_t m, n, k; GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k); Resize(m, n); MKLHelper_CSRMM(CPU_CharT(transa), A.rows, this->cols, A.cols, alpha, (char*)"GLNC", A.data->val, A.data->col_idx, A.data->ptr, A.data->ptr + 1, B.data, B.cols, beta, data, this->cols); }
// Dense GEMM: this = alpha * op(A) * op(B) + beta * this, delegating to
// the row-major BLAS gemm via MKLHelper_GeMM.
void DenseMat<CPU, Dtype>::GeMM(DenseMat<CPU, Dtype>& A, DenseMat<CPU, Dtype>& B, Trans transa, Trans transb, Dtype alpha, Dtype beta)
{
    // Resolve the result shape (out_rows x out_cols) and the shared inner
    // dimension from the operand shapes and transpose flags.
    size_t out_rows, out_cols, inner_dim;
    GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, out_rows, out_cols, inner_dim);
    // Ensure the destination buffer matches the result shape.
    Resize(out_rows, out_cols);
    // Row-major call; leading dimensions are the stored column counts.
    MKLHelper_GeMM(CblasRowMajor, CPU_T(transa), CPU_T(transb),
                   out_rows, out_cols, inner_dim,
                   alpha, A.data, A.cols,
                   B.data, B.cols,
                   beta, data, this->cols);
}
// get tensor shape of the slice referenced by a given FrameRange // Important: This shape does carry offset and stride; it's not just dimensions. TensorShape ComputationNodeBase::GetTensorSliceFor(size_t rank, const FrameRange& fr) const { // form the actual tensor that describes the full object // Note: This may have strides. auto tensorShape = GetTensorShape(rank); // determine the slice dimensions described by the FrameRange // Note: These are dimensions without strides. let slice = TensorSliceWithMBLayoutFor(tensorShape.GetDims(), fr, GetMBLayout()); // narrow the tensor // Note: Strides are honored correctly. tensorShape.NarrowTo(slice); return tensorShape; }
void TSch::DelTask(const int& TaskId){ PSchTask Task=IdToTaskH.GetDat(TaskId); for (int DimN=0; DimN<GetDims(); DimN++){ DimV[DimN]->DelTask(Task);} }
// Resolve a dimension name to its index; returns -1 when no dimension
// carries that name.
int TSch::GetDimN(const TStr& DimNm) const {
  // Linear scan: dimension count is expected to be small.
  const int Dims=GetDims();
  for (int DimIdx=0; DimIdx<Dims; DimIdx++){
    if (GetDimNm(DimIdx)==DimNm){
      return DimIdx;
    }
  }
  // Name not found.
  return -1;
}
// Return the names of all dimensions, in dimension-index order.
TStrV TSch::GetDimNmV() const {
  const int Dims=GetDims();
  // Preallocate exact capacity, start empty, then append each name.
  TStrV DimNmV(Dims, 0);
  for (int DimIdx=0; DimIdx<Dims; DimIdx++){
    DimNmV.Add(GetDimNm(DimIdx));
  }
  return DimNmV;
}