/* =============================================================================
 * TMnet_findDescendants
 * -- Contents of descendantBitmapPtr set to 1 if descendant, else 0
 * -- Returns FALSE if id is not a root node (i.e., a path cycles back to id)
 * -- Transactional variant: list traversal goes through TMLIST_* under lock;
 *    the bitmap and work queue are thread-private (P* operations).
 * =============================================================================
 */
bool_t
TMnet_findDescendants (al_t* lock, net_t* netPtr, long id,
                       bitmap_t* descendantBitmapPtr, queue_t* workQueuePtr)
{
    bool_t status;
    vector_t* nodeVectorPtr = LocalLoad(&netPtr->nodeVectorPtr);
    assert(LocalLoad(&descendantBitmapPtr->numBit) ==
           vector_getSize(nodeVectorPtr));

    PBITMAP_CLEARALL(descendantBitmapPtr);
    PQUEUE_CLEAR(workQueuePtr);

    /* Seed the work queue with id's immediate children */
    {
        net_node_t* nodePtr = (net_node_t*)vector_at(nodeVectorPtr, id);
        list_t* childIdListPtr = LocalLoad(&nodePtr->childIdListPtr);
        list_iter_t it;
        TMLIST_ITER_RESET(lock, &it, childIdListPtr);
        while (TMLIST_ITER_HASNEXT(lock, &it, childIdListPtr)) {
            long childId = (long)TMLIST_ITER_NEXT(lock, &it, childIdListPtr);
            status = PBITMAP_SET(descendantBitmapPtr, childId);
            assert(status);
            status = PQUEUE_PUSH(workQueuePtr, (void*)childId);
            assert(status);
        }
    }

    /* Breadth-first expansion over the child relation */
    while (!PQUEUE_ISEMPTY(workQueuePtr)) {
        long childId = (long)PQUEUE_POP(workQueuePtr);
        if (childId == id) {
            /* Cycle back to id.
             * Bug fix: use PQUEUE_CLEAR, consistent with every other queue
             * operation on workQueuePtr in this function (the sequential
             * twin in this file uses queue_clear; the TM path must not
             * bypass the P* wrapper). */
            PQUEUE_CLEAR(workQueuePtr);
            return FALSE;
        }
        net_node_t* nodePtr = (net_node_t*)vector_at(nodeVectorPtr, childId);
        list_t* grandChildIdListPtr = LocalLoad(&nodePtr->childIdListPtr);
        list_iter_t it;
        TMLIST_ITER_RESET(lock, &it, grandChildIdListPtr);
        while (TMLIST_ITER_HASNEXT(lock, &it, grandChildIdListPtr)) {
            long grandChildId =
                (long)TMLIST_ITER_NEXT(lock, &it, grandChildIdListPtr);
            if (!PBITMAP_ISSET(descendantBitmapPtr, grandChildId)) {
                status = PBITMAP_SET(descendantBitmapPtr, grandChildId);
                assert(status);
                status = PQUEUE_PUSH(workQueuePtr, (void*)grandChildId);
                assert(status);
            }
        }
    }

    return TRUE;
}
/* =============================================================================
 * TMnet_findAncestors
 * -- Contents of ancestorBitmapPtr set to 1 if ancestor, else 0
 * -- Returns FALSE if id is not a leaf w.r.t. ancestry (a path cycles back)
 * -- Transactional variant: parent lists are read via TMLIST_* under lock.
 * =============================================================================
 */
bool_t
TMnet_findAncestors (al_t* lock, net_t* netPtr, long id,
                     bitmap_t* ancestorBitmapPtr, queue_t* workQueuePtr)
{
    bool_t ok;
    vector_t* nodes = LocalLoad(&netPtr->nodeVectorPtr);

    assert(LocalLoad(&ancestorBitmapPtr->numBit) == vector_getSize(nodes));

    PBITMAP_CLEARALL(ancestorBitmapPtr);
    PQUEUE_CLEAR(workQueuePtr);

    /* Seed: mark and enqueue the immediate parents of id */
    {
        net_node_t* node = (net_node_t*)vector_at(nodes, id);
        list_t* parentIds = LocalLoad(&node->parentIdListPtr);
        list_iter_t iter;
        TMLIST_ITER_RESET(lock, &iter, parentIds);
        while (TMLIST_ITER_HASNEXT(lock, &iter, parentIds)) {
            long pid = (long)TMLIST_ITER_NEXT(lock, &iter, parentIds);
            ok = PBITMAP_SET(ancestorBitmapPtr, pid);
            assert(ok);
            ok = PQUEUE_PUSH(workQueuePtr, (void*)pid);
            assert(ok);
        }
    }

    /* BFS upward through the parent relation */
    while (!PQUEUE_ISEMPTY(workQueuePtr)) {
        long pid = (long)PQUEUE_POP(workQueuePtr);
        if (pid == id) {
            /* Reached id again: graph has a cycle through id */
            PQUEUE_CLEAR(workQueuePtr);
            return FALSE;
        }
        net_node_t* node = (net_node_t*)vector_at(nodes, pid);
        list_t* grandParentIds = LocalLoad(&node->parentIdListPtr);
        list_iter_t iter;
        TMLIST_ITER_RESET(lock, &iter, grandParentIds);
        while (TMLIST_ITER_HASNEXT(lock, &iter, grandParentIds)) {
            long gpid = (long)TMLIST_ITER_NEXT(lock, &iter, grandParentIds);
            if (!PBITMAP_ISSET(ancestorBitmapPtr, gpid)) {
                ok = PBITMAP_SET(ancestorBitmapPtr, gpid);
                assert(ok);
                ok = PQUEUE_PUSH(workQueuePtr, (void*)gpid);
                assert(ok);
            }
        }
    }

    return TRUE;
}
/* =============================================================================
 * net_findDescendants
 * -- Contents of bitmapPtr set to 1 if descendants, else 0
 * -- Returns false if id is not root node (i.e., has cycle back id)
 * =============================================================================
 */
bool_t
net_findDescendants (net_t* netPtr, long id,
                     bitmap_t* descendantBitmapPtr, queue_t* workQueuePtr)
{
    bool_t ok;
    vector_t* nodes = LocalLoad(&netPtr->nodeVectorPtr);

    assert(LocalLoad(&descendantBitmapPtr->numBit) == vector_getSize(nodes));

    bitmap_clearAll(descendantBitmapPtr);
    queue_clear(workQueuePtr);

    /* Seed the work queue with id's direct children */
    {
        net_node_t* node = (net_node_t*)vector_at(nodes, id);
        list_t* childIds = LocalLoad(&node->childIdListPtr);
        list_iter_t iter;
        list_iter_reset(&iter, childIds);
        while (list_iter_hasNext(&iter, childIds)) {
            long childId = (long)list_iter_next(&iter, childIds);
            ok = bitmap_set(descendantBitmapPtr, childId);
            assert(ok);
            ok = queue_push(workQueuePtr, (void*)childId);
            assert(ok);
        }
    }

    /* Breadth-first expansion; revisiting id means a cycle exists */
    while (!queue_isEmpty(workQueuePtr)) {
        long currId = (long)queue_pop(workQueuePtr);
        if (currId == id) {
            queue_clear(workQueuePtr);
            return FALSE;
        }
        net_node_t* node = (net_node_t*)vector_at(nodes, currId);
        list_t* childIds = LocalLoad(&node->childIdListPtr);
        list_iter_t iter;
        list_iter_reset(&iter, childIds);
        while (list_iter_hasNext(&iter, childIds)) {
            long childId = (long)list_iter_next(&iter, childIds);
            if (!bitmap_isSet(descendantBitmapPtr, childId)) {
                ok = bitmap_set(descendantBitmapPtr, childId);
                assert(ok);
                ok = queue_push(workQueuePtr, (void*)childId);
                assert(ok);
            }
        }
    }

    return TRUE;
}
/* =============================================================================
 * net_findAncestors
 * -- Contents of bitmapPtr set to 1 if ancestor, else 0
 * -- Returns false if id is not root node (i.e., has cycle back id)
 * =============================================================================
 */
bool_t
net_findAncestors (net_t* netPtr, long id,
                   bitmap_t* ancestorBitmapPtr, queue_t* workQueuePtr)
{
    bool_t ok;
    vector_t* nodes = LocalLoad(&netPtr->nodeVectorPtr);

    assert(LocalLoad(&ancestorBitmapPtr->numBit) == vector_getSize(nodes));

    bitmap_clearAll(ancestorBitmapPtr);
    queue_clear(workQueuePtr);

    /* Seed the work queue with id's direct parents */
    {
        net_node_t* node = (net_node_t*)vector_at(nodes, id);
        list_t* parentIds = LocalLoad(&node->parentIdListPtr);
        list_iter_t iter;
        list_iter_reset(&iter, parentIds);
        while (list_iter_hasNext(&iter, parentIds)) {
            long pid = (long)list_iter_next(&iter, parentIds);
            ok = bitmap_set(ancestorBitmapPtr, pid);
            assert(ok);
            ok = queue_push(workQueuePtr, (void*)pid);
            assert(ok);
        }
    }

    /* BFS upward through the parent relation; revisiting id means a cycle */
    while (!queue_isEmpty(workQueuePtr)) {
        long pid = (long)queue_pop(workQueuePtr);
        if (pid == id) {
            queue_clear(workQueuePtr);
            return FALSE;
        }
        net_node_t* node = (net_node_t*)vector_at(nodes, pid);
        list_t* grandParentIds = LocalLoad(&node->parentIdListPtr);
        list_iter_t iter;
        list_iter_reset(&iter, grandParentIds);
        while (list_iter_hasNext(&iter, grandParentIds)) {
            long gpid = (long)list_iter_next(&iter, grandParentIds);
            if (!bitmap_isSet(ancestorBitmapPtr, gpid)) {
                ok = bitmap_set(ancestorBitmapPtr, gpid);
                assert(ok);
                ok = queue_push(workQueuePtr, (void*)gpid);
                assert(ok);
            }
        }
    }

    return TRUE;
}
/* =============================================================================
 * TMremoveEdge
 * -- Delete the directed edge fromId -> toId: unlink fromId from the child
 *    node's parent list, and toId from the parent node's child list.
 * -- Both removals must succeed (edge is assumed to exist).
 * =============================================================================
 */
static void
TMremoveEdge (al_t* lock, net_t* netPtr, long fromId, long toId)
{
    bool_t removed;
    vector_t* nodes = LocalLoad(&netPtr->nodeVectorPtr);

    /* Drop fromId from toId's parent list */
    net_node_t* child = (net_node_t*)vector_at(nodes, toId);
    list_t* parentIds = LocalLoad(&child->parentIdListPtr);
    removed = TMLIST_REMOVE(lock, parentIds, (void*)fromId);
    assert(removed);

    /* Drop toId from fromId's child list */
    net_node_t* parent = (net_node_t*)vector_at(nodes, fromId);
    list_t* childIds = LocalLoad(&parent->childIdListPtr);
    removed = TMLIST_REMOVE(lock, childIds, (void*)toId);
    assert(removed);
}
/**
 * Load a single feed from its on-disk file ("%08x.feed", keyed by stub id)
 * into |feed|.
 *
 * @param stub      Stub identifying the feed to load; stored (not owned).
 * @param feed      Out parameter; must be NULL on entry. Receives the parsed
 *                  feed (with the reference taken during parsing) or NULL.
 * @param api_impl  API implementation used during parsing.
 * @return Status of the underlying LocalLoad().
 *
 * Bug fixes vs. previous revision:
 *  - removed DecRef() on m_current_feed before asserting it is NULL
 *    (in release builds, where OP_ASSERT is compiled out, this could
 *    underflow a refcount this function never owned);
 *  - removed IncRef() immediately followed by m_current_feed = NULL
 *    (pure leak whenever the pointer was non-NULL);
 *  - removed DecRef() after handing m_current_feed to the caller via
 *    |feed| — it released the very reference the caller receives,
 *    risking use-after-free.
 */
OP_STATUS WebFeedStorage::LoadFeed(WebFeedsAPI_impl::WebFeedStub* stub, WebFeed *&feed, WebFeedsAPI_impl* api_impl)
{
    OP_ASSERT(!feed);
    OP_ASSERT(!m_current_feed);

    m_api_impl = api_impl;
    m_is_feed_file = TRUE;
    m_current_feed = NULL;

    if (m_own_stub)
        OP_DELETE(m_current_stub);
    m_current_stub = stub;
    m_own_stub = FALSE;

    /* "%08x.feed" is 13 chars; Reserve(14) leaves room for the terminator */
    OpString feed_file;
    feed_file.Reserve(14);
    uni_sprintf(feed_file.CStr(), UNI_L("%08x.feed"), stub->GetId());

    OP_STATUS ret_stat = LocalLoad(feed_file.CStr());

    /* Transfer the parsed feed (and its reference) to the caller */
    feed = m_current_feed;
    m_current_feed = NULL;

    return ret_stat;
}
/* =============================================================================
 * TMnet_hasEdge
 * -- TRUE iff the net contains the directed edge fromId -> toId, i.e.
 *    fromId appears in toId's parent list.
 * =============================================================================
 */
bool_t
TMnet_hasEdge (al_t* lock, net_t* netPtr, long fromId, long toId)
{
    vector_t* nodes = LocalLoad(&netPtr->nodeVectorPtr);
    net_node_t* child = (net_node_t*)vector_at(nodes, toId);
    list_t* parentIds = LocalLoad(&child->parentIdListPtr);
    list_iter_t iter;
    bool_t found = FALSE;

    /* Linear scan of the parent list; stop at the first match */
    TMLIST_ITER_RESET(lock, &iter, parentIds);
    while (!found && TMLIST_ITER_HASNEXT(lock, &iter, parentIds)) {
        long pid = (long)TMLIST_ITER_NEXT(lock, &iter, parentIds);
        if (pid == fromId) {
            found = TRUE;
        }
    }

    return found;
}
/* =============================================================================
 * TMnet_isPath
 * -- Breadth-first search from fromId over child edges.
 * -- Returns TRUE iff toId is reachable from fromId.
 * -- visitedBitmapPtr and workQueuePtr are caller-supplied scratch space
 *    (thread-private; manipulated via the P* wrappers).
 * =============================================================================
 */
bool_t
TMnet_isPath (al_t* lock, net_t* netPtr, long fromId, long toId,
              bitmap_t* visitedBitmapPtr, queue_t* workQueuePtr)
{
    bool_t status;
    vector_t* nodeVectorPtr = LocalLoad(&netPtr->nodeVectorPtr);
    assert(LocalLoad(&visitedBitmapPtr->numBit) ==
           vector_getSize(nodeVectorPtr));

    PBITMAP_CLEARALL(visitedBitmapPtr);
    PQUEUE_CLEAR(workQueuePtr);

    status = PQUEUE_PUSH(workQueuePtr, (void*)fromId);
    assert(status);

    while (!PQUEUE_ISEMPTY(workQueuePtr)) {
        /* Bug fix: use PQUEUE_POP / PQUEUE_CLEAR. The previous revision
         * called plain queue_pop/queue_clear here, bypassing the P*
         * wrappers used for every other operation on this queue in this
         * function. */
        long id = (long)PQUEUE_POP(workQueuePtr);
        if (id == toId) {
            PQUEUE_CLEAR(workQueuePtr);
            return TRUE;
        }
        status = PBITMAP_SET(visitedBitmapPtr, id);
        assert(status);
        net_node_t* nodePtr = (net_node_t*)vector_at(nodeVectorPtr, id);
        list_t* childIdListPtr = LocalLoad(&nodePtr->childIdListPtr);
        list_iter_t it;
        TMLIST_ITER_RESET(lock, &it, childIdListPtr);
        while (TMLIST_ITER_HASNEXT(lock, &it, childIdListPtr)) {
            long childId = (long)TMLIST_ITER_NEXT(lock, &it, childIdListPtr);
            if (!PBITMAP_ISSET(visitedBitmapPtr, childId)) {
                status = PQUEUE_PUSH(workQueuePtr, (void*)childId);
                assert(status);
            }
        }
    }

    return FALSE;
}
void HandlePtrArithNoSizeofUpate(short dest, short src0, char op, short src1) /* * this function is similar to HandlePtrArith but doesn't multiply lda with * size to fix the offset. */ { short rs0, rs1, flag; if (op != '+' && op != '-') fko_error(__LINE__,"pointers may take only + and - operators"); if (!IS_PTR(STflag[src0-1])) fko_error(__LINE__,"Expecting <ptr> = <ptr> + <int>"); rs0 = LocalLoad(src0); /* load src0 */ flag = STflag[src1-1]; if (!IS_CONST(flag)) rs1 = LocalLoad(src1); /* load src1 */ else fko_error(__LINE__,"expecting var as 2nd src as a special case"); InsNewInst(NULL, NULL, NULL, op == '+' ? ADD : SUB, -rs0, -rs0, -rs1); LocalStore(dest, rs0); GetReg(-1); }
void HandlePtrArith(short dest, short src0, char op, short src1)
/*
 * Ptr arithmetic must be of form <ptr> = <ptr> [+,-] <int/const>
 * (NOTE: this definition continues beyond this chunk of the file.)
 */
{
   short rs0, rs1, flag, type, dflag;
#ifdef X86_64
   short k;
#endif
   /* pointers accept only + and -, and src0 must actually be a pointer */
   if (op != '+' && op != '-')
      fko_error(__LINE__,"pointers may take only + and - operators");
   if (!IS_PTR(STflag[src0-1]))
      fko_error(__LINE__,"Expecting <ptr> = <ptr> + <int>");
/*
 * Majedul: The concept of LIL as three address code is violated here. We have
 * multiple operations in a single load-store block. For example:
 *       A1 = A0 + lda     is actually treated as
 *       A1 = A0 + (lda * size)
 * So, we have addition and shift in the same load-store block. This would
 * create redundant computation. To eliminate the redundant computation of
 * (lda*size), we need to split this expression out and treat it as two HIL
 * instructions while converting this into LIL, like:
 *       _lda = lda * size
 *       A1 = A0 + _lda
 * NOTE: All variables starting with '_' are compiler's internal variables,
 * and should not be used as variable names in HIL.
 */
   dflag = STflag[dest-1];
   type = FLAG2TYPE(dflag);
   flag = STflag[src1-1];
   if (IS_CONST(flag))
   {
      rs0 = LocalLoad(src0);     /* base pointer into a register */
      if (IS_INT(flag))
#ifdef X86_64
      {
         /*
          * Scale the constant by the element size at compile time; rs1 is
          * the (negated) symbol-table index of the scaled constant.
          * NOTE(review): the *4 when IS_INT(dflag) presumably hard-codes
          * 32-bit int element size on x86-64 — confirm against type2len().
          */
         if (IS_INT(dflag))
            rs1 = -STiconstlookup(SToff[src1-1].i*4);
         else
            rs1 = -STiconstlookup(SToff[src1-1].i*type2len(type));
      }
#else
         rs1 = -STiconstlookup(SToff[src1-1].i*type2len(type));
#endif
      else
         fko_error(__LINE__,"Pointers may only be incremented by integers");
   }
/**
 * Load the feed store index file (FEED_STORE_FILE) for |api_impl|.
 * Resets parser state so top-level elements are treated as store entries
 * (m_is_feed_file == FALSE) rather than a single feed file.
 *
 * @param api_impl  API implementation receiving the loaded store.
 * @return OpStatus::OK on success, otherwise the LocalLoad() error.
 *
 * Bug fix: removed the conditional DecRef() on m_current_feed that
 * directly contradicted the OP_ASSERT(!m_current_feed) above it — in
 * release builds (where OP_ASSERT compiles out) it could release a
 * reference this function never owned.
 */
OP_STATUS WebFeedStorage::LoadStore(WebFeedsAPI_impl* api_impl)
{
    OP_ASSERT(!m_current_feed);
    m_current_feed = NULL;

    if (m_own_stub)
        OP_DELETE(m_current_stub);
    m_current_stub = NULL;

    m_api_impl = api_impl;
    m_is_feed_file = FALSE;

    RETURN_IF_ERROR(LocalLoad(FEED_STORE_FILE));

    return OpStatus::OK;
}
/*
 * DoArrayStore: emit LIL to store local variable `id` through the array
 * dereference entry `ptr`.  Loads `id` into a register, checks that the
 * array element type is compatible with the variable's type (vector
 * variables may be stored into scalar-typed arrays of the matching lane
 * type), legalizes the deref via FixDeref(), then emits the
 * type-appropriate store instruction.
 */
void DoArrayStore(short ptr, short id)
{
   short lreg, k, type;
   k = ptr-1;                       /* 0-based index of the deref entry */
   type = FLAG2TYPE(STflag[id-1]);  /* type comes from the stored variable */
#if IFKO_DEBUG_LEVEL > 1
   fprintf(stderr, "pnam=%s, pflag=%d, idname='%s', idflag=%d\n",
           STname[SToff[k].sa[0]-1], STflag[SToff[k].sa[0]-1],
           STname[id-1], STflag[id-1]);
#endif
   lreg = LocalLoad(id);
/*
 * NOTE: we added vector store where the array type and variable type
 * may not exactly match. we may have DOUBLE pointer but the variable is
 * T_VDOUBLE.
 */
#if 0
   assert(FLAG2TYPE(STflag[SToff[k].sa[0]-1]) == type);
#else
   if (IS_VEC(STflag[id-1]))
   {
      /* vector variable: array element type must match the vector lane type */
      if ( (IS_VDOUBLE(STflag[id-1]) && !IS_DOUBLE(STflag[SToff[ptr-1].sa[0]-1]))
           || (IS_VFLOAT(STflag[id-1]) && !IS_FLOAT(STflag[SToff[ptr-1].sa[0]-1])) )
         fko_error(__LINE__, "type mismatch for vector store!");
   }
   else
   {
      /* scalar variable: types must match exactly */
      if (FLAG2TYPE(STflag[SToff[k].sa[0]-1]) != type)
         fko_error(__LINE__, "type mismatch for store!");
   }
#endif
   FixDeref(ptr);    /* legalize the deref into machine-usable form */
   switch(type)
   {
   case T_INT:
#ifdef X86_64
      /* NOTE(review): STS appears to be the 32-bit int store on x86-64,
       * and the assert presumably bounds lreg to an encodable register —
       * confirm against the instruction definitions. */
      assert(lreg < 8);
      InsNewInst(NULL, NULL, NULL, STS, ptr, -lreg, 0);
#else
      InsNewInst(NULL, NULL, NULL, ST, ptr, -lreg, 0);
#endif
      break;
   case T_FLOAT:
      InsNewInst(NULL, NULL, NULL, FST, ptr, -lreg, 0);
      break;
   case T_DOUBLE:
      InsNewInst(NULL, NULL, NULL, FSTD, ptr, -lreg, 0);
      break;
#if 1
   case T_VFLOAT:
      InsNewInst(NULL, NULL, NULL, VFST, ptr, -lreg, 0);
      break;
   case T_VDOUBLE:
      InsNewInst(NULL, NULL, NULL, VDST, ptr, -lreg, 0);
      break;
#endif
   default:
      fko_error(__LINE__, "Unknown type %d\n", type);
   }
   GetReg(-1);   /* release the registers used by this load/store block */
}
static void FixDeref(short ptr)
/*
 * This routine takes a deref entry of type:
 *    <STloc> <STloc> <mul> <STconst>
 * And translates into a fully-specified legal index for the machine:
 *    <reg0> <reg1> <mul> <STconst>
 * where the address is then provided by <reg0> + <reg1>*mul + <STconst>.
 * On some machines, mul may need to be 1, resulting in an extra shift
 * instruction, and on others you may be able to use one of <reg1> or <STconst>
 * at a time, resulting in an additional add.
 */
{
   short type;
   short sta;
   ptr--;   /* convert 1-based ST handle to 0-based index */
   /* element type of the array being dereferenced */
   type = FLAG2TYPE(STflag[SToff[ptr].sa[0]-1]);
/* fprintf(stderr, "FixDeref: [%d, %d, %d, %d]\n", SToff[ptr].sa[0],
 * SToff[ptr].sa[1], SToff[ptr].sa[2], SToff[ptr].sa[3]); */
/*
 * FIXED: in case of optimized 2D ptr, we want to keep the DT intact.
 * it is applied only on x86. so, it's safe to keep that unchanged
 * NOTE: multiplying offset with datatype is done while creating the DT entry.
 */
   sta = STarrColPtrlookup(SToff[ptr].sa[0]);
   if ( (FKO_FLAG & IFF_OPT2DPTR) && sta && STarr[sta-1].ndim > 1)
   {
/*
 *    Load beginning of array
 */
      SToff[ptr].sa[0] = -LocalLoad(SToff[ptr].sa[0]);
/*
 *    Load index register if needed
 *    we kept mul and con intact
 */
      if (SToff[ptr].sa[1])
         SToff[ptr].sa[1] = -LocalLoad(SToff[ptr].sa[1]);
   }
   else /* kept the old code and logic unchanged for all other case*/
   {
/*
 *    Load beginning of array
 */
      SToff[ptr].sa[0] = -LocalLoad(SToff[ptr].sa[0]);
/*
 *    Multiply constant by mul
 *    FIXED: for const index, we already multiply datasize at the beginning
 *    ref AddArrayDeref()
 */
      /*if (SToff[ptr].sa[2]) SToff[ptr].sa[3] *= SToff[ptr].sa[2];*/
/*
 *    Load index register if needed
 */
      if (SToff[ptr].sa[1])
      {
         SToff[ptr].sa[1] = -LocalLoad(SToff[ptr].sa[1]);
/*
 *       Some architectures cannot multiply the index register by some (or any)
 *       constants, and in this case generate an extra shift instruction
 */
         if (!ArchHasLoadMul(SToff[ptr].sa[2]))
         {
            /* shift index left by log2(element size), then force mul = 1 */
            InsNewInst(NULL, NULL, NULL, SHL, SToff[ptr].sa[1],
                       SToff[ptr].sa[1], STiconstlookup(type2shift(type)));
            SToff[ptr].sa[2] = 1;
         }
/*
 *       On machines with fixed-size instructions, you usually need to choose
 *       _either_ an index register, _or_ a constant addition.  If we have both
 *       on such a machine, add the constant to the index register
 */
#ifndef ArchConstAndIndex
         if (SToff[ptr].sa[3])
         {
            InsNewInst(NULL, NULL, NULL, ADD, SToff[ptr].sa[1],
                       SToff[ptr].sa[1],
                       STiconstlookup(GetDTcon(SToff[ptr].sa[3])));
            SToff[ptr].sa[3] = 0;
         }
#endif
      }
   }
}
/*
 * DoMove: emit LIL for dest = src.
 * Three cases:
 *   1) src is a constant: materialize it in a register (integer consts via
 *      MOV/XOR, floating-point consts only if zero, via FZERO/FZEROD) and
 *      store to dest; no implicit conversion of constants is supported.
 *   2) src is a variable of the same type as dest: plain load + store.
 *   3) types differ: delegate to DoConvert().
 */
void DoMove(short dest, short src)
{
   short rsrc;
   int sflag, type;
   enum inst mov;
   sflag = STflag[src-1];
   if (IS_CONST(sflag))
   {
      type = FLAG2PTYPE(sflag);
      rsrc = GetReg(type);
/*
 * FIXME: need to add const_init for each floating point const, by this way
 * we can have a placeholder for that const in stack. We should/will create
 * the placeholder whenever user uses a floating point const, but until it's
 * upto user to manage it correctly.
 * Don't need any place-holder for the value 0.0. we can use FZERO LIL
 * inst for that. It won't maintain the load->Arith->store structure of LIL.
 * Need to check whether it creates any problem in later transformation!!!
 */
#if 0
      if (IS_CONST(sflag) && (type == T_INT) && SToff[src-1].i == 0)
         InsNewInst(NULL, NULL, NULL, XOR, -rsrc, -rsrc, -rsrc);
      else
      {
         if (type == T_INT)
            mov = MOV;
         else if (type == T_FLOAT)
            mov = FMOV;
         else
         {
            assert(type == T_DOUBLE);
            mov = FMOVD;
         }
         InsNewInst(NULL, NULL, NULL, mov, -rsrc, src, 0);
      }
      LocalStore(dest, rsrc);
#else
      /* constants are never converted: dest must have the same type */
      if (FLAG2TYPE(STflag[dest-1]) != FLAG2TYPE(sflag))
         fko_error(__LINE__, "Conversions of constant not yet supported");
      if (type == T_INT)
      {
         /* zero is cheaper via XOR reg,reg than a MOV of the constant */
         if (SToff[src-1].i == 0)
            InsNewInst(NULL, NULL, NULL, XOR, -rsrc, -rsrc, -rsrc);
         else
         {
            mov = MOV;
            InsNewInst(NULL, NULL, NULL, mov, -rsrc, src, 0);
         }
      }
      else if (type == T_FLOAT)
      {
         /* only 0.0f can be materialized directly (FZERO); any other FP
          * constant needs a pre-declared stack placeholder */
         if (SToff[src-1].f == 0.0)
            InsNewInst(NULL, NULL, NULL, FZERO, -rsrc, 0, 0);
         else
            fko_error(__LINE__, "Floating point const other than zero must be defined before");
      }
      else if (type == T_DOUBLE)
      {
         if (SToff[src-1].d == 0.0)
            InsNewInst(NULL, NULL, NULL, FZEROD, -rsrc, 0, 0);
         else
            fko_error(__LINE__, "Floating point const other than zero must be defined before");
      }
      else
         fko_error(__LINE__, "unsupported constant!");
      LocalStore(dest, rsrc);
#endif
   }
   else if (FLAG2TYPE(STflag[dest-1]) == FLAG2TYPE(sflag))
   {
      /* same-type variable move: load then store */
      rsrc = LocalLoad(src);
      LocalStore(dest, rsrc);
   }
   else
      DoConvert(dest, src);   /* cross-type move: emit conversion */
   GetReg(-1);   /* release the registers used by this block */
}