// Creates a new virtual HDD image file at `path`.
// Layout: block 0 holds the vfsHDD_Hdr, block 1 ("cur_dir_block") holds the
// root directory entry; the file is then extended to its full size by
// writing a single zero byte at the last position.
void vfsHDDManager::CreateHDD(const std::string& path, u64 size, u64 block_size)
{
	fs::file f(path, fom::rewrite);

	static const u64 cur_dir_block = 1;

	vfsHDD_Hdr hdr;
	CreateBlock(hdr);
	hdr.next_block = cur_dir_block;
	hdr.magic = g_hdd_magic;
	hdr.version = g_hdd_version;
	// NOTE(review): (size + block_size) / block_size allocates one extra
	// block when size is an exact multiple of block_size; ceiling division
	// would be (size + block_size - 1) / block_size — confirm intentional.
	hdr.block_count = (size + block_size) / block_size;
	hdr.block_size = block_size;
	f.write(&hdr, sizeof(vfsHDD_Hdr));

	{
		// Root directory entry (named ".") stored in the first directory block.
		vfsHDD_Entry entry;
		CreateEntry(entry);
		entry.type = vfsHDD_Entry_Dir;
		entry.data_block = hdr.next_block;
		entry.next_block = 0;

		f.seek(cur_dir_block * hdr.block_size);
		f.write(&entry, sizeof(vfsHDD_Entry));
		f.write(".", 1);
	}

	// Extend the file to its final size by touching its last byte.
	u8 null = 0;
	CHECK_ASSERTION(f.seek(hdr.block_count * hdr.block_size - sizeof(null)) != -1);
	f.write(&null, sizeof(null));
}
//--------------------------------------------------------------------------- // 辞書への単語の追加 // 既に単語が存在する場合でも、さらに追加する (現在の仕様) // 既に単語が存在する場合には、出現確率を変更する (将来の仕様) // 戻り値 : 追加した単語のID TWordID TNS_KawariDictionary::Insert(TEntryID entry,TKawariCode_base* word) { if(!word) return(0); // エントリ内の単語はコンテキストを持つ word=word->CreateContext(); TWordID id=0; if(!WordCollection.Insert(word,&id)) { // 既に登録済みの単語だった delete word; word=GetWordFromID(id); } TKawariCodeEntryCall *macro=dynamic_cast<TKawariCodeEntryCall *>(word); if(macro) { // 追加するのは純粋仮想単語 TEntryID child=CreateEntry(macro->Get()); PVWToEntry[id]=child; EntryToPVW[child]=id; } Dictionary[entry].push_back(id); ReverseDictionary[id].insert(entry); return(id); }
// Rebuilds this node's in-memory state (level, sibling, entry list) from a
// packed on-disk page image.
void GiSTnode::Unpack(const char *page)
{
	const GiSTheader *h = (const GiSTheader *) page;

	Reset();
	SetLevel(h->level);
	SetSibling(h->sibling);
	// Keep a private copy of the raw page; Entry(i) below reads from it.
	if (!packedNode)
		packedNode = new char[tree->Store()->PageSize()];
	memcpy(packedNode, page, tree->Store()->PageSize());
	Expand(h->numEntries);
	SetNumEntries(h->numEntries);
	// Decompress each stored entry into a freshly created GiSTentry.
	for (int i=0; i<h->numEntries; i++) {
		GiSTcompressedEntry tmpentry = Entry(i);
		GiSTentry *e = CreateEntry();
		e->SetLevel(Level());
		e->SetPosition(i);
		e->SetNode(this);
		e->Decompress(tmpentry);
		// be tidy
		if (tmpentry.key)
			delete tmpentry.key;
		// Append the body with the entry
		entries[i] = e;
	}
}
struct entry *ReadEntryData(struct entry *Top) { //この関数は戻り値がstruct型の、引数はentryのポインタを渡す関数 struct entry *ep; char *cp, *tp; char buf[512]; char name[32]; char sex; int age; //while(継続条件) {} ここではfgets関数が読み込んだ値の中身がNULLじゃなかったら、という条件の時こうする //fgets関数は第一引数は文字配列のポインタ第二引数は一行の最大文字数 //第三引数はファイルポインタ標準入力を指定する場合はファイルポインタに「stdin」と書く。すると入力した値がbufに入る while (fgets(buf, sizeof(buf), stdin) != NULL) { //while文で読み込んでいる fgtes関数を使って、一行も文字列を読み込んでいる /* * Node setup */ if (Top == NULL) { Top = CreateEntry();//epを返り血にした後で、それをTopにいれて、またepに戻す ep = Top;//TOPがなければ、C一つのノードをつくって、それをTOPにする } else {//一つでもリスト構造が有るなら、一番ケツに新しいエントリーをつける? //epという変数に、一番最後が入ってる? for (ep = Top; ep->next != NULL; ep = ep->next); ep->next = CreateEntry();//CreateEntry関数は一番最後にNULLをいれて返す ep = ep->next; } //ここから下の処理がイマイチわからない cp = buf;//入力させた値をポインタcpに入れる tp = cp;//その後にポインタtpに入れる if((cp = index(tp, ' ')) == NULL) { fprintf(stderr, "Input format fail: %s\n", tp); exit(-1); } *cp++ = '\0';//cpを++することで、\0をいれて文字列の終了をした後に、次のところに行っている strcpy(ep->name, tp); ep->sex = *cp++; cp++; ep->age = atoi(cp);//ageがでてくるのはここだけ!atoi関数は文字列で表現された数値をint型の数値に変換する。変換不能なアルファベットなどの文字列の場合は0を返すが、数値が先頭にあればその値を返す。 } return (Top); }
// Seeds every hash bucket with a fresh sentinel entry and points the
// per-bucket tail tracker at that same entry.
void initialise_hashmaps()
{
	int bucket = 0;
	while (bucket < ENTRY_LEN) {
		_lasts[bucket] = _entries[bucket] = CreateEntry(NULL);
		++bucket;
	}
}
// Parses one `"key": value` pair from the input stream and wraps it in a
// JSON entry node.  Assumes the cursor sits just before the opening quote
// of the key.
JSON *entry_type_parse(){
	next_non_space_char();	// get char '"'
	JSON *key_obj = string_type_parse();
	next_non_space_char();
	assert(str_value() == ':');	// a key must be followed by ':'
	next_char();
	JSON *value_obj = non_type_parse();
	// NOTE(review): only key_obj->valuestring is reused; the key_obj node
	// itself is not freed here — confirm ownership against CreateEntry.
	return CreateEntry(key_obj->valuestring, value_obj);
}
/*----------------------------------------------------------------------
|   TestRemoveDirectory
|
|   Verifies that NPT_DirectoryRemove(path, recurse=true) deletes a
|   directory tree that contains both subdirectories and plain files.
+---------------------------------------------------------------------*/
void
TestRemoveDirectory()
{
    NPT_DirectoryEntryInfo info;
    NPT_String path;
    NPT_Result res;

    // read path from environment variable
    res = NPT_GetEnvironment("NEPTUNE_TEST_ROOT", path);
    CHECK(NPT_SUCCEEDED(res));

    // find the next directory path that does not exist
    res = FindNextAvailableDirectory(path, path);
    CHECK(NPT_SUCCEEDED(res));

    // create directory object
    res = NPT_DirectoryCreate(path, true);
    CHECK(NPT_SUCCEEDED(res));

    // create some subdirectories
    res = CreateEntry(path, "foof1", true);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foof2", true);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foof2.dir", true);
    CHECK(NPT_SUCCEEDED(res));

    // create some files (names chosen to exercise extension edge cases)
    res = CreateEntry(path, "foo1", false);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foo2.", false);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foo2.e", false);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foo2.e3", false);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foo2.gas", false);
    CHECK(NPT_SUCCEEDED(res));
    res = CreateEntry(path, "foo4.tee", false);
    CHECK(NPT_SUCCEEDED(res));

    // remove directory (recursively)
    res = NPT_DirectoryRemove(path, true);
    CHECK(NPT_SUCCEEDED(res));

    // verify directory does not exist any more
    res = NPT_DirectoryEntry::GetInfo(path, &info);
    CHECK(res == NPT_ERROR_NO_SUCH_ITEM);
}
// Fills `result` with the next entry of directory search `id`, caching the
// directory's contents on first access.  Returns false when the search id
// is invalid, the directory cannot be opened, or the listing is exhausted
// (the search slot is released in the failure cases).
bool DOS_Drive_Cache::ReadDir(Bit16u id, char* &result) {
	// dirSearch[] is indexed with id below and MAX_OPENDIRS doubles as the
	// "released" sentinel stored in ->id, so id == MAX_OPENDIRS must be
	// rejected too; the previous '>' test allowed a one-past-the-end read.
	if (id>=MAX_OPENDIRS) return false;
	if (!IsCachedIn(dirSearch[id])) {
		// Try to open directory
		dir_information* dirp = open_directory(dirPath);
		if (!dirp) {
			// Could not open: release the search slot.
			if (dirSearch[id]) {
				dirSearch[id]->id = MAX_OPENDIRS;
				dirSearch[id] = 0;
			}
			return false;
		}
		// Read complete directory
		char dir_name[CROSS_LEN];
		bool is_directory;
		if (read_directory_first(dirp, dir_name, is_directory)) {
			CreateEntry(dirSearch[id], dir_name, is_directory);
			while (read_directory_next(dirp, dir_name, is_directory)) {
				CreateEntry(dirSearch[id], dir_name, is_directory);
			}
		}

		// close dir
		close_directory(dirp);

		// Info
		/*	if (!dirp) {
			LOG_DEBUG("DIR: Error Caching in %s",dirPath);
			return false;
		} else {
			char buffer[128];
			sprintf(buffer,"DIR: Caching in %s (%d Files)",dirPath,dirSearch[srchNr]->fileList.size());
			LOG_DEBUG(buffer);
		};*/
	};
	if (SetResult(dirSearch[id], result, dirSearch[id]->nextEntry)) return true;
	// Listing exhausted: release the search slot.
	if (dirSearch[id]) {
		dirSearch[id]->id = MAX_OPENDIRS;
		dirSearch[id] = 0;
	}
	return false;
}
JSON *Duplicate(JSON *obj, int recurse){ switch (obj->type){ case JSON_NULL: return CreateNULL(); case JSON_TRUE: return CreateTrue(); case JSON_FALSE: return CreateFalse(); case JSON_NUMBER: return CreateNumber(obj->valuedouble); case JSON_STRING: return CreateString(obj->valuestring); case JSON_ARRAY:{ JSON *new_obj = CreateArray(); if (recurse == 0) new_obj->son = obj->son; else{ if (obj->son != NULL){ new_obj->son = Duplicate(obj->son, 1); JSON *old_item = obj->son, *new_item = new_obj->son; for ( ; old_item->next != NULL; old_item = old_item->next, new_item = new_item->next){ JSON *temp = Duplicate(old_item->next,1); new_item->next = temp; temp->last = new_item; } } } return new_obj; } case JSON_OBJECT:{ JSON *new_obj = CreateObject(); if (recurse == 0) new_obj->son = obj->son; else{ if (obj->son != NULL){ new_obj->son = Duplicate(obj->son, 1); JSON *old_item = obj->son, *new_item = new_obj->son; for ( ; old_item->next != NULL; old_item = old_item->next, new_item = new_item->next){ JSON *temp = Duplicate(old_item->next,1); new_item->next = temp; temp->last = new_item; } } } return new_obj; } case JSON_ENTRY:{ JSON *new_obj = CreateEntry(obj->key, Duplicate(obj->value,1)); return new_obj; } default: return NULL; } }
/*
 * Reads "name sex age" records from stdin, one per line, appending one list
 * node per record to the singly linked list rooted at Top.  Returns the
 * (possibly newly created) list head.
 *
 * Fixes from the previous revision: when called with a non-empty list
 * (Top != NULL), `ep` was used uninitialized on the first iteration — we
 * now walk to the current tail before appending.  The unused locals
 * name/sex/age were removed.
 */
struct entry *ReadEntryData(struct entry *Top)
{
	struct entry *ep;
	char *cp, *tp;
	char buf[512];

	while (fgets(buf, sizeof(buf), stdin) != NULL) {
		/*
		 * Node setup.
		 */
		if (Top == NULL) {
			Top = CreateEntry();
			ep = Top;
		} else {
			/* Walk to the current tail before appending. */
			for (ep = Top; ep->next != NULL; ep = ep->next)
				;
			ep->next = CreateEntry();
			ep = ep->next;
		}

		/* Terminate the name at the first blank. */
		cp = buf;
		tp = cp;
		if ((cp = index(tp, ' ')) == NULL) {
			fprintf(stderr, "Input format fail: %s\n", tp);
			exit(-1);
		}
		*cp++ = '\0';
		strcpy(ep->name, tp);
		ep->sex = *cp++;
		cp++;		/* skip the separator before the age field */
		ep->age = atoi(cp);
	}
	return (Top);
}
// Appends a new entry for `cell` to the tail of its hash bucket.
//
// Fix: the previous revision never advanced _lasts[key], so every insert
// into a bucket overwrote the same ->next link, dropping earlier entries;
// it also performed a dead O(n) walk to the bucket tail whose result was
// unused.  We now append at the tracked tail and advance it (O(1)).
void add_cell(Node *cell) {
#ifdef PRINT_CALL_MAP
	std::cout<<"P";
#endif
	int key = calculate_hash((size_t)(cell->offset + _buffers->data));
	Entry *added = CreateEntry(cell);
	_lasts[key]->next = added;
	_lasts[key] = added;
}
// Builds a registry-entry descriptor from the supplied parameters and
// appends it to this object's entry list.  Always reports success.
BOOL CAutoReg::AddEntry( HKEY hKey, LPCTSTR pKey, DWORD dwType, LPCTSTR pName, void* pvoidData, DWORD dwLength, DWORD dwDef, void* pvoidDef, DWORD dwDefLength, DWORD dwUser, LPCTSTR pComputer )
{_STTEX();
	// Populate a descriptor for the requested registry value.
	CAutoRegEntry entry;
	CreateEntry( &entry, hKey, pKey, dwType, pName, pvoidData, dwLength,
	             dwDef, pvoidDef, dwDefLength, dwUser, pComputer );

	// Record it in the managed list.
	m_Entrys += entry;

	return TRUE;
}
// Uploads `size` bytes of vertex data into a cached D3D vertex buffer.
// An existing cache slot is reused when it is large enough; otherwise the
// slot is reallocated (or a brand-new one is created).  On success,
// `info.id` identifies the slot that received the data.
bool D3DVertexBufferCache::InitBuffer(D3DDevice *d3d, BYTE *data, int size, CacheEntryInfo &info)
{
	assert(d3d != NULL);
	assert(data != NULL);
	assert(size > 0);

	int slot = FindBestEntry(size);

	// A slot that is too small must be dropped and reallocated.
	if (slot >= 0 && _cache[slot].size < size) {
		DeleteEntry(slot);
		slot = -1;
	}

	CacheEntry *entry = NULL;
	if (slot < 0) {
		// No usable slot: allocate a fresh buffer entry.
		CacheEntry fresh;
		if (!CreateEntry(d3d, fresh, size)) {
			WRN("Failed to create new vbcache entry");
			return false;
		}
		_cache.Add(fresh);
		info.id = _cache.Length() - 1;
		entry = _cache.Last();
	} else {
		// Reuse the existing, sufficiently large slot.
		info.id = slot;
		entry = &_cache[slot];
	}
	assert(entry != NULL);

	if (!InsertData(*entry, data, size)) {
		WRN("Failed to insert vbcache data");
		return false;
	}
	return true;
}
//--------------------------------------------------------------------------- TWordID TNS_KawariDictionary::Insert(TEntryID entry,TWordID id) { if(id==0) return(0); TKawariCode_base* word=GetWordFromID(id); if(word==NULL) return(0); TKawariCodeEntryCall *macro=dynamic_cast<TKawariCodeEntryCall *>(word); if(macro) { // 追加するのは純粋仮想単語 TEntryID child=CreateEntry(macro->Get()); PVWToEntry[id]=child; EntryToPVW[child]=id; } Dictionary[entry].push_back(id); ReverseDictionary[id].insert(entry); return(id); }
// Registers aObserver for every HTML tag in the aTags list under the
// notification topic aTopic, creating the topic's observer entry on first
// use.
NS_IMETHODIMP
nsParserService::RegisterObserver(nsIElementObserver* aObserver,
                                  const nsAString& aTopic,
                                  const eHTMLTags* aTags)
{
  nsresult result = NS_OK;
  nsObserverEntry* entry = GetEntry(aTopic);

  if(!entry) {
    result = CreateEntry(aTopic,&entry);
    NS_ENSURE_SUCCESS(result,result);
  }

  // aTags is expected to be terminated by a zero tag (assumption — confirm
  // with callers); tags above NS_HTML_TAG_MAX are silently skipped.
  while (*aTags) {
    if (*aTags <= NS_HTML_TAG_MAX) {
      entry->AddObserver(aObserver,*aTags);
    }
    ++aTags;
  }

  return result;
}
/*
 * Reads the ACU archive's master header and then walks every file header,
 * creating an in-memory entry for each and seeking past its stored data.
 *
 * Returns 0 on success, -1 when the master header is unreadable, and 1 on
 * any later per-file failure.
 */
int AcuArchive::LoadContents(void)
{
	NuError nerr;
	int numEntries;

	ASSERT(fFp != NULL);
	rewind(fFp);

	/*
	 * Read the master header.  In an ACU file this holds the number of
	 * files and a bunch of stuff that doesn't seem to change.
	 */
	if (ReadMasterHeader(&numEntries) != 0)
		return -1;

	while (numEntries) {
		AcuFileEntry fileEntry;
		nerr = ReadFileHeader(&fileEntry);
		if (nerr != kNuErrNone)
			return 1;
		if (CreateEntry(&fileEntry) != 0)
			return 1;

		/* if file isn't empty, seek past it */
		if (fileEntry.dataStorageLen) {
			nerr = AcuSeek(fileEntry.dataStorageLen);
			if (nerr != kNuErrNone)
				return 1;
		}
		numEntries--;
	}

	return 0;
}
/** Creates a new entry as a child of the current context as part of a bulk
creation operation.  The entry will not be committed to file immediately.
The parent ID and entry ID are set by the Message Server.

@param aEntry Index entry value for the new entry
@param aOwnerId The SID of the process that will own the created entry.
@return KErrNone - success; KErrNoMemory - a memory allocation failed;
KErrNotSupported - aEntry is invalid
*/
EXPORT_C TInt CMsvServerEntry::CreateEntryBulk(TMsvEntry& aEntry, TSecureId aOwnerId)
	{
	// ETrue marks this as a bulk (deferred-commit) creation.
	return CreateEntry(aEntry, aOwnerId, ETrue);
	}
// Expands a (possibly shortened) path into its long-name form.  Returns a
// pointer to an internal static buffer that is overwritten on every call
// (NOTE(review): not reentrant/thread-safe).
char* DOS_Drive_Cache::GetExpandName(const char* path) {
	static char work [CROSS_LEN] = { 0 };
	char dir [CROSS_LEN];

	work[0] = 0;
	strcpy (dir,path);

	// Split off the directory part (everything up to the last separator).
	const char* pos = strrchr(path,CROSS_FILESPLIT);
	if (pos) dir[pos-path+1] = 0;
	CFileInfo* dirInfo = FindDirInfo(dir, work);

	if (pos) {
		// Last Entry = File
		strcpy(dir,pos+1);
		GetLongName(dirInfo, dir);
		strcat(work,dir);
	}

	if (*work) {
		size_t len = strlen(work);
#if defined (WIN32)
		// Trailing separator, but not a drive root like "C:\"
		if((work[len-1] == CROSS_FILESPLIT ) && (len >= 2) && (work[len-2] != ':')) {
#else
		if((len > 1) && (work[len-1] == CROSS_FILESPLIT )) {
#endif
			work[len-1] = 0; // Remove trailing slashes except when in root
		}
	}
	return work;
}

// Adds a single file/directory to the cached directory information for
// `path`, unless checkExists is set and the name is already cached.
void DOS_Drive_Cache::AddEntry(const char* path, bool checkExists) {
	// Get Last part...
	char file [CROSS_LEN];
	char expand [CROSS_LEN];

	CFileInfo* dir = FindDirInfo(path,expand);
	const char* pos = strrchr(path,CROSS_FILESPLIT);

	if (pos) {
		strcpy(file,pos+1);
		// Check if file already exists, then don't add new entry...
		if (checkExists) {
			if (GetLongName(dir,file)>=0) return;
		}

		CreateEntry(dir,file,false);

		Bits index = GetLongName(dir,file);
		if (index>=0) {
			Bit32u i;
			// Check if there are any open search dir that are affected by this...
			// Any active search whose cursor sits at or past the inserted
			// slot is shifted so it does not skip or repeat entries.
			if (dir) for (i=0; i<MAX_OPENDIRS; i++) {
				if ((dirSearch[i]==dir) && ((Bit32u)index<=dirSearch[i]->nextEntry))
					dirSearch[i]->nextEntry++;
			}
		}
		// LOG_DEBUG("DIR: Added Entry %s",path);
	} else {
		// LOG_DEBUG("DIR: Error: Failed to add %s",path);
	}
}
static int CommandProc(struct cmd_syndesc *a_as, void *arock) { int i; long code = 0; long upos; long gpos = 0; struct prentry uentry, gentry; struct ubik_hdr *uh; char *dfile = 0; const char *pbase = AFSDIR_SERVER_PRDB_FILEPATH; char *pfile = NULL; char pbuffer[1028]; struct cmd_parmdesc *tparm; tparm = a_as->parms; if (tparm[0].items) { wflag++; /* so we are treated as admin and can create "mis"owned groups */ pr_noAuth = 1; } if (tparm[1].items) { flags |= DO_USR; } if (tparm[2].items) { flags |= DO_GRP; } if (tparm[3].items) { flags |= (DO_GRP | DO_MEM); } if (tparm[4].items) { nflag++; } if (tparm[5].items) { flags |= DO_SYS; } if (tparm[6].items) { flags |= DO_OTR; } if (tparm[7].items) { pfile = tparm[7].items->data; } if (tparm[8].items) { dfile = tparm[8].items->data; } if (pfile == NULL) { snprintf(pbuffer, sizeof(pbuffer), "%s.DB0", pbase); pfile = pbuffer; } if ((dbase_fd = open(pfile, (wflag ? O_RDWR : O_RDONLY) | O_CREAT, 0600)) < 0) { fprintf(stderr, "pt_util: cannot open %s: %s\n", pfile, strerror(errno)); exit(1); } if (read(dbase_fd, buffer, HDRSIZE) < 0) { fprintf(stderr, "pt_util: error reading %s: %s\n", pfile, strerror(errno)); exit(1); } if (dfile) { if ((dfp = fopen(dfile, wflag ? "r" : "w")) == 0) { fprintf(stderr, "pt_util: error opening %s: %s\n", dfile, strerror(errno)); exit(1); } } else dfp = (wflag ? stdin : stdout); uh = (struct ubik_hdr *)buffer; if (ntohl(uh->magic) != UBIK_MAGIC) fprintf(stderr, "pt_util: %s: Bad UBIK_MAGIC. 
Is %x should be %x\n", pfile, ntohl(uh->magic), UBIK_MAGIC); memcpy(&uv, &uh->version, sizeof(struct ubik_version)); if (wflag && ntohl(uv.epoch) == 0 && ntohl(uv.counter) == 0) { uv.epoch = htonl(2); /* a ubik version of 0 or 1 has special meaning */ memcpy(&uh->version, &uv, sizeof(struct ubik_version)); lseek(dbase_fd, 0, SEEK_SET); if (write(dbase_fd, buffer, HDRSIZE) < 0) { fprintf(stderr, "pt_util: error writing ubik version to %s: %s\n", pfile, strerror(errno)); exit(1); } } /* Now that any writeback is done, swap these */ uv.epoch = ntohl(uv.epoch); uv.counter = ntohl(uv.counter); fprintf(stderr, "Ubik Version is: %d.%d\n", uv.epoch, uv.counter); if (read(dbase_fd, &prh, sizeof(struct prheader)) < 0) { fprintf(stderr, "pt_util: error reading %s: %s\n", pfile, strerror(errno)); exit(1); } Initdb(); initialize_PT_error_table(); if (wflag) { struct usr_list *u; int seenGroup = 0, id = 0, flags = 0; while (fgets(buffer, sizeof(buffer), dfp)) { int oid, cid, quota, uid; char name[PR_MAXNAMELEN], mem[PR_MAXNAMELEN]; if (isspace(*buffer)) { code = sscanf(buffer, "%s %d", mem, &uid); if (code != 2) { fprintf(stderr, "Insuffient data provided for group membership\n"); exit(1); } if (!seenGroup) { fprintf(stderr, "Group member %s listed outside of group\n", mem); exit(1); } for (u = usr_head; u; u = u->next) if (u->uid && u->uid == uid) break; if (u) { /* Add user - deferred because it is probably foreign */ u->uid = 0; if (FindByID(0, uid)) code = PRIDEXIST; else { if (!code && (flags & (PRGRP | PRQUOTA)) == (PRGRP | PRQUOTA)) { gentry.ngroups++; code = pr_WriteEntry(0, 0, gpos, &gentry); if (code) fprintf(stderr, "Error setting group count on %s: %s\n", name, afs_error_message(code)); } code = CreateEntry(0, u->name, &uid, 1 /*idflag */ , 1 /*gflag */ , SYSADMINID /*oid */ , SYSADMINID /*cid */ ); } if (code) fprintf(stderr, "Error while creating %s: %s\n", u->name, afs_error_message(code)); continue; } /* Add user to group */ if (id == ANYUSERID || id == 
AUTHUSERID || uid == ANONYMOUSID) { code = PRPERM; } else if ((upos = FindByID(0, uid)) && (gpos = FindByID(0, id))) { code = pr_ReadEntry(0, 0, upos, &uentry); if (!code) code = pr_ReadEntry(0, 0, gpos, &gentry); if (!code) code = AddToEntry(0, &gentry, gpos, uid); if (!code) code = AddToEntry(0, &uentry, upos, id); } else code = PRNOENT; if (code) fprintf(stderr, "Error while adding %s to %s: %s\n", mem, name, afs_error_message(code)); } else { code = sscanf(buffer, "%s %d/%d %d %d %d", name, &flags, "a, &id, &oid, &cid); if (code != 6) { fprintf(stderr, "Insufficient data provided for user/group\n"); exit(1); } seenGroup = 1; if (FindByID(0, id)) code = PRIDEXIST; else code = CreateEntry(0, name, &id, 1 /*idflag */ , flags & PRGRP, oid, cid); if (code == PRBADNAM) { u = malloc(sizeof(struct usr_list)); u->next = usr_head; u->uid = id; strcpy(u->name, name); usr_head = u; } else if (code) { fprintf(stderr, "Error while creating %s: %s\n", name, afs_error_message(code)); } else if ((flags & PRACCESS) || (flags & (PRGRP | PRQUOTA)) == (PRGRP | PRQUOTA)) { gpos = FindByID(0, id); code = pr_ReadEntry(0, 0, gpos, &gentry); if (!code) { gentry.flags = flags; gentry.ngroups = quota; code = pr_WriteEntry(0, 0, gpos, &gentry); } if (code) fprintf(stderr, "Error while setting flags on %s: %s\n", name, afs_error_message(code)); } } } for (u = usr_head; u; u = u->next) if (u->uid) fprintf(stderr, "Error while creating %s: %s\n", u->name, afs_error_message(PRBADNAM)); } else { for (i = 0; i < HASHSIZE; i++) { upos = nflag ? ntohl(prh.nameHash[i]) : ntohl(prh.idHash[i]); while (upos) { long newpos; newpos = display_entry(upos); if (newpos == upos) { fprintf(stderr, "pt_util: hash error in %s chain %d\n", nflag ? 
"name":"id", i); exit(1); } else upos = newpos; } } if (flags & DO_GRP) display_groups(); } lseek(dbase_fd, 0, L_SET); /* rewind to beginning of file */ if (read(dbase_fd, buffer, HDRSIZE) < 0) { fprintf(stderr, "pt_util: error reading %s: %s\n", pfile, strerror(errno)); exit(1); } uh = (struct ubik_hdr *)buffer; uh->version.epoch = ntohl(uh->version.epoch); uh->version.counter = ntohl(uh->version.counter); if ((uh->version.epoch != uv.epoch) || (uh->version.counter != uv.counter)) { fprintf(stderr, "pt_util: Ubik Version number changed during execution.\n"); fprintf(stderr, "Old Version = %d.%d, new version = %d.%d\n", uv.epoch, uv.counter, uh->version.epoch, uh->version.counter); } close(dbase_fd); exit(0); }
// Exercises session change-notifications when many entries are created,
// copied, moved and deleted in bulk.  Iterates past
// TMsvPackedChangeNotification::KMsvPackedChangeLimit (twice over) so that
// notifications must be split across several observer events.
LOCAL_C void TestMultipleNotifsL()
	{
	CMultipleSessionObserver* ob = new(ELeave) CMultipleSessionObserver;
	CleanupStack::PushL(ob);
	CMsvSession* session = CMsvSession::OpenSyncL(*ob);
	CleanupStack::PushL(session);

	CMsvEntry* cEntry = CMsvEntry::NewL(*session, KMsvGlobalInBoxIndexEntryId, TMsvSelectionOrdering());
	CleanupStack::PushL(cEntry);

	CEntryObserver* entryOb1=new (ELeave) CEntryObserver;
	CleanupStack::PushL(entryOb1);
	cEntry->AddObserverL(*entryOb1);

	CTestActive* active = new (ELeave) CTestActive;
	CleanupStack::PushL(active);

	CMsvOperation* operation;
	CMsvEntrySelection* selection;

	// create folders to work under
	TMsvEntry folder1;
	folder1.iDescription.Set(KShortDescription);
	folder1.iDetails.Set(KShortDetails);
	folder1.iType = KUidMsvFolderEntry;
	folder1.iMtm = KUidMsvLocalServiceMtm;
	folder1.iServiceId = KMsvLocalServiceIndexEntryId;

	TMsvEntry folder2;
	folder2.iDescription.Set(KShortDescription);
	folder2.iDetails.Set(KShortDetails);
	folder2.iType = KUidMsvFolderEntry;
	folder2.iMtm = KUidMsvLocalServiceMtm;
	folder2.iServiceId = KMsvLocalServiceIndexEntryId;

	TMsvEntry folder3;
	folder3.iDescription.Set(KShortDescription);
	folder3.iDetails.Set(KShortDetails);
	folder3.iType = KUidMsvFolderEntry;
	folder3.iMtm = KUidMsvLocalServiceMtm;
	folder3.iServiceId = KMsvLocalServiceIndexEntryId;

	// create all th entries
	TMsvEntry entry;
	entry.iDescription.Set(KShortDescription);
	entry.iDetails.Set(KShortDetails);
	entry.iType = KUidMsvFolderEntry;
	entry.iMtm = KUidMsvLocalServiceMtm;
	entry.iServiceId = KMsvLocalServiceIndexEntryId;

	// create folder 2 & 3
	cEntry->SetEntryL(KMsvGlobalInBoxIndexEntryId);
	CreateEntry(folder2, *cEntry, KErrNone, *entryOb1);
	CreateEntry(folder3, *cEntry, KErrNone, *entryOb1);

	TMsvLocalOperationProgress prog;
	// Enough iterations to cross the packed-change limit twice.
	TInt max = 2*TMsvPackedChangeNotification::KMsvPackedChangeLimit+4;
	for (TInt count=1; count<=max; count++)
		{
		test.Printf(_L("."));
		// create another entry under folder 2
		cEntry->SetEntryL(folder2.Id());
		CreateEntry(entry, *cEntry, KErrNone, *entryOb1);
		// create folder1
		cEntry->SetEntryL(KMsvGlobalInBoxIndexEntryId);
		CreateEntry(folder1, *cEntry, KErrNone, *entryOb1);

		// test: folder 3 starts each iteration empty
		cEntry->SetEntryL(folder3.Id());
		selection = cEntry->ChildrenL();
		test(selection->Count()==0);
		delete selection;

		// copy all entries to folder 3
		ob->Start(MMsvSessionObserver::EMsvEntriesCreated, folder3.Id());
		cEntry->SetEntryL(folder2.Id());
		selection = cEntry->ChildrenL();
		test(selection->Count()==count);
		active->StartL();
		operation = cEntry->CopyL(*selection, folder3.Id(), active->iStatus);
		delete selection;
		CActiveScheduler::Start();

		// operation complete: verify copy progress
		test(operation->iStatus.Int()==KErrNone);
		test(operation->Mtm()==KUidMsvLocalServiceMtm);
		test(operation->Service()==KMsvLocalServiceIndexEntryId);
		prog = McliUtils::GetLocalProgressL(*operation);
		test(prog.iTotalNumberOfEntries==count);
		test(prog.iNumberCompleted==count);
		test(prog.iNumberFailed==0);
		test(prog.iError==KErrNone);
		delete operation;
		operation=NULL;

		// test: all entries arrived in folder 3
		cEntry->SetEntryL(folder3.Id());
		selection = cEntry->ChildrenL();
		test(selection->Count()==count);
		delete selection;

		// Past the first packed-change limit the notification must have
		// been split; wait for the follow-up event when needed.
		if (ob->iEvents>1)
			test(ob->iSelection.Count()>TMsvPackedChangeNotification::KMsvPackedChangeLimit);
		else if (count>TMsvPackedChangeNotification::KMsvPackedChangeLimit)
			{
			ob->Wait();
			CActiveScheduler::Start();
			}

		// test
		selection = cEntry->ChildrenL();
		test(selection->Count()==count);
		delete selection;

		// Same again for the second packed-change limit.
		if (ob->iEvents>2)
			test(ob->iSelection.Count()>2*TMsvPackedChangeNotification::KMsvPackedChangeLimit);
		else if (count>2*TMsvPackedChangeNotification::KMsvPackedChangeLimit)
			{
			ob->Wait();
			CActiveScheduler::Start();
			}

		test(ob->iSelection.Count()==count);
		ob->Finish();

		// move all entries to folder 1
		selection = cEntry->ChildrenL();
		test(selection->Count()==count);
		ob->Start(MMsvSessionObserver::EMsvEntriesMoved, folder1.Id(), folder3.Id());
		active->StartL();
		operation = cEntry->MoveL(*selection, folder1.Id(), active->iStatus);
		delete selection;
		CActiveScheduler::Start();

		// operation complete: verify move progress
		test(operation->iStatus.Int()==KErrNone);
		test(operation->Mtm()==KUidMsvLocalServiceMtm);
		test(operation->Service()==KMsvLocalServiceIndexEntryId);
		prog = McliUtils::GetLocalProgressL(*operation);
		test(prog.iTotalNumberOfEntries==count);
		test(prog.iNumberCompleted==count);
		test(prog.iNumberFailed==0);
		test(prog.iError==KErrNone);
		delete operation;
		operation=NULL;

		cEntry->SetEntryL(folder1.Id());
		selection = cEntry->ChildrenL();
		test(selection->Count()==count);
		delete selection;

		// Wait for split move notifications, checking the event count
		// before and after each wait.
		if (ob->iEvents>1)
			test(ob->iSelection.Count()>TMsvPackedChangeNotification::KMsvPackedChangeLimit);
		else if (count>TMsvPackedChangeNotification::KMsvPackedChangeLimit)
			{
			test(ob->iEvents==1);
			ob->Wait();
			CActiveScheduler::Start();
			test(ob->iEvents==2);
			}

		if (ob->iEvents>2)
			test(ob->iSelection.Count()>2*TMsvPackedChangeNotification::KMsvPackedChangeLimit);
		else if (count>2*TMsvPackedChangeNotification::KMsvPackedChangeLimit)
			{
			test(ob->iEvents==2);
			ob->Wait();
			CActiveScheduler::Start();
			test(ob->iEvents==3);
			}

		test(ob->iSelection.Count()==count);
		ob->Finish();

		// delete them (folder1 plus its count children => count+1 entries)
		ob->Start(MMsvSessionObserver::EMsvEntriesDeleted, KMsvGlobalInBoxIndexEntryId);
		cEntry->SetEntryL(KMsvGlobalInBoxIndexEntryId);
		active->StartL();
		operation = cEntry->DeleteL(folder1.Id(), active->iStatus);
		CActiveScheduler::Start();

		// operation complete: verify delete progress
		test(operation->iStatus.Int()==KErrNone);
		test(operation->Mtm()==KUidMsvLocalServiceMtm);
		test(operation->Service()==KMsvLocalServiceIndexEntryId);
		prog = McliUtils::GetLocalProgressL(*operation);
		test(prog.iTotalNumberOfEntries==1);
		test(prog.iNumberFailed==0);
		test(prog.iError==KErrNone);
		delete operation;
		operation=NULL;

		if (count+1>TMsvPackedChangeNotification::KMsvPackedChangeLimit && ob->iEvents==1)
			{
			ob->Wait();
			CActiveScheduler::Start();
			}
		if (count+1>2*TMsvPackedChangeNotification::KMsvPackedChangeLimit && ob->iEvents==2)
			{
			ob->Wait();
			CActiveScheduler::Start();
			}
		test(ob->iSelection.Count()==count+1);
		ob->Finish();
		}
	test.Printf(_L("\n"));
	CleanupStack::PopAndDestroy(5);
	}
//-----------------------------------------------------------------------------------------// bool Set( hash_table_t *ht, uint8_t *key, size_t keySize, uint8_t *value, size_t valueSize ) { assert( ht != NULL ); if( key == NULL || keySize == 0 || value == NULL || valueSize == 0 ) return false; uint32_t hash = CalcHash( key, keySize ); uint32_t index = hash % TABLE_SIZE; bucket_t *bucket = ht->buckets + index; entry_t *ent = bucket->entry; entry_t *prev = NULL; entry_t *unused = NULL; // search entry that matchs one of below condition. // * duplicated key // * unused entry for(; ent != NULL; prev = ent, ent = ent->next ) { if( ent->hash == hash && ent->keySize == keySize && memcmp( ent->key, key, ent->keySize ) == 0 ) { // duplicated key found. break; } if( ent->isUsed == false ) { // save unused entry. unused = ent; } } // if no duplicated key entry found, // then set "unused" to "ent" for reusing. // of course, if "unused" is NULL, then "ent" remains as NULL. if( ent == NULL ) ent = unused; // (re)allocate entry. entry_t *newEnt = CreateEntry( ent, hash, key, keySize, value, valueSize ); if( newEnt == NULL ) { return false; } if( bucket->entry == NULL ) { // if "bucket->entry" is NULL, // that means "this bucket has no entry". // so, allocated entry above is first entry in this bucket. bucket->entry = newEnt; } else if( ent == NULL ) { // "ent is NULL" means // no duplicated key entry and unused entry found at searching. // so we have new entry(allocated entry). // and this bucket has one or more entry, so // * "prev" must not be NULL, // * "prev" is last entry in this bucket unti we add new entry. // so, "prev->next" must be NULL. // * "unused" must be NULL. assert( prev != NULL ); assert( prev->next == NULL ); assert( unused == NULL ); // append new entry to the tail. prev->next = newEnt; } // else, if we haven't much any above condition, // we have overwrited dupulicated key entry or reused unused entry. return true; }
// Verifies FileMaps parsing when /proc-style map lines straddle the parser's
// internal read-buffer boundary (kBufferSize): entries are laid out so that
// both buffer edges can split a line, forcing the parser's carry-over path.
TEST(MapsTest, file_buffer_cross) {
  constexpr size_t kBufferSize = 2048;
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);

  // Compute how many to add in the first buffer.
  size_t entry_len = CreateEntry(0).size();
  size_t index;
  std::string file_data;
  for (index = 0; index < kBufferSize / entry_len; index++) {
    file_data += CreateEntry(index);
  }
  // Add a long name to make sure that the first buffer does not contain a
  // complete line.
  // Remove the last newline.
  size_t extra = 0;
  size_t leftover = kBufferSize % entry_len;
  size_t overlap1_index = 0;
  std::string overlap1_name;
  if (leftover == 0) {
    // Exact match, add a long name to cross over the value.
    overlap1_name = "/fake/name/is/long/on/purpose";
    file_data.erase(file_data.size() - 1);
    file_data += ' ' + overlap1_name + '\n';
    extra = entry_len + overlap1_name.size() + 1;
    overlap1_index = index;
  }

  // Compute how many need to go in to hit the buffer boundary exactly.
  size_t bytes_left_in_buffer = kBufferSize - extra;
  size_t entries_to_add = bytes_left_in_buffer / entry_len + index;
  for (; index < entries_to_add; index++) {
    file_data += CreateEntry(index);
  }

  // Now figure out how many bytes to add to get exactly to the buffer boundary.
  leftover = bytes_left_in_buffer % entry_len;
  std::string overlap2_name;
  size_t overlap2_index = 0;
  if (leftover != 0) {
    // Pad the last entry's name with 'x's so the line ends exactly at the
    // buffer boundary.
    file_data.erase(file_data.size() - 1);
    file_data += ' ';
    overlap2_name = std::string(leftover - 1, 'x');
    file_data += overlap2_name + '\n';
    overlap2_index = index - 1;
  }

  // Now add a few entries on the next page.
  for (size_t start = index; index < start + 10; index++) {
    file_data += CreateEntry(index);
  }

  ASSERT_TRUE(android::base::WriteStringToFile(file_data, tf.path, 0660, getuid(), getgid()));

  FileMaps maps(tf.path);
  ASSERT_TRUE(maps.Parse());
  EXPECT_EQ(index, maps.Total());

  // Verify all of the maps.
  for (size_t i = 0; i < index; i++) {
    MapInfo* info = maps.Get(i);
    ASSERT_TRUE(info != nullptr) << "Failed verifying index " + std::to_string(i);
    EXPECT_EQ(i * 4096, info->start) << "Failed verifying index " + std::to_string(i);
    EXPECT_EQ((i + 1) * 4096, info->end) << "Failed verifying index " + std::to_string(i);
    EXPECT_EQ(0U, info->offset) << "Failed verifying index " + std::to_string(i);
    if (overlap1_index != 0 && i == overlap1_index) {
      EXPECT_EQ(overlap1_name, info->name)
          << "Failed verifying overlap1 name " + std::to_string(i);
    } else if (overlap2_index != 0 && i == overlap2_index) {
      EXPECT_EQ(overlap2_name, info->name)
          << "Failed verifying overlap2 name " + std::to_string(i);
    } else {
      EXPECT_EQ("", info->name) << "Failed verifying index " + std::to_string(i);
    }
  }
}
// Wraps `value` in a key/value entry node and appends it to `object`.
// (The previous revision fetched object->son into an unused local here;
// that dead statement has been removed.)
void AddItemToObject(JSON *object, const char *key, JSON *value){
    JSON *entry = CreateEntry(key, value);
    AddEntryToObject(object, entry);
}
/** Creates a new entry as a child of the current context.

The parent ID and entry ID are set by the Message Server.

@param aEntry Index entry value for the new entry
@return KErrNone - success; KErrNoMemory - a memory allocation failed;
KErrNotSupported - aEntry is invalid
*/
EXPORT_C TInt CMsvServerEntry::CreateEntry(TMsvEntry& aEntry)
	{
	// Delegate to the owner-aware overload; the entry is owned by the
	// Message Server itself.
	return CreateEntry(aEntry, KMsvServerId);
	}
/** Creates a new entry as a child of the current context as part of a bulk
creation operation.

The parent ID and entry ID are set by the Message Server.

@param aEntry Index entry value for the new entry
@return KErrNone - success; KErrNoMemory - a memory allocation failed;
KErrNotSupported - aEntry is invalid
*/
EXPORT_C TInt CMsvServerEntry::CreateEntryBulk(TMsvEntry& aEntry)
	{
	// Owned by the Message Server; ETrue marks this as a bulk
	// (deferred-commit) creation.
	return CreateEntry(aEntry, KMsvServerId, ETrue);
	}