Example #1
0
/*
 * MFS file-server entry point.
 *
 * Usage: server <port> <file-system-image>
 *
 * Opens (or creates) the on-disk file-system image, initializes a fresh
 * image when the existing file is too small to hold a header, then serves
 * MFS protocol requests (INIT / LOOKUP / STAT / READ / WRITE / CREAT /
 * UNLINK / SHUTDOWN) over UDP until a SHUTDOWN request arrives.
 *
 * Relies on project helpers (allot_space, prepare_inode, fix_inode,
 * lookup, update_inode, gen_inum, verify, flush, write_header, UDP_*)
 * and globals (free_bytes, unwritten_bytes, prot_r) declared elsewhere
 * in this file.
 */
int
main(int argc, char *argv[])
{
	// check for correct arguments
	if(argc != 3) {
		fprintf(stderr, "Usage: %s <port> <file-system-image>\n", argv[0]);
		exit(1);
	}

	int portnum = atoi(argv[1]);
	char *fs_image = argv[2];
	int sd = UDP_Open(portnum);
	assert(sd > -1);

	int fd = open(fs_image, O_RDWR|O_CREAT, S_IRWXU);
	if(fd < 0) {
		fprintf(stderr, "Cannot open file image\n");
		exit(1);
	}

	struct stat file_stat;
	if(fstat(fd, &file_stat) < 0) {
		// FIX: this is a stat failure, not an open failure
		fprintf(stderr, "Cannot stat file image\n");
		exit(1);
	}

	int i, j, rc;
	MFS_Header_t *header;
	int image_size;
	free_bytes = MFS_BYTE_STEP_SIZE;	// global growth budget used by allot_space

	int entry_offset, inode_offset, new_dir_offset, parent_inode_offset;
	int tmp_offset, tmp_inode_offset, tmp_imap_offset;
	int done = 0;
	MFS_Imap_t *imap_temp;
	MFS_Inode_t *inode_temp;
	MFS_Inode_t *new_inode;
	MFS_DirEnt_t *entry_temp;

	if(file_stat.st_size >= sizeof(MFS_Header_t)) {
		// Existing image: slurp the whole file into memory, with one
		// extra MFS_BYTE_STEP_SIZE of growth room at the end.
		image_size = file_stat.st_size + MFS_BYTE_STEP_SIZE;
		printf("Using old file of size %d\n", (int)file_stat.st_size);
		header = (MFS_Header_t *)malloc(image_size);
		if(header == NULL) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
		// Put image contents in memory
		rc = read(fd, header, file_stat.st_size);

		if(rc < 0){
			// FIX: read failure, not an open failure
			fprintf(stderr, "Cannot read file image\n");
			exit(1);
		}
	} else {
		// Initialize a brand-new image
		image_size = sizeof(MFS_Header_t) + MFS_BYTE_STEP_SIZE;
		header = (MFS_Header_t *)malloc(image_size);
		if(header == NULL) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}

		// root initialization: one inode plus the imap chunk that holds it
		inode_temp = allot_space(&header, sizeof(MFS_Inode_t), &tmp_inode_offset);
		imap_temp = allot_space(&header, sizeof(MFS_Imap_t), &tmp_imap_offset);
		prepare_inode(inode_temp, MFS_DIRECTORY, NULL);

		// mark every imap slot free first...
		for (i = 0; i < 14; i++) {
			imap_temp->inodes[i] = -1;
		}

		// header initialization: no imap chunks allocated yet.
		// NOTE(review): 4096/14 presumably matches the length of
		// header->map[] in MFS_Header_t -- confirm against the struct.
		for (i = 0; i < 4096/14; i++) {
			header->map[i] = -1;
		}

		// ...then install the root inode in slot 0.
		// (FIX: the earlier duplicate store before the reset loop above
		// was dead -- the loop immediately wiped it -- and was removed.)
		imap_temp->inodes[0] = tmp_inode_offset;

		// add two default entries: "." and ".." both refer to the root
		entry_temp = allot_space(&header, MFS_BLOCK_SIZE, &tmp_offset);
		entry_temp[0].name[0] = '.';
		entry_temp[0].name[1] = '\0';
		entry_temp[0].inum = 0; 
		entry_temp[1].name[0] = '.';
		entry_temp[1].name[1] = '.';
		entry_temp[1].name[2] = '\0';
		entry_temp[1].inum = 0; 

		// remaining directory slots are free
		for (i = 2; i < MFS_BLOCK_SIZE/sizeof(MFS_DirEnt_t); i++) {
			entry_temp[i].inum = -1;
		}

		inode_temp->data[0] = tmp_offset;

		//Write to disk
		header->map[0] = tmp_imap_offset;
		flush(fd);		
		write_header(fd, header);
		printf("Initializing new file\n");
	}

	// NOTE: void* arithmetic below is a GCC extension (kept from the
	// original code).  block_ptr is the base of the data region that
	// all inode data[] offsets are relative to.
	void* header_ptr = (void*)header;
	void* block_ptr = header_ptr + sizeof(MFS_Header_t);

	prot_r = (MFS_Prot_t*)malloc(sizeof(MFS_Prot_t));
	if(prot_r == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}

	printf("Started listening at port %d\n", portnum);

	// Main request loop: one UDP datagram in, one datagram out.
	// verify() is called before every mutating command so that the
	// subsequent allot_space calls cannot move `header` mid-command.
	while (1) {
		struct sockaddr_in s;
		rc = UDP_Read(sd, &s, (char*)prot_r, sizeof(MFS_Prot_t));
		if (rc > 0) {

			//Special case for shutdown
			if(prot_r->cmd == CMD_INIT){
				printf("Server initialized\n");
				prot_r->ret = 0;
			} else if(prot_r->cmd == CMD_LOOKUP){

				prot_r->ret = -1;
				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				prot_r->ret = lookup(block_ptr, parent_inode, &(prot_r->datapacket[0]));
			} else if(prot_r->cmd == CMD_SHUTDOWN){
				//Close file
				rc = close(fd);
				if(rc < 0){
					// FIX: close failure, not an open failure
					fprintf(stderr, "Cannot close file image\n");
					exit(1);
				}
				prot_r->ret = 0;
				// FIX: UDP_Write returns -1 on failure; the old test
				// `< -1` could never fire
				if(UDP_Write(sd, &s, (char*)prot_r, sizeof(MFS_Prot_t)) < 0){
					fprintf(stderr, "Unable to send result\n");
					exit(1);
				}
				exit(0);
			} else if(prot_r->cmd == CMD_UNLINK){

				verify(&header, &block_ptr, 16384);
				prot_r->ret = -1;
				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				if(parent_inode != NULL && parent_inode->type == MFS_DIRECTORY){
					int exist = lookup(block_ptr, parent_inode, &(prot_r->datapacket[0]));
					if(exist != -1){
						//Check if empty: a non-empty directory cannot be unlinked
						MFS_Inode_t* this_inode = fix_inode(header, exist);
						if(!(this_inode->type == MFS_DIRECTORY && this_inode->size != 0)){
							// Log-structured remove: write a fresh copy of the
							// parent's dir block and inode with the entry cleared.
							MFS_DirEnt_t* new_dir_entry = allot_space(&header, MFS_BLOCK_SIZE, &entry_offset);
							MFS_Inode_t* new_parent_inode = allot_space(&header, sizeof(MFS_Inode_t), &parent_inode_offset);

							prepare_inode(new_parent_inode, 0, parent_inode);
							update_inode(&header, prot_r->pinum, parent_inode_offset);
							i = 0, done = 0;
							while(i < 14) {
								if(parent_inode->data[i] != -1){
									j = 0;
									while(j < MFS_BLOCK_SIZE / sizeof(MFS_DirEnt_t)){
										MFS_DirEnt_t* entry = (MFS_DirEnt_t*)(block_ptr + parent_inode->data[i] + (j * sizeof(MFS_DirEnt_t)));			
										if(entry->inum != -1 && strcmp(entry->name, prot_r->datapacket) == 0 ){
											memcpy(new_dir_entry, block_ptr + parent_inode->data[i] , MFS_BLOCK_SIZE);
											//We now know which entry
											new_parent_inode->data[i] = entry_offset;
											new_dir_entry[j].inum = -1;
											update_inode(&header, exist, -1);
											prot_r->ret = 0;
											new_parent_inode->size--;
											done = 1;
											break;
										}
										j++;
									}
									if(done == 1) break;
								}
								i++;
							}


						}

					}else{
						// name not present: unlink succeeds vacuously
						prot_r->ret = 0;
					}
				}

			} else if(prot_r->cmd == CMD_READ){

				prot_r->ret = -1;
				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				if(parent_inode != NULL && parent_inode->type == MFS_REGULAR_FILE && prot_r->block >= 0 && prot_r->block < 14){
					// copy the requested block into the reply packet
					memcpy(prot_r->datapacket, block_ptr + parent_inode->data[prot_r->block], MFS_BLOCK_SIZE);
					prot_r->ret = 0;
				}
			} else if(prot_r->cmd == CMD_STAT){

				prot_r->ret = -1;
				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				if(parent_inode != NULL && prot_r->block >= 0 && prot_r->block < 14){
					// reuse block/datapacket[0] as the stat reply fields
					prot_r->block = parent_inode->size;
					prot_r->datapacket[0] = parent_inode->type;
					prot_r->ret = 0;
				}
			} else if(prot_r->cmd == CMD_WRITE){

				verify(&header, &block_ptr, 16384);
				prot_r->ret = -1;
				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				int block_offset;
				if(parent_inode != NULL && parent_inode->type == MFS_REGULAR_FILE && prot_r->block >= 0 && prot_r->block < 14){
					// Log-structured write: new inode copy plus a new data block.
					new_inode = (MFS_Inode_t*)allot_space(&header, sizeof(MFS_Inode_t), &inode_offset);
					prepare_inode(new_inode, 0, parent_inode);
					void* new_block = allot_space(&header, MFS_BLOCK_SIZE, &block_offset);

					memcpy(new_block, prot_r->datapacket, MFS_BLOCK_SIZE);
					// Back-fill any unwritten blocks below the target so the
					// file size grows to cover the written block.
					// FIX: test i >= 0 BEFORE indexing data[i]; the original
					// order read data[-1] when the whole range was unwritten.
					i = prot_r->block;
					while(i >= 0 && new_inode->data[i] == -1){
						new_inode->size += MFS_BLOCK_SIZE;
						new_inode->data[i] = block_offset; 
						i--;
					}
					new_inode->data[prot_r->block] = block_offset; 
					update_inode(&header, prot_r->pinum, inode_offset);
					prot_r->ret = 0;
				}

			} else if(prot_r->cmd == CMD_CREAT){

				verify(&header, &block_ptr, 16384);
				prot_r->ret = -1;

				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);
				// datapacket[0] carries the type; the name starts at [1]
				int exist = lookup(block_ptr, parent_inode, &(prot_r->datapacket[1]));

				if(exist == -1){

					new_inode = allot_space(&header, sizeof(MFS_Inode_t), &inode_offset);
					prepare_inode(new_inode, prot_r->datapacket[0], NULL);
					int new_inode_inum = gen_inum(&header, inode_offset);


					if(parent_inode != NULL && parent_inode->type == MFS_DIRECTORY && strlen(&(prot_r->datapacket[1])) <= 28 && new_inode_inum != -1){
						//Check if the dir is full
						MFS_DirEnt_t* entry;
						//Initialize new data block for entries
						MFS_DirEnt_t* new_entry =  allot_space(&header, MFS_BLOCK_SIZE, &entry_offset);

						MFS_Inode_t* new_parent_inode = allot_space(&header, sizeof(MFS_Inode_t), &parent_inode_offset);
						prepare_inode(new_parent_inode, 0, parent_inode);
						update_inode(&header, prot_r->pinum, parent_inode_offset);

						// Find a free slot: either inside an existing
						// directory block (copied then patched) or in a
						// brand-new block when data[i] is unallocated.
						done = 0;
						i = 0;
						while(i < 14) {
							if(parent_inode->data[i] != -1){

								j = 0;
								while(j < MFS_BLOCK_SIZE / sizeof(MFS_DirEnt_t)){
									entry = (MFS_DirEnt_t*)(block_ptr + parent_inode->data[i] + (j * sizeof(MFS_DirEnt_t)));			
									if(entry->inum == -1){

										//Copy the dir entry
										memcpy(new_entry, block_ptr + parent_inode->data[i], MFS_BLOCK_SIZE);
										new_parent_inode->data[i] = entry_offset;
										new_entry[j].inum = new_inode_inum;	
										strcpy(new_entry[j].name, &(prot_r->datapacket[1]));
										done = 1;
										break;
									}
									j++;
								}
								if(done == 1) break;
							}else{	

								//Create new directory block
								//Initialize all slots free, then claim slot 0
								for (j = 0; j < MFS_BLOCK_SIZE / sizeof(MFS_DirEnt_t); j++) {
									new_entry[j].inum = -1;
								}
								new_parent_inode->data[i] = entry_offset;
								new_entry[0].inum = new_inode_inum;			
								strcpy(new_entry[0].name, &(prot_r->datapacket[1]));
								done = 1;
								break;
							}
							i++;
						}
						if(done){
							//Actually create the inode
							//Add .. and . dirs for a new directory
							if(new_inode->type == MFS_DIRECTORY){
								MFS_DirEnt_t* new_dir_entry =  allot_space(&header, MFS_BLOCK_SIZE, &new_dir_offset);
								for (i = 0; i < MFS_BLOCK_SIZE/sizeof(MFS_DirEnt_t); i++) {
									new_dir_entry[i].inum = -1;
								}
								new_dir_entry[0].name[0] = '.';
								new_dir_entry[0].name[1] = '\0';
								new_dir_entry[0].inum = new_inode_inum; 
								new_dir_entry[1].name[0] = '.';
								new_dir_entry[1].name[1] = '.';
								new_dir_entry[1].name[2] = '\0';
								new_dir_entry[1].inum = prot_r->pinum; 
								new_inode->data[0] = new_dir_offset;
							}	

							//Write to block
							new_parent_inode->size++;
							header->total_inode++;
							prot_r->ret = 0;
						}else{
							// roll back the speculative allocations
							header->total_byte -= unwritten_bytes;
							unwritten_bytes = 0;
						}
					}else{
						// roll back the speculative allocations
						header->total_byte -= unwritten_bytes;
						unwritten_bytes = 0;
					}
				}else{
					// name already exists: creat succeeds vacuously
					prot_r->ret = 0;
				}

			} else {
				// FIX: removed unreachable `continue` after exit(1)
				fprintf(stderr, "Unknown command\n");
				exit(1);
			}

			// Persist the (possibly grown) image, then reply.
			flush(fd);
			write_header(fd, header);
			// FIX: UDP_Write failure check was `< -1` (never true)
			if(UDP_Write(sd, &s, (char*)prot_r, sizeof(MFS_Prot_t)) < 0){
				fprintf(stderr, "Unable to send result\n");
				exit(1);
			}
		}

	}

	return 0;
}
FString FSkookumScriptEditor::make_project_editable()
{
    // Move the SkookumScript project data out of its temporary location in
    // `Intermediate` and into the game folder so the user can edit it.
    // Returns an empty string on success, otherwise an error description.

    FString project_name(FApp::GetGameName());
    if (project_name.IsEmpty())
    {
        return TEXT("Tried to make project editable but engine has no project loaded!");
    }

    // Already editable? Then silently do nothing.
    FString dest_scripts_dir = FPaths::GameDir() / TEXT("Scripts");
    FString dest_project_file(dest_scripts_dir / TEXT("Skookum-project.ini"));
    if (FPaths::FileExists(dest_project_file))
    {
        return FString();
    }

    // Locate the non-editable project in the `Intermediate` folder.
    FString temp_root_dir(FPaths::GameIntermediateDir() / TEXT("SkookumScript"));
    FString temp_scripts_dir(temp_root_dir / TEXT("Scripts"));
    FString temp_project_file = temp_scripts_dir / TEXT("Skookum-project.ini");
    if (!FPaths::FileExists(temp_project_file))
    {
        return TEXT("Tried to make project editable but neither an editable nor a non-editable project was found!");
    }

    // Relocate the scripts into the editable location.
    if (!IFileManager::Get().Move(*dest_scripts_dir, *temp_scripts_dir, true, true))
    {
        return TEXT("Failed moving project information from temporary to editable location!");
    }

    // Move compiled binaries for convenience; failure here is harmless.
    FString temp_binary_dir = temp_root_dir / TEXT("Content/skookumscript");
    FString dest_binary_dir = FPaths::GameDir() / TEXT("Content/skookumscript");
    IFileManager::Get().Move(*dest_binary_dir, *temp_binary_dir, true, true);

    // Make sure packaging stages the Sk binaries (add the folder only if
    // it is not already present in the settings).
    UProjectPackagingSettings * packaging_settings_p = Cast<UProjectPackagingSettings>(UProjectPackagingSettings::StaticClass()->GetDefaultObject());
    const TCHAR * binary_path_name_p = TEXT("skookumscript");
    for (TArray<FDirectoryPath>::TConstIterator dir_path(packaging_settings_p->DirectoriesToAlwaysStageAsNonUFS); dir_path; ++dir_path)
    {
        if (dir_path->Path == binary_path_name_p)
        {
            binary_path_name_p = nullptr; // already staged
            break;
        }
    }
    if (binary_path_name_p)
    {
        FDirectoryPath binary_path;
        binary_path.Path = binary_path_name_p;
        packaging_settings_p->DirectoriesToAlwaysStageAsNonUFS.Add(binary_path);
        FString config_file_name = FPaths::GameConfigDir() / TEXT("DefaultGame.ini");
        if (ISourceControlModule::Get().IsEnabled())
        {
            SourceControlHelpers::CheckOutFile(config_file_name);
        }
        packaging_settings_p->SaveConfig(CPF_Config, *config_file_name);
    }

    // Create the Project overlay folder.
    IFileManager::Get().MakeDirectory(*(dest_scripts_dir / TEXT("Project/Object")), true);

    // Flip the project file to editable mode and register the overlay.
    FString proj_ini;
    verify(FFileHelper::LoadFileToString(proj_ini, *dest_project_file));
    proj_ini = proj_ini.Replace(m_editable_ini_settings_p, TEXT("")); // Remove editable settings
    proj_ini += TEXT("Overlay7=Project|Project\r\n"); // Create Project overlay definition
    verify(FFileHelper::SaveStringToFile(proj_ini, *dest_project_file, FFileHelper::EEncodingOptions::ForceAnsi));

    return FString();
}
Example #3
0
    void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {
        verify( fd == 0 && len == 0 ); // can't open more than once
        setFilename(filenameIn);
        FileAllocator::get()->allocateAsap( filenameIn, length );
        /* big hack here: Babble uses db names with colons.  doesn't seem to work on windows.  temporary perhaps. */
        char filename[256];
        strncpy(filename, filenameIn, 255);
        filename[255] = 0;
        {
            size_t len = strlen( filename );
            for ( size_t i=len-1; i>=0; i-- ) {
                if ( filename[i] == '/' ||
                        filename[i] == '\\' )
                    break;

                if ( filename[i] == ':' )
                    filename[i] = '_';
            }
        }

        updateLength( filename, length );

        {
            DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
            if ( options & SEQUENTIAL )
                createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
            DWORD rw = GENERIC_READ | GENERIC_WRITE;
            fd = CreateFileW(
                     toWideString(filename).c_str(),
                     rw, // desired access
                     FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode
                     NULL, // security
                     OPEN_ALWAYS, // create disposition
                     createOptions , // flags
                     NULL); // hTempl
            if ( fd == INVALID_HANDLE_VALUE ) {
                DWORD dosError = GetLastError();
                log() << "CreateFileW for " << filename
                        << " failed with " << errnoWithDescription( dosError )
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;
                return 0;
            }
        }

        mapped += length;

        {
            DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
            maphandle = CreateFileMappingW(fd, NULL, flProtect,
                                          length >> 32 /*maxsizehigh*/,
                                          (unsigned) length /*maxsizelow*/,
                                          NULL/*lpName*/);
            if ( maphandle == NULL ) {
                DWORD dosError = GetLastError();
                log() << "CreateFileMappingW for " << filename
                        << " failed with " << errnoWithDescription( dosError )
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;
                close();
                fassertFailed( 16225 );
            }
        }

        void *view = 0;
        {
            scoped_lock lk(mapViewMutex);
            DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;

            int current_retry = 0;
            while (true) {

                LPVOID thisAddress = getNextMemoryMappedFileLocation(length);

                view = MapViewOfFileEx(
                    maphandle,      // file mapping handle
                    access,         // access
                    0, 0,           // file offset, high and low
                    0,              // bytes to map, 0 == all
                    thisAddress);  // address to place file

                if (view == 0) {
                    DWORD dosError = GetLastError();

                    ++current_retry;

                    // If we failed to allocate a memory mapped file, try again in case we picked
                    // an address that Windows is also trying to use for some other VM allocations
                    if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
                        continue;
                    }

#ifndef _WIN64
                    // Warn user that if they are running a 32-bit app on 64-bit Windows
                    if (dosError == ERROR_NOT_ENOUGH_MEMORY) {
                        BOOL wow64Process;
                        BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);
                        if (retWow64 && wow64Process) {
                            log() << "This is a 32-bit MongoDB binary running on a 64-bit"
                                " operating system that has run out of virtual memory for"
                                " databases. Switch to a 64-bit build of MongoDB to open"
                                " the databases.";
                        }
                    }
#endif

                    log() << "MapViewOfFileEx for " << filename
                        << " at address " << thisAddress
                        << " failed with " << errnoWithDescription(dosError)
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;

                    close();
                    fassertFailed(16166);
                }

                break;
            }
        }

        views.push_back(view);
        len = length;
        return view;
    }
Example #4
0
File: sceNpTrophy.cpp  Project: O1L/rpcs3
// Guest syscall: fill in overall trophy information for a game.
// Parses the game's TROPCONF.SFM trophy configuration and tallies
// per-grade totals into `details` and unlocked counts into `data`.
// Returns CELL_OK on success or an SCE_NP_TROPHY_ERROR_* code when the
// context/handle ids do not resolve.
s32 sceNpTrophyGetGameInfo(u32 context, u32 handle, vm::ptr<SceNpTrophyGameDetails> details, vm::ptr<SceNpTrophyGameData> data)
{
	sceNpTrophy.error("sceNpTrophyGetGameInfo(context=0x%x, handle=0x%x, details=*0x%x, data=*0x%x)", context, handle, details, data);

	// Resolve the trophy context created by sceNpTrophyCreateContext.
	const auto ctxt = idm::get<trophy_context_t>(context);

	if (!ctxt)
	{
		return SCE_NP_TROPHY_ERROR_UNKNOWN_CONTEXT;
	}

	// The handle is only validated here, not otherwise used below.
	const auto hndl = idm::get<trophy_handle_t>(handle);

	if (!hndl)
	{
		return SCE_NP_TROPHY_ERROR_UNKNOWN_HANDLE;
	}

	// TODO: Get the path of the current user
	const std::string& path = vfs::get("/dev_hdd0/home/00000001/trophy/" + ctxt->trp_name + "/TROPCONF.SFM");
	
	// TODO: rXmlDocument can open only real file
	verify(HERE), !fs::get_virtual_device(path); 
	rXmlDocument doc;
	doc.Load(path);

	// Walk the configuration XML once, collecting the title strings and
	// counting trophies by grade.
	// NOTE(review): the counters in details/data are incremented without
	// being zeroed first -- presumably the guest supplies zero-initialized
	// structs; confirm against the real firmware behavior.
	std::string titleName;
	std::string titleDetail;
	for (std::shared_ptr<rXmlNode> n = doc.GetRoot()->GetChildren(); n; n = n->GetNext())
	{
		if (n->GetName() == "title-name")
			titleName = n->GetNodeContent();
		if (n->GetName() == "title-detail")
			titleDetail = n->GetNodeContent();
		if (n->GetName() == "trophy")
		{
			// "id" indexes the per-user unlock state table
			u32 trophy_id = atoi(n->GetAttribute("id").c_str());
			
			details->numTrophies++;
			// first letter of "ttype" encodes the trophy grade
			switch (n->GetAttribute("ttype")[0]) {
			case 'B': details->numBronze++;   break;
			case 'S': details->numSilver++;   break;
			case 'G': details->numGold++;     break;
			case 'P': details->numPlatinum++; break;
			}
			
			// Count the trophy again on the unlocked side if earned.
			if (ctxt->tropusr->GetTrophyUnlockState(trophy_id))
			{
				data->unlockedTrophies++;
				switch (n->GetAttribute("ttype")[0]) {
				case 'B': data->unlockedBronze++;   break;
				case 'S': data->unlockedSilver++;   break;
				case 'G': data->unlockedGold++;     break;
				case 'P': data->unlockedPlatinum++; break;
				}
			}
		}
	}

	// Truncating copies into the fixed-size guest string buffers.
	strcpy_trunc(details->title, titleName);
	strcpy_trunc(details->description, titleDetail);
	return CELL_OK;
}
Example #5
0
    /**
     * Handle an incoming OP_QUERY message: run the query and fill in
     * `dbresponse` with the reply message.  On an assertion during query
     * execution, converts the exception into an error reply document
     * (with stale-shard-version details when applicable) instead of
     * propagating.  Returns true on success, false when an exception was
     * converted into an error reply.
     */
    static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ) {
        bool ok = true;
        MSGID responseTo = m.header()->id;

        DbMessage d(m);
        QueryMessage q(d);
        auto_ptr< Message > resp( new Message() );

        CurOp& op = *(c.curop());

        // Captures a copy of any assertion thrown by runQuery so the
        // error reply can be built outside the catch blocks.
        shared_ptr<AssertionException> ex;

        try {
            if (!NamespaceString::isCommand(d.getns())) {
                // Auth checking for Commands happens later.
                Status status = cc().getAuthorizationManager()->checkAuthForQuery(d.getns());
                uassert(16550, status.reason(), status.isOK());
            }
            dbresponse.exhaustNS = runQuery(m, q, op, *resp);
            verify( !resp->empty() );
        }
        catch ( SendStaleConfigException& e ){
            // Preserve the shard-version details for the error document.
            ex.reset( new SendStaleConfigException( e.getns(), e.getInfo().msg, e.getVersionReceived(), e.getVersionWanted() ) );
            ok = false;
        }
        catch ( AssertionException& e ) {
            ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );
            ok = false;
        }

        if( ex ){
            // Build an error reply in place of the normal query result.
            op.debug().exceptionInfo = ex->getInfo();
            LOGWITHRATELIMIT {
                log() << "assertion " << ex->toString() << " ns:" << q.ns << " query:" <<
                (q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;
                if( q.ntoskip || q.ntoreturn )
                    log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << endl;
            }

            SendStaleConfigException* scex = NULL;
            if ( ex->getCode() == SendStaleConfigCode ) scex = static_cast<SendStaleConfigException*>( ex.get() );

            // Error document: assertion info plus shard-version fields
            // when the failure was a stale config.
            BSONObjBuilder err;
            ex->getInfo().append( err );
            if( scex ){
                err.append( "ns", scex->getns() );
                scex->getVersionReceived().addToBSON( err, "vReceived" );
                scex->getVersionWanted().addToBSON( err, "vWanted" );
            }
            BSONObj errObj = err.done();

            if( scex ){
                log() << "stale version detected during query over "
                      << q.ns << " : " << errObj << endl;
            }
            else{
                log() << "problem detected during query over "
                      << q.ns << " : " << errObj << endl;
            }

            // Serialize the error document into a wire-format QueryResult:
            // header space first, then the BSON payload.
            BufBuilder b;
            b.skip(sizeof(QueryResult));
            b.appendBuf((void*) errObj.objdata(), errObj.objsize());

            // todo: call replyToQuery() from here instead of this!!! see dbmessage.h
            QueryResult * msgdata = (QueryResult *) b.buf();
            b.decouple();           // ownership moves to the Message below
            QueryResult *qr = msgdata;
            qr->_resultFlags() = ResultFlag_ErrSet;
            if( scex ) qr->_resultFlags() |= ResultFlag_ShardConfigStale;
            qr->len = b.len();
            qr->setOperation(opReply);
            qr->cursorId = 0;
            qr->startingFrom = 0;
            qr->nReturned = 1;
            resp.reset( new Message() );
            resp->setData( msgdata, true );

        }

        op.debug().responseLength = resp->header()->dataLen();

        dbresponse.response = resp.release();
        dbresponse.responseTo = responseTo;

        return ok;
    }
Example #6
0
    /**
     * Check the config database's metadata version and, when permitted,
     * upgrade it in place.
     *
     * Returns 0 when the version already matches (or was freshly
     * initialized), 1 after a successful v2 -> v3 upgrade, -9 when an
     * upgrade is needed but `upgrade` was not requested, and -8 for any
     * version this code does not know how to handle.
     *
     * The v2 -> v3 upgrade backs up the config database, then rewrites
     * the shards, databases, and chunks collections to the new _id
     * conventions.
     */
    int ConfigServer::checkConfigVersion( bool upgrade ) {
        int cur = dbConfigVersion();
        if ( cur == VERSION )
            return 0;

        // Fresh install: stamp the current version and we're done.
        if ( cur == 0 ) {
            ScopedDbConnection conn( _primary );
            conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
            pool.flush();
            verify( VERSION == dbConfigVersion( conn.conn() ) );
            conn.done();
            return 0;
        }

        if ( cur == 2 ) {

            // need to upgrade
            verify( VERSION == 3 );
            if ( ! upgrade ) {
                log() << "newer version of mongo meta data\n"
                      << "need to --upgrade after shutting all mongos down"
                      << endl;
                return -9;
            }

            ScopedDbConnection conn( _primary );

            // do a backup
            string backupName;
            {
                stringstream ss;
                ss << "config-backup-" << terseCurrentTime(false);
                backupName = ss.str();
            }
            log() << "backing up config to: " << backupName << endl;
            conn->copyDatabase( "config" , backupName );

            map<string,string> hostToShard;
            set<string> shards;
            // shards: re-key each shard document by name, generating
            // "shardN" names for documents whose _id was not a string.
            {
                unsigned n = 0;
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );
                while ( c->more() ) {
                    BSONObj o = c->next();
                    string host = o["host"].String();

                    string name = "";

                    BSONElement id = o["_id"];
                    if ( id.type() == String ) {
                        name = id.String();
                    }
                    else {
                        // synthesize a sequential shard name
                        stringstream ss;
                        ss << "shard" << hostToShard.size();
                        name = ss.str();
                    }

                    hostToShard[host] = name;
                    shards.insert( name );
                    n++;
                }

                // every host must map to a unique shard name
                verify( n == hostToShard.size() );
                verify( n == shards.size() );

                conn->remove( ShardNS::shard , BSONObj() );

                for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {
                    conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );
                }
            }

            // databases: move the "name" field into _id, dropping the
            // old _id and name fields from each document.
            {
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );
                map<string,BSONObj> newDBs;
                unsigned n = 0;
                while ( c->more() ) {
                    BSONObj old = c->next();
                    n++;

                    if ( old["name"].eoo() ) {
                        // already done
                        newDBs[old["_id"].String()] = old;
                        continue;
                    }

                    BSONObjBuilder b(old.objsize());
                    b.appendAs( old["name"] , "_id" );

                    // copy every other field unchanged
                    BSONObjIterator i(old);
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( strcmp( "_id" , e.fieldName() ) == 0 ||
                                strcmp( "name" , e.fieldName() ) == 0 ) {
                            continue;
                        }

                        b.append( e );
                    }

                    BSONObj x = b.obj();
                    log() << old << "\n\t" << x << endl;
                    newDBs[old["name"].String()] = x;
                }

                verify( n == newDBs.size() );

                conn->remove( ShardNS::database , BSONObj() );

                for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ) {
                    conn->insert( ShardNS::database , i->second );
                }

            }

            // chunks: re-key each chunk document by the generated
            // "<ns>-<min>" id, preserving all other fields.
            {
                unsigned num = 0;
                map<string,BSONObj> chunks;
                auto_ptr<DBClientCursor> c = conn->query( ShardNS::chunk , BSONObj() );
                while ( c->more() ) {
                    BSONObj x = c->next();
                    BSONObjBuilder b;

                    string id = Chunk::genID( x["ns"].String() , x["min"].Obj() );
                    b.append( "_id" , id );

                    BSONObjIterator i(x);
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( strcmp( e.fieldName() , "_id" ) == 0 )
                            continue;
                        b.append( e );
                    }

                    BSONObj n = b.obj();
                    log() << x << "\n\t" << n << endl;
                    chunks[id] = n;
                    num++;
                }

                verify( num == chunks.size() );

                conn->remove( ShardNS::chunk , BSONObj() );
                for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
                    conn->insert( ShardNS::chunk , i->second );
                }

            }

            // Stamp the new version last, after all collections are
            // rewritten, and drop pooled connections so everyone
            // re-reads it.
            conn->update( "config.version" , BSONObj() , BSON( "_id" << 1 << "version" << VERSION ) );
            conn.done();
            pool.flush();
            return 1;
        }

        log() << "don't know how to upgrade " << cur << " to " << VERSION << endl;
        return -8;
    }
Example #7
0
    /**
     * Pull the next result from the plan tree.
     *
     * Works the root stage until it ADVANCEs, hits EOF, dies, or fails.
     * On ADVANCED, copies the requested pieces of the WorkingSetMember
     * into *objOut and/or *dlOut (either may be NULL if not wanted) and
     * frees the member.  If the member lacks the requested data, it is
     * freed and the loop continues to the next result.
     */
    PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
        if (_killed) { return PlanExecutor::DEAD; }

        for (;;) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState code = _root->work(&id);

            if (PlanStage::ADVANCED == code) {
                // Fast count: ADVANCED with no member means the caller
                // asked for neither object nor location.
                if (WorkingSet::INVALID_ID == id) {
                    invariant(NULL == objOut);
                    invariant(NULL == dlOut);
                    return PlanExecutor::ADVANCED;
                }

                WorkingSetMember* member = _workingSet->get(id);
                bool hasRequestedData = true;

                if (NULL != objOut) {
                    if (WorkingSetMember::LOC_AND_IDX == member->state) {
                        // Index-only result: usable only when exactly one
                        // key is present.
                        if (1 != member->keyData.size()) {
                            _workingSet->free(id);
                            hasRequestedData = false;
                        }
                        else {
                            *objOut = member->keyData[0].keyData;
                        }
                    }
                    else if (member->hasObj()) {
                        *objOut = member->obj;
                    }
                    else {
                        // caller wants an object but this member has none
                        _workingSet->free(id);
                        hasRequestedData = false;
                    }
                }

                if (NULL != dlOut) {
                    if (member->hasLoc()) {
                        *dlOut = member->loc;
                    }
                    else {
                        _workingSet->free(id);
                        hasRequestedData = false;
                    }
                }

                if (hasRequestedData) {
                    // outputs were copied; the member can be released
                    _workingSet->free(id);
                    return PlanExecutor::ADVANCED;
                }
                // This result didn't have the data the caller wanted, try again.
            }
            else if (PlanStage::NEED_TIME == code) {
                // Fall through to yield check at end of large conditional.
            }
            else if (PlanStage::IS_EOF == code) {
                return PlanExecutor::IS_EOF;
            }
            else if (PlanStage::DEAD == code) {
                return PlanExecutor::DEAD;
            }
            else {
                verify(PlanStage::FAILURE == code);
                // surface the failure details to the caller if possible
                if (NULL != objOut) {
                    WorkingSetCommon::getStatusMemberObject(*_workingSet, id, objOut);
                }
                return PlanExecutor::EXEC_ERROR;
            }
        }
    }
Example #8
0
 // Sharding is the only user of the client-side killCursor command, and a
 // direct (in-process) client inside MongoD must never reach this path --
 // reaching it is a programming error, so fail hard.
 void DBDirectClient::killCursor(long long id) {
     verify(!"killCursor should not be used in MongoD");
 }
Example #9
0
/*
 * Iteration callback used while gathering a property changelist: decide
 * whether 'zhp' (and possibly its children) must be recorded for
 * unmount/unshare before the property change takes effect.
 *
 * Handle ownership: on every path that does NOT add 'zhp' to the list the
 * handle is released with zfs_close() here; when a changenode is created,
 * ownership of 'zhp' transfers to the changelist via cn->cn_handle and the
 * handle must not be closed by this function.
 *
 * Returns 0 to continue iteration, -1 on allocation failure.
 */
static int
change_one(zfs_handle_t *zhp, void *data)
{
	prop_changelist_t *clp = data;
	char property[ZFS_MAXPROPLEN];
	char where[64];
	prop_changenode_t *cn;
	zprop_source_t sourcetype = ZPROP_SRC_NONE;
	zprop_source_t share_sourcetype = ZPROP_SRC_NONE;

	/*
	 * We only want to unmount/unshare those filesystems that may inherit
	 * from the target filesystem.  If we find any filesystem with a
	 * locally set mountpoint, we ignore any children since changing the
	 * property will not affect them.  If this is a rename, we iterate
	 * over all children regardless, since we need them unmounted in
	 * order to do the rename.  Also, if this is a volume and we're doing
	 * a rename, then always add it to the changelist.
	 */

	if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&
	    zfs_prop_get(zhp, clp->cl_prop, property,
	    sizeof (property), &sourcetype, where, sizeof (where),
	    B_FALSE) != 0) {
		/* Property not readable here: dataset is unaffected; drop it. */
		zfs_close(zhp);
		return (0);
	}

	/*
	 * If we are "watching" sharenfs or sharesmb
	 * then check out the companion property which is tracked
	 * in cl_shareprop
	 */
	if (clp->cl_shareprop != ZPROP_INVAL &&
	    zfs_prop_get(zhp, clp->cl_shareprop, property,
	    sizeof (property), &share_sourcetype, where, sizeof (where),
	    B_FALSE) != 0) {
		/* Companion share property unreadable: skip this dataset too. */
		zfs_close(zhp);
		return (0);
	}

	/*
	 * Record the dataset when we must gather everything
	 * (alldependents/allchildren) or when the watched property (or its
	 * share companion) is inherited/defaulted and so will change with
	 * the parent.
	 */
	if (clp->cl_alldependents || clp->cl_allchildren ||
	    sourcetype == ZPROP_SRC_DEFAULT ||
	    sourcetype == ZPROP_SRC_INHERITED ||
	    (clp->cl_shareprop != ZPROP_INVAL &&
	    (share_sourcetype == ZPROP_SRC_DEFAULT ||
	    share_sourcetype == ZPROP_SRC_INHERITED))) {
		if ((cn = zfs_alloc(zfs_get_handle(zhp),
		    sizeof (prop_changenode_t))) == NULL) {
			zfs_close(zhp);
			return (-1);
		}

		/* Ownership of 'zhp' now belongs to the changelist node. */
		cn->cn_handle = zhp;
		cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
		    zfs_is_mounted(zhp, NULL);
		cn->cn_shared = zfs_is_shared(zhp);
		cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
		cn->cn_needpost = B_TRUE;

		/* Indicate if any child is exported to a local zone. */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			clp->cl_haszonedchild = B_TRUE;

		uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);

		if (clp->cl_sorted) {
			uu_list_index_t idx;

			(void) uu_list_find(clp->cl_list, cn, NULL,
			    &idx);
			uu_list_insert(clp->cl_list, cn, idx);
		} else {
			/*
			 * Add this child to beginning of the list. Children
			 * below this one in the hierarchy will get added above
			 * this one in the list. This produces a list in
			 * reverse dataset name order.
			 * This is necessary when the original mountpoint
			 * is legacy or none.
			 */
			ASSERT(!clp->cl_alldependents);
			verify(uu_list_insert_before(clp->cl_list,
			    uu_list_first(clp->cl_list), cn) == 0);
		}

		/*
		 * In the alldependents case the caller already iterates the
		 * whole subtree, so only recurse ourselves otherwise.
		 */
		if (!clp->cl_alldependents)
			return (zfs_iter_children(zhp, change_one, data));
	} else {
		zfs_close(zhp);
	}

	return (0);
}
示例#10
0
文件: libzfs_import.c 项目: AB17/zfs
/*
 * Determines if the pool is in use.  If so, it returns B_TRUE in '*inuse' and
 * the state of the pool as well as the name of the pool.  The name string is
 * allocated and must be freed by the caller.
 *
 * Returns 0 on success (whether or not the pool is in use) and -1 on error.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	/* No label at all: the device is not part of any pool. */
	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	/* Spare and l2cache labels carry no pool name/guid of their own. */
	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		zhp = NULL;
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
			stateval = POOL_STATE_ACTIVE;

		/*
		 * The handle was needed only to read the readonly property;
		 * close it so it doesn't leak (the previous code never did).
		 */
		if (zhp != NULL)
			zpool_close(zhp);

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that currently
			 * in use within another pool.  Since we return B_TRUE,
			 * libdiskmgt will continue to prevent generic consumers
			 * from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;	/* was TRUE: ret is a boolean_t */
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		/* Hand a copy of the name (and the state) back to the caller. */
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
示例#11
0
文件: libzfs_import.c 项目: AB17/zfs
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist mapping pool name -> pool config, or NULL if nothing
 * importable was found or on error.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	/* 'ret' collects one entry per importable pool: name -> config. */
	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				/* hostid of 0 means "not present in label". */
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			/* Grow the child array so index 'id' is addressable. */
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		/* Re-read the pool guid recorded above; the root vdev needs it. */
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* nvlist_add_nvlist_array() copied the children; free ours. */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	/* Nothing importable: hand back NULL rather than an empty nvlist. */
	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

	/* nomem reports the failure, then falls through to shared cleanup. */
nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
示例#12
0
文件: libzfs_import.c 项目: AB17/zfs
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 *
 * Returns 0 on success (including "no matching name entry found") and -1 if
 * the updated path or devid could not be stored in the nvlist.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int err;

	/* Interior vdev: recurse into each child and fix it up instead. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk.  In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid != guid)
			continue;

		/* Without a label path, the first guid match wins. */
		if (path == NULL) {
			best = ne;
			break;
		}

		/*
		 * An exact match with the label's path is always preferred.
		 * (The original strlen()+strncmp() combination was just a
		 * long-winded strcmp().)
		 */
		if (strcmp(path, ne->ne_name) == 0) {
			best = ne;
			break;
		}

		/* Otherwise remember the lowest-order (first-found) entry. */
		if (best == NULL || ne->ne_order < best->ne_order)
			best = ne;
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		/* No devid available; drop any stale one from the config. */
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		/*
		 * Free the devid string on both paths — the original leaked
		 * it when nvlist_add_string() failed.
		 */
		err = nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid);
		devid_str_free(devid);
		if (err != 0)
			return (-1);
	}

	return (0);
}
示例#13
0
文件: libzfs_import.c 项目: AB17/zfs
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 *
 * Returns an nvlist mapping pool name -> refreshed config, or NULL on any
 * failure (after reporting it via the libzfs handle).
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	/* Filtering by name and by guid at once is a caller bug. */
	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		/*
		 * The guid is needed unconditionally for pool_active() below;
		 * the original code redundantly looked it up a second time
		 * inside the guid-filter branch.
		 */
		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		/* Currently-imported pools are not importable; skip them. */
		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		/* nvlist_add_nvlist() took a copy; release ours. */
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
示例#14
0
文件: libzfs_import.c 项目: AB17/zfs
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 *
 * Returns the nvlist of configs from get_configs(), or NULL on error.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	/* The caller may filter by name or by guid, but never both. */
	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all device for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == NULL) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			/*
			 * Pass strerror() output as an argument, never as the
			 * format string itself (format-string hazard; also
			 * matches the other zfs_error_aux() call sites).
			 */
			zfs_error_aux(hdl, "%s", strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			/*
			 * If open64() succeeded but fdopendir() failed, the
			 * descriptor is not owned by any DIR and must be
			 * closed here or it leaks.
			 */
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, "%s", strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when access from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0)     ||
			    (strncmp(name, "ppp", 3) == 0)      ||
			    (strncmp(name, "tty", 3) == 0)      ||
			    (strncmp(name, "vcs", 3) == 0)      ||
			    (strncmp(name, "parport", 7) == 0)  ||
			    (strncmp(name, "lp", 2) == 0)       ||
			    (strncmp(name, "fd", 2) == 0)       ||
			    (strncmp(name, "hpet", 4) == 0)     ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;
				char *pname;

				if ((iarg->poolname != NULL) &&
				    (nvlist_lookup_string(config,
				    ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {

					if (strcmp(iarg->poolname, pname))
						matched = B_FALSE;

				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, i+1, config))
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	/* Tear down the intermediate pool/vdev/config/name lists. */
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		/* free(NULL) is a no-op, so no guard is needed. */
		free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
示例#15
0
    // static
    //
    // Post-process the data-access tree 'solnRoot' into a complete QuerySolution:
    // wrap it with whatever stages (fetch, shard filter, sort, keep-mutations,
    // projection, skip, limit) are needed to produce the results the user asked
    // for in 'query', subject to the planner options in 'params'.
    //
    // Takes ownership of 'solnRoot'.  Returns a heap-allocated QuerySolution the
    // caller owns, or NULL if a required sort cannot be provided (analyzeSort
    // returns NULL when a blocking sort is needed but not allowed).
    QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
                                                           const QueryPlannerParams& params,
                                                           QuerySolutionNode* solnRoot) {
        auto_ptr<QuerySolution> soln(new QuerySolution());
        soln->filterData = query.getQueryObj();
        verify(soln->filterData.isOwned());
        soln->indexFilterApplied = params.indexFiltersApplied;

        solnRoot->computeProperties();

        // solnRoot finds all our results.  Let's see what transformations we must perform to the
        // data.

        // If we're answering a query on a sharded system, we need to drop documents that aren't
        // logically part of our shard.
        if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
            // TODO: We could use params.shardKey to do fetch analysis instead of always fetching.
            if (!solnRoot->fetched()) {
                // The sharding filter needs the whole document; fetch before filtering.
                FetchNode* fetch = new FetchNode();
                fetch->children.push_back(solnRoot);
                solnRoot = fetch;
            }
            ShardingFilterNode* sfn = new ShardingFilterNode();
            sfn->children.push_back(solnRoot);
            solnRoot = sfn;
        }

        bool hasSortStage = false;
        solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);

        // This can happen if we need to create a blocking sort stage and we're not allowed to.
        if (NULL == solnRoot) { return NULL; }

        // A solution can be blocking if it has a blocking sort stage or
        // a hashed AND stage.
        bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
        soln->hasBlockingStage = hasSortStage || hasAndHashStage;

        // If we can (and should), add the keep mutations stage.

        // We cannot keep mutated documents if:
        //
        // 1. The query requires an index to evaluate the predicate ($text).  We can't tell whether
        // or not the doc actually satisfies the $text predicate since we can't evaluate a
        // text MatchExpression.
        //
        // 2. The query implies a sort ($geoNear).  It would be rather expensive and hacky to merge
        // the document at the right place.
        //
        // 3. There is an index-provided sort.  Ditto above comment about merging.
        //
        // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
        // them?  We do lookups in the tree a few times.  This may not matter as most trees are
        // shallow in terms of query nodes.
        bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT)
                              || hasNode(solnRoot, STAGE_GEO_NEAR_2D)
                              || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE)
                              || (!query.getParsed().getSort().isEmpty() && !hasSortStage);

        // Only these stages can produce flagged results.  A stage has to hold state past one call
        // to work(...) in order to possibly flag a result.
        bool couldProduceFlagged = hasAndHashStage
                                || hasNode(solnRoot, STAGE_AND_SORTED)
                                || hasNode(solnRoot, STAGE_FETCH);

        bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;

        if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
            KeepMutationsNode* keep = new KeepMutationsNode();

            // We must run the entire expression tree to make sure the document is still valid.
            keep->filter.reset(query.root()->shallowClone());

            if (STAGE_SORT == solnRoot->getType()) {
                // We want to insert the invalidated results before the sort stage, if there is one.
                verify(1 == solnRoot->children.size());
                keep->children.push_back(solnRoot->children[0]);
                solnRoot->children[0] = keep;
            }
            else {
                keep->children.push_back(solnRoot);
                solnRoot = keep;
            }
        }

        // Project the results.
        if (NULL != query.getProj()) {
            QLOG() << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
            QLOG() << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;

            ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
            BSONObj coveredKeyObj;

            if (query.getProj()->requiresDocument()) {
                QLOG() << "PROJECTION: claims to require doc adding fetch.\n";
                // If the projection requires the entire document, somebody must fetch.
                if (!solnRoot->fetched()) {
                    FetchNode* fetch = new FetchNode();
                    fetch->children.push_back(solnRoot);
                    solnRoot = fetch;
                }
            }
            else if (!query.getProj()->wantIndexKey()) {
                // The only way we're here is if it's a simple projection.  That is, we can pick out
                // the fields we want to include and they're not dotted.  So we want to execute the
                // projection in the fast-path simple fashion.  Just don't know which fast path yet.
                QLOG() << "PROJECTION: requires fields\n";
                const vector<string>& fields = query.getProj()->getRequiredFields();
                bool covered = true;
                for (size_t i = 0; i < fields.size(); ++i) {
                    if (!solnRoot->hasField(fields[i])) {
                        QLOG() << "PROJECTION: not covered due to field "
                             << fields[i] << endl;
                        covered = false;
                        break;
                    }
                }

                QLOG() << "PROJECTION: is covered?: = " << covered << endl;

                // If any field is missing from the list of fields the projection wants,
                // a fetch is required.
                if (!covered) {
                    FetchNode* fetch = new FetchNode();
                    fetch->children.push_back(solnRoot);
                    solnRoot = fetch;

                    // It's simple but we'll have the full document and we should just iterate
                    // over that.
                    projType = ProjectionNode::SIMPLE_DOC;
                    QLOG() << "PROJECTION: not covered, fetching.";
                }
                else {
                    if (solnRoot->fetched()) {
                        // Fetched implies hasObj() so let's run with that.
                        projType = ProjectionNode::SIMPLE_DOC;
                        QLOG() << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
                    }
                    else {
                        // If we're here we're not fetched so we're covered.  Let's see if we can
                        // get out of using the default projType.  If there's only one leaf
                        // underneath and it's giving us index data we can use the faster covered
                        // impl.
                        vector<QuerySolutionNode*> leafNodes;
                        getLeafNodes(solnRoot, &leafNodes);

                        if (1 == leafNodes.size()) {
                            // Both the IXSCAN and DISTINCT stages provide covered key data.
                            if (STAGE_IXSCAN == leafNodes[0]->getType()) {
                                projType = ProjectionNode::COVERED_ONE_INDEX;
                                IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
                                coveredKeyObj = ixn->indexKeyPattern;
                                QLOG() << "PROJECTION: covered via IXSCAN, using COVERED fast path";
                            }
                            else if (STAGE_DISTINCT == leafNodes[0]->getType()) {
                                projType = ProjectionNode::COVERED_ONE_INDEX;
                                DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
                                coveredKeyObj = dn->indexKeyPattern;
                                QLOG() << "PROJECTION: covered via DISTINCT, using COVERED fast path";
                            }
                        }
                    }
                }
            }

            // We now know we have whatever data is required for the projection.
            ProjectionNode* projNode = new ProjectionNode();
            projNode->children.push_back(solnRoot);
            projNode->fullExpression = query.root();
            projNode->projection = query.getParsed().getProj();
            projNode->projType = projType;
            projNode->coveredKeyObj = coveredKeyObj;
            solnRoot = projNode;
        }
        else {
            // If there's no projection, we must fetch, as the user wants the entire doc.
            if (!solnRoot->fetched()) {
                FetchNode* fetch = new FetchNode();
                fetch->children.push_back(solnRoot);
                solnRoot = fetch;
            }
        }

        if (0 != query.getParsed().getSkip()) {
            SkipNode* skip = new SkipNode();
            skip->skip = query.getParsed().getSkip();
            skip->children.push_back(solnRoot);
            solnRoot = skip;
        }

        // When there is both a blocking sort and a limit, the limit will
        // be enforced by the blocking sort.
        // Otherwise, we need to limit the results in the case of a hard limit
        // (ie. limit in raw query is negative)
        if (0 != query.getParsed().getNumToReturn() &&
            !hasSortStage &&
            !query.getParsed().wantMore()) {

            LimitNode* limit = new LimitNode();
            limit->limit = query.getParsed().getNumToReturn();
            limit->children.push_back(solnRoot);
            solnRoot = limit;
        }

        // Hand the finished tree to the solution; soln now owns solnRoot.
        soln->root.reset(solnRoot);
        return soln.release();
    }
示例#16
0
/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process.  For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property.  For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 *
 * 'gather_flags' and 'mnt_flags' are recorded in the changelist (cl_gflags /
 * cl_mflags) for later use by the mount/unmount/share operations.
 *
 * Returns the allocated changelist, or NULL on failure (allocation failure,
 * an error while iterating children/dependents, or failure to re-open 'zhp').
 * The caller must release the result with changelist_free().
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
    int mnt_flags)
{
	prop_changelist_t *clp;
	prop_changenode_t *cn;
	zfs_handle_t *temp;
	char property[ZFS_MAXPROPLEN];
	uu_compare_fn_t *compare = NULL;
	boolean_t legacy = B_FALSE;

	if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
		return (NULL);

	/*
	 * For mountpoint-related tasks, we want to sort everything by
	 * mountpoint, so that we mount and unmount them in the appropriate
	 * order, regardless of their position in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
	    prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS ||
	    prop == ZFS_PROP_SHARESMB) {

		/*
		 * A 'legacy' or 'none' mountpoint means there is no meaningful
		 * mountpoint ordering, so skip the sorted-list setup below.
		 */
		if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
		    property, sizeof (property),
		    NULL, NULL, 0, B_FALSE) == 0 &&
		    (strcmp(property, "legacy") == 0 ||
		    strcmp(property, "none") == 0)) {

			legacy = B_TRUE;
		}
		if (!legacy) {
			compare = compare_mountpoints;
			clp->cl_sorted = B_TRUE;
		}
	}

	/* Create the list pool; 'compare' is NULL for unsorted lists. */
	clp->cl_pool = uu_list_pool_create("changelist_pool",
	    sizeof (prop_changenode_t),
	    offsetof(prop_changenode_t, cn_listnode),
	    compare, 0);
	if (clp->cl_pool == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	clp->cl_list = uu_list_create(clp->cl_pool, NULL,
	    clp->cl_sorted ? UU_LIST_SORTED : 0);
	clp->cl_gflags = gather_flags;
	clp->cl_mflags = mnt_flags;

	if (clp->cl_list == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * If this is a rename or the 'zoned' property, we pretend we're
	 * changing the mountpoint and flag it so we can catch all children in
	 * change_one().
	 *
	 * Flag cl_alldependents to catch all children plus the dependents
	 * (clones) that are not in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_alldependents = B_TRUE;
	} else if (prop == ZFS_PROP_ZONED) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_allchildren = B_TRUE;
	} else if (prop == ZFS_PROP_CANMOUNT) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else if (prop == ZFS_PROP_VOLSIZE) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else {
		clp->cl_prop = prop;
	}
	clp->cl_realprop = prop;

	/*
	 * Only mount/share-related properties require gathering the dataset
	 * tree; everything else returns the (empty) changelist as-is.
	 */
	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
	    clp->cl_prop != ZFS_PROP_SHARENFS &&
	    clp->cl_prop != ZFS_PROP_SHARESMB)
		return (clp);

	/*
	 * If watching SHARENFS or SHARESMB then
	 * also watch its companion property.
	 */
	if (clp->cl_prop == ZFS_PROP_SHARENFS)
		clp->cl_shareprop = ZFS_PROP_SHARESMB;
	else if (clp->cl_prop == ZFS_PROP_SHARESMB)
		clp->cl_shareprop = ZFS_PROP_SHARENFS;

	/* Populate the list via change_one() callbacks. */
	if (clp->cl_alldependents) {
		if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
			changelist_free(clp);
			return (NULL);
		}
	} else if (zfs_iter_children(zhp, change_one, clp) != 0) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * We have to re-open ourselves because we auto-close all the handles
	 * and can't tell the difference.
	 */
	if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
	    ZFS_TYPE_DATASET)) == NULL) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * Always add ourself to the list.  We add ourselves to the end so that
	 * we're the last to be unmounted.
	 */
	if ((cn = zfs_alloc(zhp->zfs_hdl,
	    sizeof (prop_changenode_t))) == NULL) {
		zfs_close(temp);
		changelist_free(clp);
		return (NULL);
	}

	cn->cn_handle = temp;
	cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
	    zfs_is_mounted(temp, NULL);
	cn->cn_shared = zfs_is_shared(temp);
	cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
	cn->cn_needpost = B_TRUE;

	uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
	if (clp->cl_sorted) {
		uu_list_index_t idx;
		(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
		uu_list_insert(clp->cl_list, cn, idx);
	} else {
		/*
		 * Add the target dataset to the end of the list.
		 * The list is not really unsorted. The list will be
		 * in reverse dataset name order. This is necessary
		 * when the original mountpoint is legacy or none.
		 */
		verify(uu_list_insert_after(clp->cl_list,
		    uu_list_last(clp->cl_list), cn) == 0);
	}

	/*
	 * If the mountpoint property was previously 'legacy', or 'none',
	 * record it as the behavior of changelist_postfix() will be different.
	 */
	if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && legacy) {
		/*
		 * do not automatically mount ex-legacy datasets if
		 * we specifically set canmount to noauto
		 */
		if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=
		    ZFS_CANMOUNT_NOAUTO)
			clp->cl_waslegacy = B_TRUE;
	}

	return (clp);
}
示例#17
0
    // Perform one unit of work for the merge-sort stage.
    //
    // The stage holds one pending result per non-EOF child.  Phase 1: while
    // some child still owes us a result (_noResultToMerge is non-empty), work
    // that child and stash its result (dedup'd by DiskLoc if _dedup is set).
    // Phase 2: once every child has contributed, pop the minimum from the
    // _merging priority queue and return its WorkingSetID in '*out'.
    //
    // Returns ADVANCED with a result, NEED_TIME/NEED_FETCH while gathering,
    // FAILURE on child failure, or IS_EOF when all children are exhausted.
    PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
        ++_commonStats.works;

        if (isEOF()) { return PlanStage::IS_EOF; }

        if (!_noResultToMerge.empty()) {
            // We have some child that we don't have a result from.  Each child must have a result
            // in order to pick the minimum result among all our children.  Work a child.
            PlanStage* child = _noResultToMerge.front();
            WorkingSetID id = WorkingSet::INVALID_ID;
            StageState code = child->work(&id);

            if (PlanStage::ADVANCED == code) {
                // If we're deduping...
                if (_dedup) {
                    WorkingSetMember* member = _ws->get(id);

                    if (!member->hasLoc()) {
                        // Can't dedup data unless there's a DiskLoc.  We go ahead and use its
                        // result.
                        _noResultToMerge.pop();
                    }
                    else {
                        ++_specificStats.dupsTested;
                        // ...and there's a diskloc and and we've seen the DiskLoc before
                        if (_seen.end() != _seen.find(member->loc)) {
                            // ...drop it.
                            _ws->free(id);
                            ++_commonStats.needTime;
                            ++_specificStats.dupsDropped;
                            return PlanStage::NEED_TIME;
                        }
                        else {
                            // Otherwise, note that we've seen it.
                            _seen.insert(member->loc);
                            // We're going to use the result from the child, so we remove it from
                            // the queue of children without a result.
                            _noResultToMerge.pop();
                        }
                    }
                }
                else {
                    // Not deduping.  We use any result we get from the child.  Remove the child
                    // from the queue of things without a result.
                    _noResultToMerge.pop();
                }

                // Store the result in our list.
                StageWithValue value;
                value.id = id;
                value.stage = child;
                _mergingData.push_front(value);

                // Insert the result (indirectly) into our priority queue.
                _merging.push(_mergingData.begin());

                ++_commonStats.needTime;
                return PlanStage::NEED_TIME;
            }
            else if (PlanStage::IS_EOF == code) {
                // There are no more results possible from this child.  Don't bother with it
                // anymore.
                _noResultToMerge.pop();
                ++_commonStats.needTime;
                return PlanStage::NEED_TIME;
            }
            else if (PlanStage::FAILURE == code) {
                *out = id;
                return code;
            }
            else {
                // NEED_FETCH / NEED_TIME: propagate the child's request upward.
                if (PlanStage::NEED_FETCH == code) {
                    *out = id;
                    ++_commonStats.needFetch;
                }
                else if (PlanStage::NEED_TIME == code) {
                    ++_commonStats.needTime;
                }
                return code;
            }
        }

        // If we're here, for each non-EOF child, we have a valid WSID.
        verify(!_merging.empty());

        // Get the 'min' WSID.  _merging is a priority queue so its top is the smallest.
        MergingRef top = _merging.top();
        _merging.pop();

        // Since we're returning the WSID that came from top->stage, we need to work(...) it again
        // to get a new result.
        _noResultToMerge.push(top->stage);

        // Save the ID that we're returning and remove the returned result from our data.
        WorkingSetID idToTest = top->id;
        _mergingData.erase(top);

        // Return the min.
        *out = idToTest;
        ++_commonStats.advanced;

        // But don't return it if it's flagged.
        if (_ws->isFlagged(*out)) {
            _ws->free(*out);
            return PlanStage::NEED_TIME;
        }

        return PlanStage::ADVANCED;
    }
示例#18
0
/*
 * Program entry point: initialize the input/global state, compute the
 * connected components, and verify the result.
 *
 * Command-line arguments are currently unused.  The original signature used
 * the unconventional names 'argd'/'args'; renamed to the standard
 * 'argc'/'argv' and explicitly voided to silence unused-parameter warnings.
 */
int main(int argc, char *argv[]) {
    (void)argc;  /* unused */
    (void)argv;  /* unused */

    init();             /* build the data structures */
    connected_comp();   /* run the connected-components computation */
    verify();           /* check the computed result */
    return 0;
}
示例#19
0
    // Parse an aggregation command object 'cmdObj' into a Pipeline.
    //
    // First harvests the command's top-level fields (collection name, the
    // 'pipeline' array, explain/debug flags), then builds one DocumentSource
    // per pipeline stage, and finally applies source-level rewrites: swap
    // $match ahead of an adjacent $sort, move $limit in front of $skip,
    // coalesce adjacent sources, and optimize() each remaining stage.
    //
    // On failure sets 'errmsg' and returns an empty intrusive_ptr; otherwise
    // returns the constructed pipeline.
    intrusive_ptr<Pipeline> Pipeline::parseCommand(
        string &errmsg, BSONObj &cmdObj,
        const intrusive_ptr<ExpressionContext> &pCtx) {
        intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
        vector<BSONElement> pipeline;

        /* gather the specification for the aggregation */
        for(BSONObj::iterator cmdIterator = cmdObj.begin();
                cmdIterator.more(); ) {
            BSONElement cmdElement(cmdIterator.next());
            const char *pFieldName = cmdElement.fieldName();

            // ignore top-level fields prefixed with $. They are for the command processor, not us.
            if (pFieldName[0] == '$') {
                continue;
            }

            // ignore cursor options since they are handled externally.
            if (str::equals(pFieldName, "cursor")) {
                continue;
            }

            /* look for the aggregation command */
            if (!strcmp(pFieldName, commandName)) {
                pPipeline->collectionName = cmdElement.String();
                continue;
            }

            /* check for the collection name */
            if (!strcmp(pFieldName, pipelineName)) {
                pipeline = cmdElement.Array();
                continue;
            }

            /* check for explain option */
            if (!strcmp(pFieldName, explainName)) {
                pPipeline->explain = cmdElement.Bool();
                continue;
            }

            /* if the request came from the router, we're in a shard */
            if (!strcmp(pFieldName, fromRouterName)) {
                pCtx->setInShard(cmdElement.Bool());
                continue;
            }

            /* check for debug options */
            if (!strcmp(pFieldName, splitMongodPipelineName)) {
                pPipeline->splitMongodPipeline = true;
                continue;
            }

            /* we didn't recognize a field in the command */
            ostringstream sb;
            sb <<
               "unrecognized field \"" <<
               cmdElement.fieldName();
            errmsg = sb.str();
            return intrusive_ptr<Pipeline>();
        }

        /*
          If we get here, we've harvested the fields we expect for a pipeline.

          Set up the specified document source pipeline.
        */
        SourceContainer& sources = pPipeline->sources; // shorthand

        /* iterate over the steps in the pipeline */
        const size_t nSteps = pipeline.size();
        for(size_t iStep = 0; iStep < nSteps; ++iStep) {
            /* pull out the pipeline element as an object */
            BSONElement pipeElement(pipeline[iStep]);
            uassert(15942, str::stream() << "pipeline element " <<
                    iStep << " is not an object",
                    pipeElement.type() == Object);
            BSONObj bsonObj(pipeElement.Obj());

            // Parse a pipeline stage from 'bsonObj'.
            uassert(16435, "A pipeline stage specification object must contain exactly one field.",
                    bsonObj.nFields() == 1);
            BSONElement stageSpec = bsonObj.firstElement();
            const char* stageName = stageSpec.fieldName();

            // Create a DocumentSource pipeline stage from 'stageSpec'.
            // Stage factories are looked up by name via binary search over the
            // (sorted) stageDesc table.
            StageDesc key;
            key.pName = stageName;
            const StageDesc* pDesc = (const StageDesc*)
                    bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
                            stageDescCmp);

            uassert(16436,
                    str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
                    pDesc);
            intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);
            verify(stage);
            stage->setPipelineStep(iStep);
            sources.push_back(stage);
        }

        /* if there aren't any pipeline stages, there's nothing more to do */
        if (sources.empty())
            return pPipeline;

        /*
          Move filters up where possible.

          CW TODO -- move filter past projections where possible, and noting
          corresponding field renaming.
        */

        /*
          Wherever there is a match immediately following a sort, swap them.
          This means we sort fewer items.  Neither changes the documents in
          the stream, so this transformation shouldn't affect the result.

          We do this first, because then when we coalesce operators below,
          any adjacent matches will be combined.
         */
        for (size_t srcn = sources.size(), srci = 1; srci < srcn; ++srci) {
            intrusive_ptr<DocumentSource> &pSource = sources[srci];
            if (dynamic_cast<DocumentSourceMatch *>(pSource.get())) {
                intrusive_ptr<DocumentSource> &pPrevious = sources[srci - 1];
                if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
                    /* swap this item with the previous */
                    intrusive_ptr<DocumentSource> pTemp(pPrevious);
                    pPrevious = pSource;
                    pSource = pTemp;
                }
            }
        }

        /* Move limits in front of skips. This is more optimal for sharding
         * since currently, we can only split the pipeline at a single source
         * and it is better to limit the results coming from each shard
         */
        for(int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
            DocumentSourceLimit* limit =
                dynamic_cast<DocumentSourceLimit*>(sources[i].get());
            DocumentSourceSkip* skip =
                dynamic_cast<DocumentSourceSkip*>(sources[i-1].get());
            if (limit && skip) {
                // Increase limit by skip since the skipped docs now pass through the $limit
                limit->setLimit(limit->getLimit() + skip->getSkip());
                swap(sources[i], sources[i-1]);

                // Start at back again. This is needed to handle cases with more than 1 $limit
                // (S means skip, L means limit)
                //
                // These two would work without second pass (assuming back to front ordering)
                // SL   -> LS
                // SSL  -> LSS
                //
                // The following cases need a second pass to handle the second limit
                // SLL  -> LLS
                // SSLL -> LLSS
                // SLSL -> LLSS
                i = sources.size(); // decremented before next pass
            }
        }

        /*
          Coalesce adjacent filters where possible.  Two adjacent filters
          are equivalent to one filter whose predicate is the conjunction of
          the two original filters' predicates.  For now, capture this by
          giving any DocumentSource the option to absorb it's successor; this
          will also allow adjacent projections to coalesce when possible.

          Run through the DocumentSources, and give each one the opportunity
          to coalesce with its successor.  If successful, remove the
          successor.

          Move all document sources to a temporary list.
        */
        SourceContainer tempSources;
        sources.swap(tempSources);

        /* move the first one to the final list */
        sources.push_back(tempSources[0]);

        /* run through the sources, coalescing them or keeping them */
        for (size_t tempn = tempSources.size(), tempi = 1; tempi < tempn; ++tempi) {
            /*
              If we can't coalesce the source with the last, then move it
              to the final list, and make it the new last.  (If we succeeded,
              then we're still on the same last, and there's no need to move
              or do anything with the source -- the destruction of tempSources
              will take care of the rest.)
            */
            intrusive_ptr<DocumentSource> &pLastSource = sources.back();
            intrusive_ptr<DocumentSource> &pTemp = tempSources[tempi];
            verify(pTemp && pLastSource);
            if (!pLastSource->coalesce(pTemp))
                sources.push_back(pTemp);
        }

        /* optimize the elements in the pipeline */
        for(SourceContainer::iterator iter(sources.begin()),
                                      listEnd(sources.end());
                                    iter != listEnd;
                                    ++iter) {
            if (!*iter) {
                errmsg = "Pipeline received empty document as argument";
                return intrusive_ptr<Pipeline>();
            }

            (*iter)->optimize();
        }

        return pPipeline;
    }
示例#20
0
 // Update this shard's connection information and re-register the shard
 // with the static shard registry.  The shard must already be named.
 void Shard::setAddress( const ConnectionString& cs) {
     // A nameless shard cannot be registered.
     verify( _name.size() );

     // Record both the structured and the string form of the address.
     _cs = cs;
     _addr = cs.toString();

     // Push the updated shard entry into the shared registry.
     staticShardInfo.set( _name , *this , true , false );
 }
示例#21
0
文件: demo.c 项目: 627656505/sort
void run_tests(void) {
  int i;
  int64_t arr[SIZE];
  int64_t dst[SIZE];
  double start_time;
  double end_time;
  double total_time;
  printf("Running tests\n");
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    qsort(dst, SIZE, sizeof(int64_t), simple_cmp);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("stdlib qsort time:          %10.2f us per iteration\n", total_time / RUNS);
#ifndef __linux__
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    heapsort(dst, SIZE, sizeof(int64_t), simple_cmp);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("stdlib heapsort time:       %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    mergesort(dst, SIZE, sizeof(int64_t), simple_cmp);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("stdlib mergesort time:      %10.2f us per iteration\n", total_time / RUNS);
#endif
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_quick_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("quick sort time:            %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_selection_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("selection sort time:        %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_merge_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("merge sort time:            %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_binary_insertion_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("binary insertion sort time: %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_heap_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("heap sort time:             %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_shell_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("shell sort time:            %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_tim_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("tim sort time:              %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_merge_sort_in_place(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("in-place merge sort time:   %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_grail_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("grail sort time:            %10.2f us per iteration\n", total_time / RUNS);
  srand48(SEED);
  total_time = 0.0;

  for (i = 0; i < RUNS; i++) {
    fill(arr, SIZE);
    memcpy(dst, arr, sizeof(int64_t) * SIZE);
    start_time = utime();
    sorter_sqrt_sort(dst, SIZE);
    end_time = utime();
    total_time += end_time - start_time;
    verify(dst, SIZE);
  }

  printf("sqrt sort time:             %10.2f us per iteration\n", total_time / RUNS);
}
示例#22
0
/* Compare the modification time stamps of destination DST_NAME (whose
   status is DST_STAT) and the source (SRC_STAT), qualified by OPTIONS.

   Return -1 if the destination is older than the source, 0 if equal,
   1 if newer, and -2 if the destination's time stamp resolution could
   not be probed or restored.

   With UTIMECMP_TRUNCATE_SOURCE, the source time stamp is first
   truncated to the destination file system's resolution.  That
   resolution is looked up in (and cached into) a per-device hash
   table; when unknown it is deduced by inspecting the destination's
   existing time stamps and, if necessary, by writing a probe time
   stamp and reading it back.  */
int
utimecmp (char const *dst_name,
          struct stat const *dst_stat,
          struct stat const *src_stat,
          int options)
{
    /* Things to watch out for:

       The code uses a static hash table internally and is not safe in the
       presence of signals, multiple threads, etc.

       int and long int might be 32 bits.  Many of the calculations store
       numbers up to 2 billion, and multiply by 10; they have to avoid
       multiplying 2 billion by 10, as this exceeds 32-bit capabilities.

       time_t might be unsigned.  */

    verify (TYPE_IS_INTEGER (time_t));
    verify (TYPE_TWOS_COMPLEMENT (int));

    /* Destination and source time stamps.  */
    time_t dst_s = dst_stat->st_mtime;
    time_t src_s = src_stat->st_mtime;
    int dst_ns = get_stat_mtime_ns (dst_stat);
    int src_ns = get_stat_mtime_ns (src_stat);

    if (options & UTIMECMP_TRUNCATE_SOURCE)
    {
        /* Look up the time stamp resolution for the destination device.  */

        /* Hash table for devices.  */
        static Hash_table *ht;

        /* Information about the destination file system.  */
        static struct fs_res *new_dst_res;
        struct fs_res *dst_res;

        /* Time stamp resolution in nanoseconds.  */
        int res;

        /* Quick exit, if possible.  Since the worst resolution is 2
           seconds, anything that differs by more than that does not
           needs source truncation.  */
        if (dst_s == src_s && dst_ns == src_ns)
            return 0;
        if (dst_s <= src_s - 2)
            return -1;
        if (src_s <= dst_s - 2)
            return 1;

        /* Lazily create the device->resolution cache, and keep a
           preallocated entry handy so insertion cannot fail mid-way.  */
        if (! ht)
            ht = hash_initialize (16, NULL, dev_info_hash, dev_info_compare, free);
        if (! new_dst_res)
        {
            new_dst_res = xmalloc (sizeof *new_dst_res);
            new_dst_res->resolution = 2 * BILLION;
            new_dst_res->exact = false;
        }
        new_dst_res->dev = dst_stat->st_dev;
        dst_res = hash_insert (ht, new_dst_res);
        if (! dst_res)
            xalloc_die ();

        if (dst_res == new_dst_res)
        {
            /* NEW_DST_RES is now in use in the hash table, so allocate a
               new entry next time.  */
            new_dst_res = NULL;
        }

        res = dst_res->resolution;

#ifdef _PC_TIMESTAMP_RESOLUTION
        /* If the system will tell us the resolution, we're set!  */
        if (! dst_res->exact)
        {
            res = pathconf (dst_name, _PC_TIMESTAMP_RESOLUTION);
            if (0 < res)
            {
                dst_res->resolution = res;
                dst_res->exact = true;
            }
        }
#endif

        if (! dst_res->exact)
        {
            /* This file system's resolution is not known exactly.
               Deduce it, and store the result in the hash table.  */

            time_t dst_a_s = dst_stat->st_atime;
            time_t dst_c_s = dst_stat->st_ctime;
            time_t dst_m_s = dst_s;
            int dst_a_ns = get_stat_atime_ns (dst_stat);
            int dst_c_ns = get_stat_ctime_ns (dst_stat);
            int dst_m_ns = dst_ns;

            /* Set RES to an upper bound on the file system resolution
               (after truncation due to SYSCALL_RESOLUTION) by inspecting
               the atime, ctime and mtime of the existing destination.
               We don't know of any file system that stores atime or
               ctime with a higher precision than mtime, so it's valid to
               look at them too.  */
            {
                bool odd_second = (dst_a_s | dst_c_s | dst_m_s) & 1;

                if (SYSCALL_RESOLUTION == BILLION)
                {
                    if (odd_second | dst_a_ns | dst_c_ns | dst_m_ns)
                        res = BILLION;
                }
                else
                {
                    int a = dst_a_ns;
                    int c = dst_c_ns;
                    int m = dst_m_ns;

                    /* Write it this way to avoid mistaken GCC warning
                       about integer overflow in constant expression.  */
                    int SR10 = SYSCALL_RESOLUTION;
                    SR10 *= 10;

                    /* Keep multiplying the candidate resolution by 10 while
                       all three time stamps remain multiples of it.  */
                    if ((a % SR10 | c % SR10 | m % SR10) != 0)
                        res = SYSCALL_RESOLUTION;
                    else
                        for (res = SR10, a /= SR10, c /= SR10, m /= SR10;
                                (res < dst_res->resolution
                                 && (a % 10 | c % 10 | m % 10) == 0);
                                res *= 10, a /= 10, c /= 10, m /= 10)
                            if (res == BILLION)
                            {
                                if (! odd_second)
                                    res *= 2;
                                break;
                            }
                }

                dst_res->resolution = res;
            }

            if (SYSCALL_RESOLUTION < res)
            {
                struct timespec timespec[2];
                struct stat dst_status;

                /* Ignore source time stamp information that must necessarily
                   be lost when filtered through utimens.  */
                src_ns -= src_ns % SYSCALL_RESOLUTION;

                /* If the time stamps disagree widely enough, there's no need
                   to interrogate the file system to deduce the exact time
                   stamp resolution; return the answer directly.  */
                {
                    /* (res == 2 * BILLION) is 0 or 1, so this clears the
                       low-order second bit only for 2-second resolution.  */
                    time_t s = src_s & ~ (res == 2 * BILLION);
                    if (src_s < dst_s || (src_s == dst_s && src_ns <= dst_ns))
                        return 1;
                    if (dst_s < s
                            || (dst_s == s && dst_ns < src_ns - src_ns % res))
                        return -1;
                }

                /* Determine the actual time stamp resolution for the
                   destination file system (after truncation due to
                   SYSCALL_RESOLUTION) by setting the access time stamp of the
                   destination to the existing access time, except with
                   trailing nonzero digits.  */

                timespec[0].tv_sec = dst_a_s;
                timespec[0].tv_nsec = dst_a_ns;
                timespec[1].tv_sec = dst_m_s | (res == 2 * BILLION);
                /* res / 9 == 111...1, i.e. a nonzero digit in every
                   position finer than RES.  */
                timespec[1].tv_nsec = dst_m_ns + res / 9;

                /* Set the modification time.  But don't try to set the
                   modification time of symbolic links; on many hosts this sets
                   the time of the pointed-to file.  */
                if ((S_ISLNK (dst_stat->st_mode)
                        ? lutimens (dst_name, timespec)
                        : utimens (dst_name, timespec)) != 0)
                    return -2;

                /* Read the modification time that was set.  */
                {
                    int stat_result = (S_ISLNK (dst_stat->st_mode)
                                       ? lstat (dst_name, &dst_status)
                                       : stat (dst_name, &dst_status));

                    if (stat_result
                            | (dst_status.st_mtime ^ dst_m_s)
                            | (get_stat_mtime_ns (&dst_status) ^ dst_m_ns))
                    {
                        /* The modification time changed, or we can't tell whether
                           it changed.  Change it back as best we can.  */
                        timespec[1].tv_sec = dst_m_s;
                        timespec[1].tv_nsec = dst_m_ns;
                        if (S_ISLNK (dst_stat->st_mode))
                            lutimens (dst_name, timespec);
                        else
                            utimens (dst_name, timespec);
                    }

                    if (stat_result != 0)
                        return -2;
                }

                /* Determine the exact resolution from the modification time
                   that was read back.  */
                {
                    int old_res = res;
                    int a = (BILLION * (dst_status.st_mtime & 1)
                             + get_stat_mtime_ns (&dst_status));

                    res = SYSCALL_RESOLUTION;

                    /* Each surviving trailing zero digit means the file
                       system rounded away one more decimal place.  */
                    for (a /= res; a % 10 != 0; a /= 10)
                    {
                        if (res == BILLION)
                        {
                            res *= 2;
                            break;
                        }
                        res *= 10;
                        if (res == old_res)
                            break;
                    }
                }
            }

            dst_res->resolution = res;
            dst_res->exact = true;
        }

        /* Truncate the source's time stamp according to the resolution.  */
        src_s &= ~ (res == 2 * BILLION);
        src_ns -= src_ns % res;
    }

    /* Compare the time stamps and return -1, 0, 1 accordingly.  */
    return (dst_s < src_s ? -1
            : dst_s > src_s ? 1
            : dst_ns < src_ns ? -1
            : dst_ns > src_ns);
}
示例#23
0
/* Marshal one shil parameter for a call to a "canonical" C implementation
   of an opcode, or store the call's result back into the guest register
   allocation, emitting the corresponding x86 instructions.

   TP selects the direction and type:
     - CPT_u32 / CPT_f32:       push the parameter's value on the stack.
     - CPT_ptr:                 push the parameter's address (currently a
                                die() path — see below).
     - CPT_u32rv / CPT_u64rvL:  store the call's return value from EAX.
     - CPT_u64rvH:              store the high half of a 64-bit return
                                value from EDX.
     - CPT_f32rv:               store a float return value from ST(0).

   ngen_CC_BytesPushed tracks how many bytes the pushes added so the
   caller can clean up the stack after the call.  */
void ngen_CC_Param(shil_opcode* op,shil_param* par,CanonicalParamType tp)
{
    switch(tp)
    {
    //push the contents
    case CPT_u32:
    case CPT_f32:
        if (par->is_reg())
        {
            // Prefer the allocated host register; floats go through memory
            // because x86 push cannot take an XMM register directly.
            if (reg.IsAllocg(*par))
                x86e->Emit(op_push32,reg.mapg(*par));
            else if (reg.IsAllocf(*par))
            {
                x86e->Emit(op_sub32,ESP,4);
                x86e->Emit(op_movss,x86_mrm(ESP), reg.mapf(*par));
            }
            else
            {
                // Unallocated register params are unexpected here.
                die("Must not happen !\n");
                x86e->Emit(op_push32,x86_ptr(par->reg_ptr()));
            }
        }
        else if (par->is_imm())
            x86e->Emit(op_push,par->_imm);
        else
            die("invalid combination");
        ngen_CC_BytesPushed+=4;
        break;
    //push the ptr itself
    case CPT_ptr:
        verify(par->is_reg());

        // NOTE(review): this path die()s before the push below; pointer
        // params appear unsupported/disabled in this backend — confirm.
        die("FAIL");
        x86e->Emit(op_push,(unat)par->reg_ptr());

        // Spill any float-allocated components so the callee sees current
        // values in memory.
        for (u32 ri=0; ri<(*par).count(); ri++)
        {
            if (reg.IsAllocf(*par,ri))
            {
                x86e->Emit(op_sub32,ESP,4);
                x86e->Emit(op_movss,x86_mrm(ESP),reg.mapfv(*par,ri));
            }
            else
            {
                verify(!reg.IsAllocAny((Sh4RegType)(par->_reg+ri)));
            }
        }


        ngen_CC_BytesPushed+=4;
        break;

    //store from EAX
    case CPT_u64rvL:
    case CPT_u32rv:
        if (reg.IsAllocg(*par))
            x86e->Emit(op_mov32,reg.mapg(*par),EAX);
        /*else if (reg.IsAllocf(*par))
        	x86e->Emit(op_movd_xmm_from_r32,reg.mapf(*par),EAX);*/
        else
            die("Must not happen!\n");
        break;

    case CPT_u64rvH:
        if (reg.IsAllocg(*par))
            x86e->Emit(op_mov32,reg.mapg(*par),EDX);
        else
            die("Must not happen!\n");
        break;

    //Store from ST(0)
    case CPT_f32rv:
        // The x87 result is bounced through guest-register memory into
        // the allocated XMM register.
        verify(reg.IsAllocf(*par));
        x86e->Emit(op_fstp32f,x86_ptr(par->reg_ptr()));
        x86e->Emit(op_movss,reg.mapf(*par),x86_ptr(par->reg_ptr()));
        break;

    // NOTE(review): no default case — any CanonicalParamType value not
    // listed above is silently ignored; confirm all values are covered.
    }
}
示例#24
0
/* Return the stage to run on each shard for a distributed sort: the sort
   itself (this object).  Must not be used when this stage merges already
   presorted streams — that configuration belongs on the merger.  */
intrusive_ptr<DocumentSource> DocumentSourceSort::getShardSource() {
    verify(!_mergingPresorted);
    return this;
}
示例#25
0
    /** Build and return the local portion of a distributed hexahedral
     *  box mesh of elems_x * elems_y * elems_z elements.
     *
     *  The global vertex box is partitioned across proc_count ranks
     *  (recursive coordinate bisection); this call assembles the mesh
     *  owned/used by rank proc_local, including node<->element
     *  connectivity and the parallel send/recv communication lists.
     *  Node coordinates are first set to integer grid indices (checked
     *  by verify()) and then scaled to the problem extent using the
     *  pow()-based *_coord_curve parameters.
     *
     *  gang_count > 1 selects the gang-partitioned element layout,
     *  otherwise elements are laid out interior-first.
     */
    static FEMeshType create( const size_t proc_count ,
                              const size_t proc_local ,
                              const size_t gang_count ,
                              const size_t elems_x ,
                              const size_t elems_y ,
                              const size_t elems_z ,
                              const double x_coord_curve = 1 ,
                              const double y_coord_curve = 1 ,
                              const double z_coord_curve = 1 )
    {
        const size_t vertices_x = elems_x + 1 ;
        const size_t vertices_y = elems_y + 1 ;
        const size_t vertices_z = elems_z + 1 ;

        const BoxBoundsLinear vertex_box_bounds ;
        const ElementSpec element ;

        // Partition based upon vertices:

        BoxType vertex_box_global ;
        std::vector< BoxType > vertex_box_parts( proc_count );

        vertex_box_global[0][0] = 0 ;
        vertex_box_global[0][1] = vertices_x ;
        vertex_box_global[1][0] = 0 ;
        vertex_box_global[1][1] = vertices_y ;
        vertex_box_global[2][0] = 0 ;
        vertex_box_global[2][1] = vertices_z ;

        box_partition_rcb( vertex_box_global , vertex_box_parts );

        const BoxType vertex_box_local_owned = vertex_box_parts[ proc_local ];

        // Determine interior and used vertices:

        BoxType vertex_box_local_interior ;
        BoxType vertex_box_local_used ;

        vertex_box_bounds.apply( vertex_box_global ,
                                 vertex_box_local_owned ,
                                 vertex_box_local_interior ,
                                 vertex_box_local_used );

        // Element counts:
        // (a "used" span of N vertices covers N-1 elements, hence the -1;
        //  std::max guards against empty local boxes going negative)

        const long local_elems_x =
            ( vertex_box_local_used[0][1] - vertex_box_local_used[0][0] ) - 1 ;
        const long local_elems_y =
            ( vertex_box_local_used[1][1] - vertex_box_local_used[1][0] ) - 1 ;
        const long local_elems_z =
            ( vertex_box_local_used[2][1] - vertex_box_local_used[2][0] ) - 1 ;

        const size_t elem_count_total = std::max( long(0) , local_elems_x ) *
                                        std::max( long(0) , local_elems_y ) *
                                        std::max( long(0) , local_elems_z );

        const long interior_elems_x =
            ( vertex_box_local_owned[0][1] - vertex_box_local_owned[0][0] ) - 1 ;
        const long interior_elems_y =
            ( vertex_box_local_owned[1][1] - vertex_box_local_owned[1][0] ) - 1 ;
        const long interior_elems_z =
            ( vertex_box_local_owned[2][1] - vertex_box_local_owned[2][0] ) - 1 ;

        const size_t elem_count_interior = std::max( long(0) , interior_elems_x ) *
                                           std::max( long(0) , interior_elems_y ) *
                                           std::max( long(0) , interior_elems_z );

        // Expand vertex boxes to node boxes:

        BoxType node_box_global ;
        BoxType node_box_local_used ;
        std::vector< BoxType > node_box_parts ;

        element.create_node_boxes_from_vertex_boxes(
            vertex_box_global , vertex_box_parts ,
            node_box_global , node_box_parts );

        // Node communication maps:

        size_t node_count_interior = 0 ;
        size_t node_count_owned    = 0 ;
        size_t node_count_total    = 0 ;
        std::vector<size_t>                 node_used_id_map ;
        std::vector<size_t>                 node_part_counts ;
        std::vector< std::vector<size_t> >  node_send_map ;

        box_partition_maps( node_box_global ,
                            node_box_parts ,
                            element.box_bounds ,
                            proc_local ,
                            node_box_local_used ,
                            node_used_id_map ,
                            node_count_interior ,
                            node_count_owned ,
                            node_count_total ,
                            node_part_counts ,
                            node_send_map );

        size_t node_count_send = 0 ;
        for ( size_t i = 0 ; i < node_send_map.size() ; ++i ) {
            node_count_send += node_send_map[i].size();
        }

        // Count messages/items exchanged with every other rank
        // (index 0 is this rank itself, hence the loop from 1).
        size_t recv_msg_count = 0 ;
        size_t send_msg_count = 0 ;
        size_t send_count = 0 ;

        for ( size_t i = 1 ; i < proc_count ; ++i ) {
            if ( node_part_counts[i] ) ++recv_msg_count ;
            if ( node_send_map[i].size() ) {
                ++send_msg_count ;
                send_count += node_send_map[i].size();
            }
        }

        // Finite element mesh:

        FEMeshType mesh ;

        if ( node_count_total ) {
            mesh.node_coords = node_coords_type( "node_coords", node_count_total );
        }

        if ( elem_count_total ) {
            mesh.elem_node_ids =
                elem_node_ids_type( "elem_node_ids", elem_count_total );
        }

        mesh.parallel_data_map.assign( node_count_interior ,
                                       node_count_owned ,
                                       node_count_total ,
                                       recv_msg_count ,
                                       send_msg_count ,
                                       send_count );

        // Host-side mirrors are filled here and deep-copied to the
        // device views at the end.
        typename node_coords_type::HostMirror node_coords =
            Kokkos::create_mirror( mesh.node_coords );

        typename elem_node_ids_type::HostMirror elem_node_ids =
            Kokkos::create_mirror( mesh.elem_node_ids );

        //------------------------------------
        // set node coordinates to grid location for subsequent verification

        for ( size_t iz = node_box_local_used[2][0] ;
                iz < node_box_local_used[2][1] ; ++iz ) {

            for ( size_t iy = node_box_local_used[1][0] ;
                    iy < node_box_local_used[1][1] ; ++iy ) {

                for ( size_t ix = node_box_local_used[0][0] ;
                        ix < node_box_local_used[0][1] ; ++ix ) {

                    const size_t node_local_id =
                        box_map_id( node_box_local_used , node_used_id_map , ix , iy , iz );

                    node_coords( node_local_id , 0 ) = ix ;
                    node_coords( node_local_id , 1 ) = iy ;
                    node_coords( node_local_id , 2 ) = iz ;
                }
            }
        }

        //------------------------------------
        // Initialize element-node connectivity:

        if ( 1 < gang_count ) {
            layout_elements_partitioned( vertex_box_local_used ,
                                         vertex_box_local_owned ,
                                         node_box_local_used ,
                                         node_used_id_map ,
                                         element ,
                                         gang_count ,
                                         elem_node_ids );
        }
        else {
            layout_elements_interior_exterior( vertex_box_local_used ,
                                               vertex_box_local_owned ,
                                               node_box_local_used ,
                                               node_used_id_map ,
                                               element ,
                                               elem_count_interior ,
                                               elem_node_ids );
        }

        //------------------------------------
        // Populate node->element connectivity:

        // First pass: count elements attached to each node to size the
        // static CRS graph's rows.
        std::vector<size_t> node_elem_work( node_count_total , (size_t) 0 );

        for ( size_t i = 0 ; i < elem_count_total ; ++i ) {
            for ( size_t n = 0 ; n < element_node_count  ; ++n ) {
                ++node_elem_work[ elem_node_ids(i,n) ];
            }
        }

        mesh.node_elem_ids =
            Kokkos::create_staticcrsgraph< node_elem_ids_type >( "node_elem_ids" , node_elem_work );

        typename node_elem_ids_type::HostMirror
        node_elem_ids = Kokkos::create_mirror( mesh.node_elem_ids );

        // Reuse node_elem_work as a per-node fill cursor into the graph.
        for ( size_t i = 0 ; i < node_count_total ; ++i ) {
            node_elem_work[i] = node_elem_ids.row_map[i];
        }

        // Looping in element order insures the list of elements
        // is sorted by element index.

        for ( size_t i = 0 ; i < elem_count_total ; ++i ) {
            for ( size_t n = 0 ; n < element_node_count ; ++n ) {
                const unsigned nid = elem_node_ids(i, n);
                const unsigned j = node_elem_work[nid] ;
                ++node_elem_work[nid] ;

                node_elem_ids.entries( j , 0 ) = i ;
                node_elem_ids.entries( j , 1 ) = n ;
            }
        }
        //------------------------------------
        // Verify setup with node coordinates matching grid indices.
        verify( node_coords , elem_node_ids , node_elem_ids );

        //------------------------------------
        // Scale node coordinates to problem extent with
        // nonlinear mapping.
        {
            const double problem_extent[3] =
            {   static_cast<double>( vertex_box_global[0][1] - 1 ) ,
                static_cast<double>( vertex_box_global[1][1] - 1 ) ,
                static_cast<double>( vertex_box_global[2][1] - 1 )
            };

            const double grid_extent[3] =
            {   static_cast<double>( node_box_global[0][1] - 1 ) ,
                static_cast<double>( node_box_global[1][1] - 1 ) ,
                static_cast<double>( node_box_global[2][1] - 1 )
            };

            for ( size_t i = 0 ; i < node_count_total ; ++i ) {
                const double x_unit = node_coords(i,0) / grid_extent[0] ;
                const double y_unit = node_coords(i,1) / grid_extent[1] ;
                const double z_unit = node_coords(i,2) / grid_extent[2] ;

                node_coords(i,0) = coordinate_scalar_type( problem_extent[0] * std::pow( x_unit , x_coord_curve ) );
                node_coords(i,1) = coordinate_scalar_type( problem_extent[1] * std::pow( y_unit , y_coord_curve ) );
                node_coords(i,2) = coordinate_scalar_type( problem_extent[2] * std::pow( z_unit , z_coord_curve ) );
            }
        }

        Kokkos::deep_copy( mesh.node_coords ,   node_coords );
        Kokkos::deep_copy( mesh.elem_node_ids , elem_node_ids );
        Kokkos::deep_copy( mesh.node_elem_ids.entries , node_elem_ids.entries );

        //------------------------------------
        // Communication lists:
        {
            recv_msg_count = 0 ;
            send_msg_count = 0 ;
            send_count = 0 ;

            for ( size_t i = 1 ; i < proc_count ; ++i ) {

                // Order sending starting with the local processor rank
                // to try to smooth out the amount of messages simultaneously
                // send to a particular processor.

                const int proc = ( proc_local + i ) % proc_count ;
                if ( node_part_counts[i] ) {
                    mesh.parallel_data_map.host_recv(recv_msg_count,0) = proc ;
                    mesh.parallel_data_map.host_recv(recv_msg_count,1) = node_part_counts[i] ;
                    ++recv_msg_count ;
                }
                if ( node_send_map[i].size() ) {
                    mesh.parallel_data_map.host_send(send_msg_count,0) = proc ;
                    mesh.parallel_data_map.host_send(send_msg_count,1) = node_send_map[i].size() ;
                    for ( size_t j = 0 ; j < node_send_map[i].size() ; ++j , ++send_count ) {
                        mesh.parallel_data_map.host_send_item(send_count) = node_send_map[i][j] - node_count_interior ;
                    }
                    ++send_msg_count ;
                }
            }
        }

        return mesh ;
    }
示例#26
0
文件: keygen.c 项目: odzhan/shells
int main(int argc, char *argv[])
{
  int idx, want_gen = 0, want_sign = 0, want_verify = 0;

  printf ("\n\n  [ RSA key generation/signing/verifcation\n\n");

  /* Scan the command line for -x / /x style switches. */
  for (idx = 1; idx < argc; idx++)
  {
    if (argv[idx][0] != '-' && argv[idx][0] != '/')
      continue;

    switch (argv[idx][1])
    {
      case 'g': /* generate RSA key pair */
        want_gen = 1;
        break;
      case 'm': /* sign a message using RSA (just for testing) */
        input = getparam (argc, argv, &idx);
        want_sign = 1;
        break;
      case 'k': /* key length (max is 1024-bits) */
        keylen = atoi(getparam(argc, argv, &idx));
        break;
      case 'v': /* verify RSA signature (just for testing) */
        signature = getparam (argc, argv, &idx);
        want_verify = 1;
        break;
      default:
        usage();
        break;
    }
  }

  /* Dispatch exactly one action: generate, sign, or verify. */
  if (want_gen)
  {
    printf ("  [ generating RSA key pair of %i-bits\n", keylen);
    genkeys();
  }
  else if (want_sign == 1 && want_verify == 0)
  {
    /* signing requires a message from -m */
    if (input == NULL)
    {
      printf ("  [ signing requires a message, use -m option\n");
      return 0;
    }
    printf ("  [ signing message using RSA\n");
    sign ();
  }
  else if (want_verify)
  {
    /* verification requires both message and signature */
    if (input == NULL || signature == NULL)
    {
      printf ("  [ verification requires message and signature\n");
      return 0;
    }
    printf ("  [ verifying message and signature using RSA\n");
    verify ();
  }
  else
  {
    usage();
  }
  return 0;
}
示例#27
0
 /** notification on unmapping so we can clear writable bits */
 void MemoryMappedFile::clearWritableBits(void *p) {
     // Clear the writable bit of every ChunkSize-sized chunk overlapped by
     // [p, p + len].  `len` is presumably this mapping's length (member) —
     // TODO confirm.
     // NOTE(review): the upper bound is inclusive of the chunk containing
     // p + len, so when p + len lands exactly on a chunk boundary the chunk
     // just past the mapping is cleared too — confirm this is intended.
     for( unsigned i = ((size_t)p)/ChunkSize; i <= (((size_t)p)+len)/ChunkSize; i++ ) {
         writable.clear(i);
         verify( !writable.get(i) ); // the clear must be observable immediately
     }
 }
示例#28
0
        /* Consume one batch of documents streamed from the source server.
           Under the global write lock: validates each BSON object, inserts
           it into the target collection (creating the collection if it
           vanished mid-clone), and replicates the insert if requested.
           For system.indexes batches, index specs are only collected into
           indexesToBuild for later construction.  Periodically yields the
           lock and logs progress. */
        void operator()( DBClientCursorBatchIterator &i ) {
            Lock::GlobalWrite lk;
            DurTransaction txn;
            context.relocked();

            bool createdCollection = false;
            Collection* collection = NULL;

            while( i.moreInCurrentBatch() ) {
                if ( numSeen % 128 == 127 /*yield some*/ ) {
                    // The collection pointer may be invalidated across a
                    // yield, so drop it and re-resolve below.
                    collection = NULL;
                    time_t now = time(0);
                    if( now - lastLog >= 60 ) {
                        // report progress
                        if( lastLog )
                            log() << "clone " << to_collection << ' ' << numSeen << endl;
                        lastLog = now;
                    }
                    mayInterrupt( _mayBeInterrupted );
                    dbtempreleaseif t( _mayYield );
                }

                if ( isindex == false && collection == NULL ) {
                    collection = context.db()->getCollection( to_collection );
                    if ( !collection ) {
                        // Fail if the collection disappeared after we had
                        // already created it ourselves once.
                        massert( 17321,
                                 str::stream()
                                 << "collection dropped during clone ["
                                 << to_collection << "]",
                                 !createdCollection );
                        createdCollection = true;
                        collection = context.db()->createCollection( &txn, to_collection );
                        verify( collection );
                    }
                }

                BSONObj tmp = i.nextSafe();

                /* assure object is valid.  note this will slow us down a little. */
                const Status status = validateBSON(tmp.objdata(), tmp.objsize());
                if (!status.isOK()) {
                    out() << "Cloner: skipping corrupt object from " << from_collection
                          << ": " << status.reason();
                    continue;
                }

                ++numSeen;

                BSONObj js = tmp;
                if ( isindex ) {
                    // Index specs are rewritten for the local db name and
                    // queued; they are not inserted here.
                    verify(nsToCollectionSubstring(from_collection) == "system.indexes");
                    js = fixindex(context.db()->name(), tmp);
                    indexesToBuild->push_back( js.getOwned() );
                    continue;
                }

                verify(nsToCollectionSubstring(from_collection) != "system.indexes");

                StatusWith<DiskLoc> loc = collection->insertDocument( &txn, js, true );
                if ( !loc.isOK() ) {
                    error() << "error: exception cloning object in " << from_collection
                            << ' ' << loc.toString() << " obj:" << js;
                }
                uassertStatusOK( loc.getStatus() );
                if ( logForRepl )
                    logOp("i", to_collection, js);

                getDur().commitIfNeeded();

                RARELY if ( time( 0 ) - saveLast > 60 ) {
                    log() << numSeen << " objects cloned so far from collection " << from_collection;
                    saveLast = time( 0 );
                }
            }
        }
示例#29
0
 /* Pop this context off its client: record timing for the current
    operation, leave the context, and restore the previously active one. */
 Client::Context::~Context() {
     // Debug-build sanity check: destruction must happen on the thread
     // whose client owns this context.
     DEV verify( _client == currentClient.get() );
     _client->_curOp->recordGlobalTime( _timer.micros() );
     _client->_curOp->leave( this );
     _client->_context = _oldContext; // note: _oldContext may be null
 }
示例#30
0
文件: zfs_iter.c 项目: FirmOS/fre
/*
 * Sort datasets by specified columns.
 *
 * o  Numeric types sort in ascending order.
 * o  String types sort in alphabetical order.
 * o  Types inappropriate for a row sort that row to the literal
 *    bottom, regardless of the specified ordering.
 *
 * If no sort columns are specified, or two datasets compare equally
 * across all specified columns, they are sorted alphabetically by name
 * with snapshots grouped under their parents.
 */
static int
zfs_sort(const void *larg, const void *rarg, void *data)
{
	zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
	zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
	zfs_sort_column_t *sc = (zfs_sort_column_t *)data;
	zfs_sort_column_t *psc;

	for (psc = sc; psc != NULL; psc = psc->sc_next) {
		char lbuf[ZFS_MAXPROPLEN], rbuf[ZFS_MAXPROPLEN];
		char *lstr, *rstr;
		uint64_t lnum, rnum;
		boolean_t lvalid, rvalid;
		int ret = 0;

		/*
		 * We group the checks below the generic code.  If 'lstr' and
		 * 'rstr' are non-NULL, then we do a string based comparison.
		 * Otherwise, we compare 'lnum' and 'rnum'.
		 */
		lstr = rstr = NULL;
		if (psc->sc_prop == ZPROP_INVAL) {
			nvlist_t *luser, *ruser;
			nvlist_t *lval, *rval;

			luser = zfs_get_user_props(l);
			ruser = zfs_get_user_props(r);

			lvalid = (nvlist_lookup_nvlist(luser,
			    psc->sc_user_prop, &lval) == 0);
			rvalid = (nvlist_lookup_nvlist(ruser,
			    psc->sc_user_prop, &rval) == 0);

			if (lvalid)
				verify(nvlist_lookup_string(lval,
				    ZPROP_VALUE, &lstr) == 0);
			if (rvalid)
				verify(nvlist_lookup_string(rval,
				    ZPROP_VALUE, &rstr) == 0);

		} else if (zfs_prop_is_string(psc->sc_prop)) {
			lvalid = (zfs_prop_get(l, psc->sc_prop, lbuf,
			    sizeof (lbuf), NULL, NULL, 0, B_TRUE) == 0);
			rvalid = (zfs_prop_get(r, psc->sc_prop, rbuf,
			    sizeof (rbuf), NULL, NULL, 0, B_TRUE) == 0);

			lstr = lbuf;
			rstr = rbuf;
		} else {
			lvalid = zfs_prop_valid_for_type(psc->sc_prop,
			    zfs_get_type(l));
			rvalid = zfs_prop_valid_for_type(psc->sc_prop,
			    zfs_get_type(r));

			if (lvalid)
				(void) zfs_prop_get_numeric(l, psc->sc_prop,
				    &lnum, NULL, NULL, 0);
			if (rvalid)
				(void) zfs_prop_get_numeric(r, psc->sc_prop,
				    &rnum, NULL, NULL, 0);
		}

		if (!lvalid && !rvalid)
			continue;
		else if (!lvalid)
			return (1);
		else if (!rvalid)
			return (-1);

		if (lstr)
			ret = strcmp(lstr, rstr);
		else if (lnum < rnum)
			ret = -1;
		else if (lnum > rnum)
			ret = 1;

		if (ret != 0) {
			if (psc->sc_reverse == B_TRUE)
				ret = (ret < 0) ? 1 : -1;
			return (ret);
		}
	}

	return (zfs_compare(larg, rarg, NULL));
}