Beispiel #1
0
// Loads the program header table from an ELF file into a read-only
// private mmap-ed block.  (Note: despite the historical wording, the
// mapping is file-backed, not anonymous -- it maps the pages of the
// file that cover the table.)
bool ElfReader_ReadProgramHeader(ElfReader* er) {
  // e_phnum is the number of entries in the program header table.
  er->phdr_num_ = er->header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (er->phdr_num_ < 1 || er->phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", er->name_, er->phdr_num_);
    return false;
  }

  // mmap offsets must be page-aligned: map the whole pages covering
  // [e_phoff, e_phoff + table size) and remember where the table
  // starts inside the first page.
  Elf32_Addr page_min = PAGE_START(er->header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(er->header_.e_phoff + (er->phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(er->header_.e_phoff);

  er->phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, er->phdr_size_, PROT_READ, MAP_PRIVATE, er->fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", er->name_, strerror(errno));
    return false;
  }

  er->phdr_mmap_ = mmap_result;  // kept so the mapping can be unmapped later
  er->phdr_table_ = (Elf32_Phdr*)((char*)(mmap_result) + page_offset);
  return true;
}
Beispiel #2
0
// Maps the program header table of the ELF file read-only into memory.
// The mapping is private and file-backed; on success phdr_table_ points
// at the first program header inside it and phdr_mmap_/phdr_size_
// record the mapping for later release.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we refuse program header tables that are empty or
  // bigger than 64KiB.
  if (!(phdr_num_ >= 1 && phdr_num_ <= 65536/sizeof(Elf32_Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  // mmap requires a page-aligned file offset: map the whole pages that
  // cover the table and keep the table's offset into the first page.
  Elf32_Addr first_page = PAGE_START(header_.e_phoff);
  Elf32_Addr last_page = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr in_page = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = last_page - first_page;

  void* map = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, first_page);
  if (map == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = map;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(map) + in_page);
  return true;
}
Beispiel #3
0
// Reserves (without committing) a contiguous virtual address range big
// enough for every loadable segment, via a private anonymous PROT_NONE
// mapping.  Records the reservation in load_start_/load_size_ and the
// delta between the reservation and the first PT_LOAD segment's
// page-aligned preferred address in load_bias_.
bool ElfReader::ReserveAddressSpace() {
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  void* base = mmap(NULL, load_size_, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = base;
  load_bias_ = 0;

  // The bias is how far the actual reservation sits from where the
  // first loadable segment asked to live.
  for (size_t i = 0; i < phdr_num_; ++i) {
    if (phdr_table_[i].p_type == PT_LOAD) {
      load_bias_ = reinterpret_cast<Elf32_Addr>(base) - PAGE_START(phdr_table_[i].p_vaddr);
      break;
    }
  }
  return true;
}
Beispiel #4
0
// Reads the ELF file header into header_.  Returns false (with a
// dlerror-style message) on an I/O error or when the file is shorter
// than an ELF header.
bool ElfReader::ReadElfHeader() {
  const ssize_t bytes_read = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (bytes_read == static_cast<ssize_t>(sizeof(header_))) {
    return true;
  }
  if (bytes_read < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
  } else {
    DL_ERR("\"%s\" is too small to be an ELF executable", name_);
  }
  return false;
}
Beispiel #5
0
// Reads the ELF file header into er->header_.
//
// Returns true on success.  On failure a dlerror-style message is
// reported via DL_ERR and false is returned; a short read (file
// smaller than an ELF header) is reported separately from a read error.
bool ElfReader_ReadElfHeader(ElfReader* er) {
  ssize_t rc = TEMP_FAILURE_RETRY(read(er->fd_, &er->header_, sizeof(er->header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", er->name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(er->header_)) {
    // Fix: sizeof() yields size_t and rc is ssize_t; the previous "%d"
    // conversions are undefined behavior on platforms where those types
    // are not int-sized.  Use the C99 %zu/%zd length modifiers.
    DL_ERR("\"%s\" is too small to be an ELF executable. Expected at least %zu bytes, only found %zd bytes.",
           er->name_, sizeof(er->header_), rc);
    return false;
  }
  return true;
}
Beispiel #6
0
// Locates the program header table inside the loaded segments (as
// opposed to phdr_table_, which is a temporary file mapping released
// before the library is relocated).
bool ElfReader::FindPhdr() {
  const Elf32_Phdr* limit = phdr_table_ + phdr_num_;

  // A PT_PHDR entry gives us the answer directly.
  for (const Elf32_Phdr* p = phdr_table_; p != limit; ++p) {
    if (p->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + p->p_vaddr);
    }
  }

  // No PT_PHDR: if the first loadable segment maps file offset 0, it
  // begins with the ELF header itself, whose e_phoff then leads us to
  // the loaded program header table.
  for (const Elf32_Phdr* p = phdr_table_; p != limit; ++p) {
    if (p->p_type != PT_LOAD) {
      continue;
    }
    if (p->p_offset == 0) {
      Elf32_Addr ehdr_addr = load_bias_ + p->p_vaddr;
      const Elf32_Ehdr* ehdr = reinterpret_cast<const Elf32_Ehdr*>(ehdr_addr);
      return CheckPhdr(reinterpret_cast<Elf32_Addr>(ehdr) + ehdr->e_phoff);
    }
    break;  // only the first PT_LOAD can start with the ELF header
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}
Beispiel #7
0
/*
 * init_transport - one-time initialization of the transport list.
 *
 * For non-API (direct-attach) libraries, runs clear_transport() over
 * every transport element on the list.  If any element reported that it
 * was cleared, the library is treated as needing operator attention: a
 * critical operator message is posted, the event is logged, the device
 * is downed, and the whole process exits (killing all threads).
 * Finally the transport's condition variable is signaled so whoever is
 * waiting on initialization can proceed.
 */
void
init_transport(xport_state_t *transport)
{
    int clear = 0;                      /* how many transports were cleared */
    xport_state_t 	*current = transport;

    /* API-managed library types have no transports to clear. */
    if (!(IS_GENERIC_API(transport->library->un->type))) {
        for (; current != (xport_state_t *)0; current = current->next)
            if (clear_transport(current->library, current))
                clear++;

        if (clear) {
            dev_ent_t *un;
            char *lc_mess;

            SANITY_CHECK(transport != (xport_state_t *)0);
            SANITY_CHECK(transport->library != (library_t *)0);
            SANITY_CHECK(transport->library->un != (dev_ent_t *)0);
            un = transport->library->un;
            lc_mess = un->dis_mes[DIS_MES_CRIT];
            memccpy(lc_mess,
                    catgets(catfd, SET, 9077,
                            "needs operator attention"),
                    '\0', DIS_MES_LEN);
            DevLog(DL_ERR(5152));
            DownDevice(un, SAM_STATE_CHANGE);
            exit(1);	/* this kill all threads */
        }
    }
    cond_signal(&transport->condit); /* signal done */
}
Beispiel #8
0
// Reserve a virtual address range big enough to hold all loadable
// segments, using a private anonymous PROT_NONE mmap().  The load bias
// is the distance between the reserved block and the segments'
// preferred minimum virtual address (min_vaddr).
bool ElfReader_ReserveAddressSpace(ElfReader* er) {
  Elf32_Addr min_vaddr;
  er->load_size_ = phdr_table_get_load_size(er->phdr_table_, er->phdr_num_, &min_vaddr, NULL);
  if (er->load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", er->name_);
    return false;
  }

  void* reservation = mmap(NULL, er->load_size_, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reservation == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", er->load_size_, er->name_);
    return false;
  }

  er->load_start_ = reservation;
  er->load_bias_ = (uint8_t*)(reservation) - (uint8_t*)(min_vaddr);
  return true;
}
Beispiel #9
0
// Verifies that the candidate program header table at 'loaded' lies
// wholly inside the file-backed portion of some PT_LOAD segment.
// Guards against badly-formed ELF files before the linker dereferences
// the table.  On success, records the address in loaded_phdr_.
bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
  const Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
  const Elf32_Phdr* limit = phdr_table_ + phdr_num_;
  for (const Elf32_Phdr* p = phdr_table_; p != limit; ++p) {
    if (p->p_type == PT_LOAD) {
      Elf32_Addr start = p->p_vaddr + load_bias_;
      Elf32_Addr end = start + p->p_filesz;
      if (start <= loaded && loaded_end <= end) {
        loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
        return true;
      }
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}
Beispiel #10
0
/*
 * move_drive_error - recover after a failed media move.
 *
 * Downs whichever end(s) of the move are drives -- never the robot
 * itself.  When the source is not a drive, the storage element is
 * re-read; if it is still full, *err is set to RECOVERED_MEDIA_MOVE so
 * the caller can restore the CES_occupied bit in the catalog entry.
 * If neither end was a drive that could be downed, the failure is only
 * logged.
 */
void
move_drive_error(
    library_t *library,
    uint_t source,
    uint_t dest,
    int *err)
{
    /* Down the drive(s), not the robot */
    int		downed_one = 0;
    drive_state_t	*drive_to_down;
    dev_ent_t	*un = library->un;
    /* Room for one storage element descriptor plus status headers. */
    uchar_t		buf[library->ele_dest_len +
                    sizeof (element_status_data_t) + sizeof (element_status_page_t) +
                    50];
    storage_element_t *desc;

    if (drive_to_down = find_element(library->drive, source)) {
        /* The source is a drive */
        down_drive(drive_to_down, SAM_STATE_CHANGE);
        downed_one++;
    } else {
        /*
         * The source is not a drive. Read the storage element of the
         * source. If full then set error to RECOVERED_MEDIA_MOVE. This
         * tells the requester to set the CES_occupied status bit in the
         * catalog entry.
         */
        /* io_mutex is dropped around the (slow) element status read. */
        mutex_unlock(&library->un->io_mutex);
        if (read_element_status(library, STORAGE_ELEMENT, source,
                                1, buf, sizeof (buf)) > 0) {
            desc = (storage_element_t *)(buf +
                                         sizeof (element_status_data_t) +
                                         sizeof (element_status_page_t));
            if (desc->full)
                *err = RECOVERED_MEDIA_MOVE;
        }
        mutex_lock(&library->un->io_mutex);
    }

    /* The destination may (also) be a drive; down it too. */
    if (drive_to_down = find_element(library->drive, dest)) {
        down_drive(drive_to_down, SAM_STATE_CHANGE);
        downed_one++;
    }
    if (downed_one == 0)
        DevLog(DL_ERR(5333), source, dest, 0);
}
Beispiel #11
0
// Checks that the range [loaded, loaded + phdr table size) falls
// entirely within the file-backed part of one of the PT_LOAD segments.
// This catches badly-formed ELF files before the linker crashes trying
// to access the table.  On success, er->loaded_phdr_ is recorded.
bool ElfReader_CheckPhdr(ElfReader* er, Elf32_Addr loaded) {
  Elf32_Addr loaded_end = loaded + (er->phdr_num_ * sizeof(Elf32_Phdr));
  size_t i;
  for (i = 0; i < er->phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &er->phdr_table_[i];
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf32_Addr seg_start = er->load_bias_ + phdr->p_vaddr;
    Elf32_Addr seg_end = seg_start + phdr->p_filesz;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      er->loaded_phdr_ = (const Elf32_Phdr*)(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", er->name_, loaded);
  return false;
}
Beispiel #12
0
// Sanity-checks the ELF header previously read into header_: magic
// bytes, 32-bit class, little-endian data encoding, ET_DYN file type,
// current ELF version, and the machine type this linker was built for.
// Returns false with a dlerror-style message on the first mismatch.
bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  // Only shared objects are accepted (the dynamic linker loads DSOs).
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  // The expected machine type is chosen at build time.
  // NOTE(review): if none of the ANDROID_*_LINKER macros is defined,
  // the comparison below has no right-hand side and will not compile;
  // an #else branch with #error would make that failure explicit.
  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}
Beispiel #13
0
/*
 *	Main thread.  Sits on the message queue and waits for something to do.
 *
 *	Events arrive on the transport's event list.  Internal robot
 *	commands are either run inline (move/exchange for direct-attach
 *	libraries) or handed to a detached api_* worker thread (for
 *	API-managed libraries).  Events are disposed of with
 *	disp_of_event(); for API libraries that happens only on error,
 *	since the worker thread otherwise owns the event.
 *
 *	Fix: the EVENT_TYPE_MESS case used to fall through into the
 *	default case of the outer switch, logging a spurious "event_bad"
 *	for every message event; a break now terminates the case.
 */
void *
transport_thread(
    void *vxport)
{
    int 		exit_status = 0, err;
    robo_event_t 	*event;
    xport_state_t 	*transport = (xport_state_t *)vxport;
    int 		is_api = IS_GENERIC_API(transport->library->un->type);
    dev_ent_t 	*un = transport->library->un;

    mutex_lock(&transport->mutex);	/* wait for go */
    mutex_unlock(&transport->mutex);

    for (;;) {
        /* Sleep until an event is queued on the transport list. */
        mutex_lock(&transport->list_mutex);
        if (transport->active_count == 0)
            cond_wait(&transport->list_condit,
                      &transport->list_mutex);

        if (transport->active_count == 0) {	/* check to make sure */
            mutex_unlock(&transport->list_mutex);
            continue;
        }
        event = transport->first;
        transport->first = unlink_list(event);
        transport->active_count--;
        mutex_unlock(&transport->list_mutex);
        ETRACE((LOG_NOTICE, "EvTr %#x(%#x) -",
                event, (event->type == EVENT_TYPE_MESS) ?
                event->request.message.command :
                event->request.internal.command));
        err = 0;

        switch (event->type) {
        case EVENT_TYPE_INTERNAL:
            switch (event->request.internal.command) {
            case ROBOT_INTRL_MOVE_MEDIA:
                /* Inline move; only valid for direct-attach libraries. */
                if (is_api == TRUE) {
                    err = EINVAL;
                    break;
                } else {
                    if (un->state <= DEV_IDLE) {
                        err = move(transport->library,
                                   event);
                    } else {
                        err = EINVAL;
                    }
                }
                break;

            case ROBOT_INTRL_EXCH_MEDIA:
                /* Inline exchange; only valid for direct-attach libraries. */
                if (is_api == TRUE) {
                    err = EINVAL;
                    break;
                } else {
                    if (un->state <= DEV_IDLE) {
                        err = exchange(
                                  transport->library, event);
                    } else {
                        err = EINVAL;
                    }
                }
                break;

            case ROBOT_INTRL_INIT:
                init_transport(transport);
                if (is_api == TRUE) {
                    disp_of_event(transport->library,
                                  event, 0);
                }
                break;

            case ROBOT_INTRL_SHUTDOWN:
                transport->thread = (thread_t)- 1;
                thr_exit(&exit_status);
                break;

            /*
             * The remaining internal commands are API-library only;
             * each is handed off to a detached worker thread that
             * takes ownership of the event.
             */
            case ROBOT_INTRL_LOAD_MEDIA:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_load_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_FORCE_MEDIA:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_force_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_DISMOUNT_MEDIA:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_dismount_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_VIEW_DATABASE:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_view_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_DRIVE_ACCESS:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_drive_access_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_QUERY_DRIVE:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_query_drive_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            case ROBOT_INTRL_GET_SIDE_INFO:
                if (is_api == FALSE) {
                    err = EINVAL;
                    break;
                }
                event->next = (robo_event_t *)
                              transport->library;
                err = thr_create(NULL, MD_THR_STK,
                                 api_getsideinfo_command,
                                 (void *)event, THR_DETACHED, NULL);
                if (err)
                    DevLog(DL_ERR(6038),
                           event->request.internal.command,
                           err);
                break;

            default:
                err = EINVAL;
                break;
            }
            break;

        case EVENT_TYPE_MESS:
            if (event->request.message.magic != MESSAGE_MAGIC) {
                if (DBG_LVL(SAM_DBG_DEBUG))
                    sam_syslog(LOG_DEBUG,
                               "xpt_thr:bad magic: %s:%d.",
                               __FILE__, __LINE__);
                break;
            }
            switch (event->request.message.command) {
            default:
                if (DBG_LVL(SAM_DBG_DEBUG))
                    sam_syslog(LOG_DEBUG,
                               "xpt_thr:msq_bad: %s:%d.",
                               __FILE__, __LINE__);
                err = EINVAL;
                break;
            }
            /*
             * Fix: previously fell through into the default case,
             * which logged "event_bad" for every message event.
             */
            break;

        default:
            if (DBG_LVL(SAM_DBG_DEBUG))
                sam_syslog(LOG_DEBUG,
                           "xpt_thr:event_bad: %s:%d.",
                           __FILE__, __LINE__);
            err = EINVAL;
            break;
        }
        if (is_api == FALSE) {
            disp_of_event(transport->library, event, err);
        } else if (err) {
            /* call disp_of_event only if an error on grau */
            if (err < 0)
                err = errno;
            disp_of_event(transport->library, event, err);
        }
    }
}
Beispiel #14
0
/*
 * clean - clean the drive.
 *
 */
void
clean(
	drive_state_t *drive,
	robo_event_t *event)
{
	dev_ent_t	*un;
	int		err, retry;
	uint32_t	access_count, status = 0;
	char	   *d_mess;
	char	   *l_mess;
	struct CatalogEntry ced;
	struct CatalogEntry *ce = &ced;
	library_t	*library;
	move_flags_t    move_flags;

	SANITY_CHECK(drive != (drive_state_t *)0);
	un = drive->un;
	SANITY_CHECK(un != (dev_ent_t *)0);
	library = (library_t *)drive->library;
	SANITY_CHECK(library != (library_t *)0);
	d_mess = drive->un->dis_mes[DIS_MES_NORM];
	l_mess = library->un->dis_mes[DIS_MES_NORM];
	mutex_lock(&drive->mutex);
	if (clear_drive(drive)) {
		mutex_lock(&drive->un->mutex);
		drive->un->status.bits |= DVST_CLEANING;
		mutex_unlock(&drive->un->mutex);
		drive->status.b.cln_inprog = FALSE;
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, ENOENT);
		return;
	}
	mutex_lock(&drive->un->mutex);
	drive->un->status.bits |= (DVST_REQUESTED | DVST_CLEANING);
	if (drive->un->open_count) {
		clear_driver_idle(drive, drive->open_fd);
		close_unit(drive->un, &drive->open_fd);
		DEC_OPEN(un);
	}
	mutex_unlock(&drive->un->mutex);
	mutex_unlock(&drive->mutex);

	DevLog(DL_ALL(5075));

	memccpy(d_mess, catgets(catfd, SET, 9025, "needs cleaning"),
	    '\0', DIS_MES_LEN);

	ce = CatalogGetCleaningVolume(library->un->eq, &ced);

	if (ce == NULL) {
		memccpy(l_mess,
		    catgets(catfd, SET, 9026,
	    "no cleaning cartridge available"),
		    '\0', DIS_MES_LEN);
		DevLog(DL_ERR(5141));
		SendCustMsg(HERE, 9347);
		mutex_lock(&drive->mutex);
		drive->status.b.cln_inprog = FALSE;
		down_drive(drive, SAM_STATE_CHANGE);
		mutex_lock(&drive->un->mutex);
		drive->un->status.bits &= ~DVST_REQUESTED;
		mutex_unlock(&drive->un->mutex);
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, EAGAIN);
		return;
	} else {

		status &= ~CES_occupied;
		(void) CatalogSetFieldByLoc(ce->CeEq, ce->CeSlot, ce->CePart,
		    CEF_Status, status, CES_occupied);


	}


	if (library->un->equ_type == DT_3570C) {
		clean_3570(drive, event, ce);
		return;
	}
	mutex_lock(&drive->mutex);

	if (IS_GENERIC_API(library->un->type)) {
		int		local_retry, d_errno, last_derrno = -1;
		api_errs_t	ret;
		char	   *tag = "load on clean";

		local_retry = 3;
		ret = API_ERR_TR;

		while (local_retry > 0) {
			if (aci_load_media(library, drive, ce, &d_errno) == 0)
				break;
			else {
				/* Error return on api call */
				if (d_errno == 0) {
					/*
					 * if call did not happen - error
					 * return but no error
					 */
					local_retry = -1;
					d_errno = EAMUCOMM;
				} else if ((last_derrno == -1) ||
				    (last_derrno != d_errno)) {
					/* Save error if repeated */
					last_derrno = d_errno;
					if (api_valid_error(library->un->type,
					    d_errno, library->un)) {
				/* Indentation for cstyle */
				if (library->un->slot != ROBOT_NO_SLOT) {
					DevLog(DL_DEBUG(6001),
					    library->un->slot, tag, d_errno,
					    d_errno, api_return_message(
					    library->un->type, d_errno));
				} else {
					DevLog(DL_DEBUG(6043), tag, d_errno,
					    d_errno, api_return_message(
					    library->un->type, d_errno));
				}

				local_retry = api_return_retry(
				    library->un->type, d_errno);
				ret = api_return_degree(
				    library->un->type, d_errno);
					} else {
						local_retry = -2;
					}
				}
				if (local_retry > 0) {
					/* delay before retrying */
					local_retry--;
					if (local_retry > 0)
						sleep(api_return_sleep(
						    library->un->type,
						    d_errno));
				}
			}
		}
		if (d_errno != EOK) {
			DevLog(DL_ERR(6036), ce->CeBarCode);
			memccpy(drive->un->dis_mes[DIS_MES_CRIT],
			    catgets(catfd, SET, 9029,
			"unable to load cleaning cartridge, move failed"),
			    '\0', DIS_MES_LEN);

			if (local_retry == -1) {
				/* The call didn't happen */
				DevLog(DL_ERR(6040), tag);
			} else if (local_retry == 0) {
				/* retries exceeded */
				DevLog(DL_ERR(6039), tag);
			} else {
				if (api_valid_error(drive->library->un->type,
				    d_errno, drive->library->un)) {
					if (drive->library->un->slot !=
					    ROBOT_NO_SLOT) {
						DevLog(DL_ERR(6001),
						    drive->library->un->slot,
						    tag, d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
					} else {
						DevLog(DL_ERR(6043), tag,
						    d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
					}
				}
			}

			if (ret == API_ERR_DD)
				down_drive(drive, SAM_STATE_CHANGE);
			else if (ret == API_ERR_DM)
				set_bad_media(un);
			else if (ret == API_ERR_DL)
				down_library(library, SAM_STATE_CHANGE);

			drive->status.b.cln_inprog = FALSE;
			mutex_lock(&drive->un->mutex);
			drive->un->status.bits &= ~(DVST_CLEANING |
			    DVST_REQUESTED);
			mutex_unlock(&drive->un->mutex);
			mutex_unlock(&drive->mutex);
			disp_of_event(library, event, EIO);
			return;
		}
	} else {
		move_flags.bits = 0;
		memccpy(d_mess,
		    catgets(catfd, SET, 9009, "waiting for media changer"),
		    '\0', DIS_MES_LEN);
		/*
		 * SPECTRA LOGIC NOTE:  In 3.3.1, the invert argument on this
		 * move was set to one.  This apparently was done for the
		 * spectra-logic robot which overloaded the invert argument
		 * to be the clean argument (See scsi_command case
		 * SCMD_MOVE_MEDIUM).  Unfortunately, the Qualstar robot,
		 * which is mapped to the spectra-logic, implements the
		 * mailbox control for the same bit that spectra-logic uses
		 * as clean.  Confused? Yep.
		 *
		 * Now to add to the confusion. Somewhere in the catalog
		 * rewrite, for reasons lost in the mists of time, the invert
		 * argument was set to zero.  This is inadverentately half
		 * the fix for snap 4966.  It simplifies things, ignoring any
		 * "special" cleaning logic and treating the cleaning tape as
		 * an ordinary load/unload. It seems to work.
		 *
		 * See #ifdef UNKNOWN_SPECTRA_LOGIC below for the other half of
		 * the fix.
		 */
		if (move_media(library, 0, ELEMENT_ADDRESS(library, ce->CeSlot),
		    drive->element, 0, move_flags)) {
			memccpy(drive->un->dis_mes[DIS_MES_CRIT],
			    catgets(catfd, SET, 9029,
			"unable to load cleaning cartridge, move failed"),
			    '\0', DIS_MES_LEN);

			DevLog(DL_ERR(5143));
			down_drive(drive, SAM_STATE_CHANGE);
			drive->status.b.cln_inprog = FALSE;
			mutex_unlock(&drive->mutex);
			disp_of_event(library, event, EIO);
			return;
		}
	}

	mutex_unlock(&drive->mutex);

	/*
	 * Log successful mount of cleaning tape
	 */
	DevLog(DL_ALL(10042), drive->un->eq);
	tapeclean_media(drive->un);

	/*
	 * move_media does not set up the un, so UpdateCatalog can't be
	 * called from here. Using generic_get_media instead of move_media
	 * leaves the drive hung up.
	 */
	status &= ~CES_occupied;
	(void) CatalogSetFieldByLoc(ce->CeEq, ce->CeSlot, ce->CePart,
	    CEF_Status, status, CES_occupied);
	access_count = ce->CeAccess;
	access_count--;
	(void) CatalogSetFieldByLoc(ce->CeEq, ce->CeSlot, ce->CePart,
	    CEF_Access, access_count, 0);
	(void) CatalogSetFieldByLoc(ce->CeEq, ce->CeSlot, ce->CePart,
	    CEF_MountTime, time(NULL), 0);

	retry = 7;
	err = 0;

	if (IS_GENERIC_API(library->un->equ_type)) {
		char	   *tag = "unload for clean";
		int		local_retry, d_errno, last_derrno;
		api_errs_t	ret;
		do {
			memccpy(d_mess,
			    catgets(catfd, SET, 9030,
			    "waiting for cleaning cycle"),
			    '\0', DIS_MES_LEN);
			sleep(3 * 60);	/* wait 3 minutes */
			tapeclean_media(drive->un);
			mutex_lock(&drive->mutex);
			memccpy(d_mess,
			    catgets(catfd, SET, 9031,
			    "attempt to unload cleaning cartridge"),
			    '\0', DIS_MES_LEN);

			local_retry = 3;
			ret = API_ERR_TR;
			last_derrno = -1;

			while (local_retry > 0) {
				/*
				 * vsn is not set, use aci_force_media()
				 * instead of aci_dismount_media()
				 */
				if (aci_force_media(library, drive,
				    &d_errno) == 0)
					break;
				else {
					/* Error return on api call */
					if (d_errno == 0) {
						/*
						 * if call did not happen -
						 * error return but no error
						 */
						local_retry = -1;
						d_errno = EAMUCOMM;
					} else if ((last_derrno == -1) ||
					    (last_derrno != d_errno)) {
						/* Save error if repeated */
						last_derrno = d_errno;
						if (api_valid_error(
						    drive->library->un->type,
						    d_errno,
						    drive->library->un)) {

						if (drive->library->un->slot !=
						    ROBOT_NO_SLOT) {
						DevLog(DL_DEBUG(6001),
						    drive->library->un->slot,
						    tag, d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
							} else {
						DevLog(DL_DEBUG(6043),
						    tag, d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
						}

						local_retry = api_return_retry(
						    library->un->type, d_errno);
						ret = api_return_degree(
						    library->un->type, d_errno);
						} else {
							local_retry = -2;
						}
					}
					if (local_retry > 0) {
						/* delay before retrying */
						local_retry--;
						if (local_retry > 0)
							sleep(api_return_sleep(
							    library->un->type,
							    d_errno));
					}
				}
			}
			if (d_errno != EOK) {
				DevLog(DL_ERR(6033), ce->CeBarCode);

				if (local_retry == -1) {
					/* The call didn't happen */
					DevLog(DL_ERR(6040), tag);
				} else if (local_retry == 0) {
					/* retries exceeded */
					DevLog(DL_ERR(6039), tag);
				} else {
					if (api_valid_error(
					    drive->library->un->type,
					    d_errno, drive->library->un))
						if (drive->library->un->slot !=
						    ROBOT_NO_SLOT) {
						DevLog(DL_ERR(6001),
						    drive->library->un->slot,
						    tag, d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
						} else {
						DevLog(DL_ERR(6043),
						    d_errno, d_errno,
						    api_return_message(
						    library->un->type,
						    d_errno));
						}
				}
				if (ret == API_ERR_DL)
					down_library(library, SAM_STATE_CHANGE);
				else if (ret == API_ERR_DD)
					down_drive(drive, SAM_STATE_CHANGE);
			}
			mutex_unlock(&drive->mutex);
		} while (err != 0 && retry-- != 0);
	} else {
		/*
		 * SPECTRA LOGIC NOTE:  Due to the removal of "special"
		 * cleaning code, we must unload the cleaning tape for all
		 * robot types.
		 *
		 * See the previous SPECTRA LOGIC NOTE:
		 */
#ifdef UNKNOWN_SPECTRA_LOGIC
		if (library->un->equ_type != DT_SPECLOG)
#endif
			do {
				memccpy(d_mess,
				    catgets(catfd, SET, 9030,
				    "wait for cleaning cycle"),
				    '\0', DIS_MES_LEN);
				sleep(3 * 60);	/* wait 3 minutes */
				tapeclean_media(drive->un);
				mutex_lock(&drive->mutex);
				DevLog(DL_DETAIL(5077));
				memccpy(d_mess,
				    catgets(catfd, SET, 9031,
				    "attempt to unload cleaning cartridge"),
				    '\0', DIS_MES_LEN);
				err = move_media(library, 0, drive->element,
				    ELEMENT_ADDRESS(library, ce->CeSlot),
				    0, move_flags);
				if (err) {
					DevLog(DL_ERR(5078), retry);
				} else {
					DevLog(DL_DETAIL(5079));
				}
				mutex_unlock(&drive->mutex);
			} while (err != 0 && retry-- != 0);
	}

	tapeclean_media(drive->un);

	if (err != 0) {
		DevLog(DL_ERR(5080));
		memccpy(drive->un->dis_mes[DIS_MES_CRIT],
		    catgets(catfd, SET, 9032,
		    "unable to unload cleaning cartridge"),
		    '\0', DIS_MES_LEN);
		mutex_lock(&drive->mutex);
		drive->status.b.cln_inprog = FALSE;
		down_drive(drive, SAM_STATE_CHANGE);
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, EIO);
		return;
	}
	status = CES_occupied;
	if (drive->un->status.b.bad_media) {
		/* cleaning media marked in catalog as bad */
		status |= CES_bad_media;

		/* reset bad media flag */
		drive->un->status.b.bad_media = 0;
	}
	(void) CatalogSetFieldByLoc(library->un->eq, ce->CeSlot, 0,
	    CEF_Status, status, 0);

	DevLog(DL_ALL(5334), access_count);

	if ((status & CES_bad_media) == 0) {
		memccpy(d_mess,
		    catgets(catfd, SET, 9034, "drive has been cleaned"),
		    '\0', DIS_MES_LEN);
	}
	mutex_lock(&drive->mutex);
	drive->status.b.cln_inprog = FALSE;
	mutex_lock(&drive->un->mutex);
	if ((status & CES_bad_media) == 0) {
		/* drive was cleaned */
		drive->un->status.bits &= ~(DVST_CLEANING | DVST_REQUESTED);
	} else {
		/* drive was not cleaned, it needs to still be cleaned */
		drive->un->status.bits &= ~(DVST_REQUESTED);
	}
	mutex_unlock(&drive->un->mutex);
	mutex_unlock(&drive->mutex);
	if (ce->CeAccess == 0 || (status & CES_bad_media)) {
		char	   *MES_9035 = catgets(catfd, SET, 9035,
		    "cleaning cartridge in slot %d has expired");

		char *mess = (char *)malloc_wait(
		    strlen(MES_9035) + 15, 2, 0);
		sprintf(mess, MES_9035, ce->CeSlot);
		memccpy(l_mess, mess, '\0', DIS_MES_LEN);
		free(mess);
		switch (library->un->type) {
		case DT_METD28:
		case DT_DLT2700:
		case DT_GRAUACI:
			DevLog(DL_ERR(5144), ce->CeSlot);
			break;

		default:
			schedule_export(library, ce->CeSlot);
			DevLog(DL_ERR(5145), ce->CeSlot);
			break;
		}
	} else if (tapeclean_drive(drive->un)) {
		memccpy(l_mess,
		    catgets(catfd, SET, 2983, "clean failed"),
		    '\0', DIS_MES_LEN);
		DevLog(DL_ERR(5364));
		mutex_lock(&drive->mutex);
		down_drive(drive, SAM_STATE_CHANGE);
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, EIO);
		return;
	}
	disp_of_event(library, event, 0);
}
Beispiel #15
0
/*
 * audit - start auditing
 *
 * Audits a single slot, or, when slot == ROBOT_NO_SLOT, cooperatively
 * sweeps every storage slot in the library (each auditing drive pulls
 * the next slot index from library->audit_index).  For each auditable
 * cartridge the media is mounted in this drive, scanned, and the
 * catalog updated; two-sided (optical) media gets both sides scanned.
 *
 * drive      - drive to use for the audit (must not be the library un)
 * slot       - slot to audit, or ROBOT_NO_SLOT for a full audit
 * audit_eod  - nonzero: position to EOD on labeled tapes to get exact
 *              remaining space
 */
void
audit(
	drive_state_t *drive,	/* drive state pointer */
	const uint_t slot,		/* slot to audit */
	const int audit_eod)
{				/* flag to find eod during audit */
	int		part, err;
	uint_t	   myslot = 0;
	dev_ent_t	*un;
	sam_defaults_t *defaults;
	struct CatalogEntry ced;
	struct CatalogEntry *ce = &ced;
	int		skip_audit_eod = 0;

	defaults = GetDefaults();

	SANITY_CHECK(drive != (drive_state_t *)0);
	SANITY_CHECK(drive->library != (library_t *)0);
	SANITY_CHECK(drive->library->un != (dev_ent_t *)0);
	SANITY_CHECK(drive->library->un != drive->un);
	un = drive->un;

	/* A full audit makes no sense on a generic-API (vendor) library. */
	if ((slot == ROBOT_NO_SLOT) &&
	    IS_GENERIC_API(drive->library->un->type)) {
		DevLog(DL_ERR(6004));
		return;
	}
	mutex_lock(&drive->mutex);

	/* Empty the drive first if it currently holds media. */
	if (drive->status.b.full) {
		mutex_lock(&un->mutex);
		un->status.b.requested = TRUE;
		mutex_unlock(&un->mutex);
		if (clear_drive(drive)) {
			/*
			 * NOTE(review): requested is set TRUE again here,
			 * leaving the unit flagged on the failure path --
			 * confirm this is not meant to be FALSE.
			 */
			mutex_lock(&un->mutex);
			un->status.b.requested = TRUE;
			mutex_unlock(&un->mutex);
			mutex_unlock(&drive->mutex);
			return;
		}
		if (drive->open_fd >= 0) {
			mutex_lock(&un->mutex);
			close_unit(un, &drive->open_fd);
			DEC_OPEN(un);
			mutex_unlock(&un->mutex);
		}
	}
	mutex_unlock(&drive->mutex);

	mutex_lock(&un->mutex);
	un->status.b.requested = TRUE;
	un->status.b.labeled = FALSE;
	un->status.b.ready = FALSE;
	mutex_unlock(&un->mutex);

	if (slot == ROBOT_NO_SLOT) {
		/* Join the pool of drives doing the full audit. */
		mutex_lock(&drive->library->mutex);
		drive->library->countdown--;
		drive->library->drives_auditing++;
		mutex_unlock(&drive->library->mutex);

		/*
		 * ok not to lock here wait for all drives to clear
		 */
		while (drive->library->countdown > 0)
			sleep(4);
	}
	for (;;) {

		mutex_lock(&drive->mutex);
		if (slot == ROBOT_NO_SLOT) {
			/* get the next slot number (s) */
			mutex_lock(&drive->library->mutex);
			myslot = drive->library->audit_index;
			if (myslot <= drive->library->range.storage_count) {
				drive->library->audit_index++;
				mutex_unlock(&drive->library->mutex);
			} else {
				/* No more slots to audit */
				mutex_unlock(&drive->library->mutex);
				mutex_lock(&drive->library->un->mutex);
				drive->library->un->status.b.mounted = TRUE;
				drive->library->un->status.b.audit = FALSE;
				drive->library->un->status.b.ready = TRUE;
				mutex_unlock(&drive->library->un->mutex);
				if (drive->status.b.full) {
					clear_drive(drive);
					/*
					 * Only close the unit if it was
					 * actually open; the lock, close
					 * and unlock must all sit under
					 * the same guard (matches the
					 * close logic at function entry).
					 */
					if (drive->open_fd >= 0) {
						mutex_lock(&un->mutex);
						close_unit(un, &drive->open_fd);
						mutex_unlock(&un->mutex);
					}
				}
				mutex_lock(&un->mutex);
				un->status.b.requested = FALSE;
				mutex_unlock(&un->mutex);
				mutex_unlock(&drive->mutex);
				return;
			}
		} else {
			/* get specific slot */
			myslot = slot;
		}

		/*
		 * Should we audit this media? (is occupied, not cleaning and
		 * is a sam tape)
		 */
		if (drive->library->status.b.two_sided) {
			part = 1;
		} else {
			part = 0;
		}
		ce = CatalogGetCeByLoc(drive->library->un->eq,
			    myslot, part, &ced);
		if (ce == NULL ||
		    (!(ce->CeStatus & CES_occupied)) ||
		    (ce->CeStatus & CES_cleaning) ||
		    (ce->CeStatus & CES_non_sam)) {

			mutex_unlock(&drive->mutex);
			if (slot != ROBOT_NO_SLOT) {	/* only one slot */
				mutex_lock(&un->mutex);
				un->status.b.requested = FALSE;
				mutex_unlock(&un->mutex);
				return;
			}
			continue;
		}
		/*
		 * The following lines of code get a tape mounted, or if
		 * two-sided media, mounts the "A" side.
		 */
		err = get_media(drive->library, drive, NULL, ce);

		if (err) {
			mutex_lock(&un->mutex);
			un->status.b.requested = FALSE;
			DEC_ACTIVE(un);
			mutex_unlock(&un->mutex);
			mutex_unlock(&drive->mutex);
			return;
		}
		mutex_lock(&un->mutex);
		un->status.b.scanning = TRUE;
		mutex_unlock(&un->mutex);
		if (spin_drive(drive, SPINUP, NOEJECT)) {
			/* Spin-up failed: back out and give up the audit. */

			mutex_lock(&drive->un->mutex);
			/*
			 * NOTE(review): masking the one-bit field b.scanning
			 * with ~DVST_SCANNING looks odd; was status.bits
			 * intended here?  Left as-is -- confirm.
			 */
			drive->un->status.b.scanning &= ~DVST_SCANNING;
			drive->un->status.bits &= ~DVST_REQUESTED;
			mutex_unlock(&drive->un->mutex);

			if (un->state > DEV_ON) {
				clear_drive(drive);
				mutex_lock(&un->mutex);
				clear_driver_idle(drive, drive->open_fd);
				DEC_ACTIVE(un);
				close_unit(un, &drive->open_fd);
				mutex_unlock(&un->mutex);
				mutex_unlock(&drive->mutex);
			} else {
				mutex_lock(&un->mutex);
				clear_driver_idle(drive, drive->open_fd);
				DEC_ACTIVE(un);
				close_unit(un, &drive->open_fd);
				mutex_unlock(&un->mutex);
				mutex_unlock(&drive->mutex);
			}
			SendCustMsg(HERE, 9348);
			DevLog(DL_ERR(5218));
			return;
		}
		un->status.bits |= DVST_AUDIT;

		un->mid = ce->CeMid;
		un->status.b.labeled = FALSE;
		un->i.ViPart = ce->CePart;
		scan_a_device(un, drive->open_fd);
		if (drive->status.b.bar_code) {
			(void) CatalogSetStringByLoc(drive->library->un->eq,
			ce->CeSlot, ce->CePart,
			CEF_BarCode, (char *)drive->bar_code);
		}
		/*
		 * If the cleaning light came on while scanning, leave the
		 * audit bit set and unload the drive.
		 */
		if (un->status.bits & DVST_CLEANING) {
			mutex_lock(&un->mutex);
			un->mtime = 0;
			DEC_ACTIVE(un);
			close_unit(un, &drive->open_fd);
			un->status.b.requested = FALSE;
			mutex_unlock(&un->mutex);
			clear_drive(drive);
			mutex_unlock(&drive->mutex);
			return;
		} else {
			un->status.bits &= ~DVST_AUDIT;
		}

		mutex_lock(&un->mutex);
		/*
		 * This next check keeps us from auditing media that is not
		 * really labeled (label lie). I'm not sure why the un->mutex
		 * is held for this.
		 */
		if (!un->status.b.labeled &&
		    (ce->CeStatus & CES_bar_code) &&
		    (defaults->flags & DF_LABEL_BARCODE)) {
			int tmp;

			if (IS_TAPE(un)) {
				tmp = LEN_TAPE_VSN;
			} else {
				tmp = LEN_OPTIC_VSN;
			}
			vsn_from_barcode(un->vsn, ce->CeBarCode, defaults, tmp);
			un->status.b.labeled = TRUE;
			un->space = un->capacity;
			skip_audit_eod = 1;
		}
		if (IS_TAPE(un)) {
			if (un->status.b.labeled &&
			    audit_eod && !skip_audit_eod) {
				/* Position to EOD to learn the real space. */
				DevLog(DL_DETAIL(5074), un->vsn);
				mutex_unlock(&un->mutex);
				mutex_lock(&un->io_mutex);
				tape_append(drive->open_fd, un, NULL);
				mutex_unlock(&un->io_mutex);
				mutex_lock(&un->mutex);
			} else {
				if (!un->status.b.labeled) {
					un->space = un->capacity;
				} else {
					un->space = ce->CeSpace;
				}
			}
		}
		UpdateCatalog(un, 0, CatalogVolumeLoaded);

		/*
		 * Now do the "B" side if this is optical media.
		 * flip_and_scan calls CatalogVolumeLoaded so it is not done
		 * here.
		 */
		if (drive->library->status.b.two_sided && (ce->CePart == 1)) {
			mutex_unlock(&un->mutex);
			if (flip_and_scan(ce->CePart, drive)) {
				clear_drive(drive);
				mutex_lock(&un->mutex);
				un->status.b.requested = FALSE;
				clear_driver_idle(drive, drive->open_fd);
				close_unit(un, &drive->open_fd);
				DEC_ACTIVE(un);
				mutex_unlock(&un->mutex);
				mutex_unlock(&drive->mutex);
				return;
			}
			mutex_unlock(&drive->mutex);
		} else {
			mutex_unlock(&un->mutex);
			mutex_unlock(&drive->mutex);
		}

		mutex_lock(&un->mutex);
		close_unit(un, &drive->open_fd);
		DEC_ACTIVE(un);
		un->status.b.requested = TRUE;
		mutex_unlock(&un->mutex);

		if (slot != ROBOT_NO_SLOT) {	/* only one slot */
			mutex_lock(&un->mutex);
			un->status.b.requested = FALSE;
			mutex_unlock(&un->mutex);
			mutex_lock(&drive->library->un->mutex);
			drive->library->un->status.b.mounted = TRUE;
			mutex_unlock(&drive->library->un->mutex);
			return;
		}
	}
}
Beispiel #16
0
/*
 * clean_3570 - attempt to load cleaning tape into 3570.
 *
 * Loads the cleaning cartridge, polls the drive with TEST UNIT READY
 * until the cleaning cycle finishes (or ~30 minutes of retries are
 * exhausted), then unloads the cartridge.  On a failed load or unload
 * the drive is downed and the event disposed with EIO.
 */
void
clean_3570(
	drive_state_t *drive,
	robo_event_t *event,
	struct CatalogEntry *ce)
{
	int		retry;
	char	   *dev_name;
	char	   *d_mess = drive->un->dis_mes[DIS_MES_NORM];
	dev_ent_t	*un = drive->un;
	library_t	*library = drive->library;
	move_flags_t    move_flags;

	mutex_lock(&drive->mutex);
	move_flags.bits = 0;
	/*
	 * The 3570 does not return from the move until the cleaning cycle
	 * has completed.
	 */
	memccpy(d_mess, catgets(catfd, SET, 9030, "wait for cleaning cycle"),
	    '\0', DIS_MES_LEN);
	if (generic_get_media(library, drive, event, ce)) {
		memccpy(drive->un->dis_mes[DIS_MES_CRIT],
		    catgets(catfd, SET, 9029,
		    "unable to load cleaning cartridge, move failed"),
		    '\0', DIS_MES_LEN);

		DevLog(DL_ERR(5145), ce->CeSlot);
		down_drive(drive, SAM_STATE_CHANGE);
		drive->status.b.cln_inprog = FALSE;
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, EIO);
		return;
	}
	mutex_unlock(&drive->mutex);
	sleep(4);
	dev_name = samst_devname(un);
	mutex_lock(&un->mutex);
	drive->open_fd = open_unit(un, dev_name, 10);
	mutex_unlock(&un->mutex);
	free(dev_name);

	un->i.ViEq = un->fseq;
	un->i.ViSlot = un->slot;
	un->i.ViPart = 0;
	un->i.ViFlags = VI_cart;
	UpdateCatalog(drive->un, 0, CatalogVolumeLoaded);

	/*
	 * Wait for cleaning to finish.  un->io_mutex is acquired and
	 * released inside every loop iteration so no path can re-lock a
	 * held mutex (success path) or unlock an unheld one (retries
	 * exhausted), both of which the previous structure allowed.
	 */

	retry = 60;
	while (retry--) {
		int	ready;
		sam_extended_sense_t *sense = (sam_extended_sense_t *)
		    SHM_REF_ADDR(un->sense);

		mutex_lock(&un->io_mutex);
		memset(sense, 0, sizeof (sam_extended_sense_t));
		ready = (scsi_cmd(drive->open_fd, un, SCMD_TEST_UNIT_READY,
		    20) == 0 && sense->es_key == 0);
		/* 06/82/83: cleaning complete, cartridge waiting to unload */
		if (!ready &&
		    sense->es_key == 0x06 &&
		    sense->es_add_code == 0x82 &&
		    sense->es_qual_code == 0x83)
			ready = TRUE;
		mutex_unlock(&un->io_mutex);

		if (ready)
			break;

		/* 02/30/03: cleaning cycle still in progress */
		if (sense->es_key == 0x02 &&
		    sense->es_add_code == 0x30 &&
		    sense->es_qual_code == 0x03) {
			sleep(30);
			continue;
		}
		/* Unexpected sense: show it on the display and retry. */
		sprintf(d_mess, "sense %x, %x, %x", sense->es_key,
		    sense->es_add_code, sense->es_qual_code);
		sleep(10);
	}
	if (retry <= 0)
		DevLog(DL_ERR(5216));

	memccpy(d_mess, catgets(catfd, SET, 9034, "drive has been cleaned"),
	    '\0', DIS_MES_LEN);
	mutex_lock(&un->mutex);
	close_unit(un, &drive->open_fd);
	mutex_unlock(&un->mutex);
	mutex_lock(&drive->mutex);
	move_flags.bits = 0;
	memccpy(d_mess,
	    catgets(catfd, SET, 9009, "waiting for media changer"),
	    '\0', DIS_MES_LEN);
	if (move_media(library, 0, drive->element, 0xff, 1, move_flags)) {
		memccpy(drive->un->dis_mes[DIS_MES_CRIT],
		    catgets(catfd, SET, 9032,
		    "unable to unload cleaning cartridge"),
		    '\0', DIS_MES_LEN);
		DevLog(DL_ERR(5147));
		drive->status.b.cln_inprog = FALSE;
		down_drive(drive, SAM_STATE_CHANGE);
		mutex_unlock(&drive->mutex);
		disp_of_event(library, event, EIO);
		return;
	}
	if (CatalogVolumeUnloaded(&un->i, "") == -1) {
		DevLog(DL_SYSERR(5336), ce->CeSlot);
	}
	drive->status.b.cln_inprog = FALSE;
	mutex_lock(&drive->un->mutex);
	drive->un->status.bits &= ~(DVST_CLEANING | DVST_REQUESTED);
	un->label_time = 0;
	mutex_unlock(&drive->un->mutex);
	mutex_unlock(&drive->mutex);
	disp_of_event(library, event, 0);
}
Beispiel #17
0
/*
 *	init_elements - get status for all elements in the library.
 *
 * exit -
 */
int				/* 0 = all ok !0 = failure */
init_elements(
		library_t *library)
{
	uint16_t	count, start_element;
	uint16_t	avail_drives;
	int		i, err, conlevel = 5;
	size_t	  retry;
	dev_ent_t	*un;
	char	   *drv_tbl;
	mode_sense_t   *mode_sense;
	drive_state_t  *drive;
	xport_state_t  *xport;
	iport_state_t  *import;
	robot_ms_page1d_t *pg1d = NULL;
	robot_ms_page1e_t *pg1e = NULL;
	robot_ms_page1f_t *pg1f = NULL;
	sam_extended_sense_t *sense;

	SANITY_CHECK(library != (library_t *)0);
	un = library->un;
	SANITY_CHECK(un != (dev_ent_t *)0);

	/* Put mode sense data into shared memory. */

	/* LINTED pointer cast may result in improper alignment */
	mode_sense = (mode_sense_t *)SHM_REF_ADDR(un->mode_sense);
	sense = (sam_extended_sense_t *)SHM_REF_ADDR(un->sense);
	SANITY_CHECK(mode_sense != (mode_sense_t *)0);
	SANITY_CHECK(sense != (sam_extended_sense_t *)0);
	(void) memset(mode_sense, 0, sizeof (mode_sense_t));

	mutex_lock(&un->io_mutex);
	pg1d = (robot_ms_page1d_t *)lib_mode_sense(library, 0x1d,
	    (uchar_t *)& mode_sense->u.robot_ms.pg1d,
	    sizeof (robot_ms_page1d_t));
	pg1f = (robot_ms_page1f_t *)lib_mode_sense(library, 0x1f,
	    (uchar_t *)& mode_sense->u.robot_ms.pg1f,
	    sizeof (robot_ms_page1f_t));
	pg1e = (robot_ms_page1e_t *)lib_mode_sense(library, 0x1e,
	    (uchar_t *)& mode_sense->u.robot_ms.pg1e,
	    sizeof (robot_ms_page1e_t));
	mutex_unlock(&un->io_mutex);

	if (pg1d == NULL || pg1f == NULL || pg1e == NULL) {
		DevLog(DL_ERR(5115));
		return (1);
	}
	library->status.b.two_sided = pg1e->transport_sets[0].rotate;
	if (un->type == DT_CYGNET)
		library->status.b.two_sided = 0;

	/* Allocate the drive tables. */
	BE16toH(&pg1d->first_drive, &start_element);
	BE16toH(&pg1d->num_drive, &count);
	library->range.drives_lower = start_element;
	library->range.drives_count = count;
	library->range.drives_upper = start_element + count - 1;

	/*
	 * This code is currently applied to IBM3584 only since the IBM3584
	 * returns a valid status if drive unit is not installed in a
	 * library. ASC/ASCQ:0x82/0x00. May need to add other library types
	 * to this check, check scsi docs.
	 *
	 * If drive is not fully populated and there is an empty slot for the
	 * drive, we don't need to create a redundant drive_thread.
	 */
	avail_drives = count;
	drv_tbl = malloc_wait(count, 2, 0);
	(void) memset(drv_tbl, TRUE, count);
	if (DT_IBM3584 == un->type)
		if ((avail_drives =
		    (uint16_t)populate_drives(library, drv_tbl)) == 0) {
			/*
			 * No drives installed, assum fully populated.
			 */
			DevLog(DL_ERR(5361));
			avail_drives = count;
			(void) memset(drv_tbl, TRUE, count);
		} else if (avail_drives > count) {
			avail_drives = count;
		}
	DevLog(DL_DETAIL(5362), avail_drives);

	/* one for the drive, one for stage and one for the stage helper */
	conlevel += (avail_drives * 3);

	library->drive = (drive_state_t *)malloc_wait(
	    sizeof (drive_state_t), 5, 0);
	library->index = library->drive;
	(void) memset(library->drive, 0, sizeof (drive_state_t));

	/*
	 * For each drive, build the drive state structure, put the init
	 * request on the list and start a thread with a new lwp.
	 */
	for (drive = library->drive, i = 0;
	    i < (int)count && avail_drives > 0; i++) {

		if (drv_tbl[i] == FALSE) {
			continue;
		}
		/* assign element number */
		drive->element = start_element + i;
		drive->library = library;
		/* hold the lock until ready */
		mutex_lock(&drive->mutex);
		drive->new_slot = ROBOT_NO_SLOT;
		drive->open_fd = -1;
		drive->active_count = 1;
		drive->first = (robo_event_t *)malloc_wait(
		    sizeof (robo_event_t), 5, 0);
		(void) memset(drive->first, 0, sizeof (robo_event_t));
		drive->first->type = EVENT_TYPE_INTERNAL;
		drive->first->status.bits = REST_FREEMEM;
		drive->first->request.internal.command = ROBOT_INTRL_INIT;
		if (thr_create(NULL, MD_THR_STK, &drive_thread, (void *) drive,
		    (THR_NEW_LWP | THR_BOUND | THR_DETACHED),
		    &drive->thread)) {
			DevLog(DL_SYSERR(5116));
			drive->status.b.offline = TRUE;
			drive->thread = (thread_t)- 1;
		}
		if (--avail_drives <= 0) {
			break;
		} else {
			/* Allocate next entry */
			drive->next = (drive_state_t *)malloc_wait(
			    sizeof (drive_state_t), 5, 0);
			(void) memset(drive->next, 0, sizeof (drive_state_t));
			drive->next->previous = drive;	/* set back link */
			drive = drive->next;
		}
	}

	drive->next = NULL;	/* no next drive */
	library->drive->previous = NULL;	/* no previous drive */
	free(drv_tbl);

	/* Allocate transport tables */

	BE16toH(&pg1d->first_tport, &start_element);
	BE16toH(&pg1d->num_tport, &count);
	library->range.transport_lower = start_element;
	library->range.transport_count = count;
	library->range.transport_upper = start_element + count - 1;
	library->range.default_transport = 0;
	library->page1f = pg1f;
	conlevel += count;
	library->transports =
	    (xport_state_t *)malloc_wait(sizeof (xport_state_t), 5, 0);
	(void) memset(library->transports, 0, sizeof (xport_state_t));

	for (xport = library->transports, i = 0; i < (int)count; i++) {
		/* assign element number */
		xport->element = start_element + i;
		xport->library = library;

		mutex_lock(&xport->mutex);
		/* start only one transport thread */
		if (i == 0) {
			xport->first =
			    (robo_event_t *)malloc_wait(
			    sizeof (robo_event_t), 5, 0);
			(void) memset(xport->first, 0, sizeof (robo_event_t));
			xport->first->type = EVENT_TYPE_INTERNAL;
			xport->first->status.bits = REST_FREEMEM;
			xport->first->request.internal.command =
			    ROBOT_INTRL_INIT;
			xport->active_count = 1;

			if (thr_create(NULL, SM_THR_STK,
			    &transport_thread, (void *) xport,
			    (THR_NEW_LWP | THR_BOUND | THR_DETACHED),
			    &xport->thread)) {
				DevLog(DL_SYSERR(5117));
				xport->thread = (thread_t)- 1;
			}
		}
		/* Allocate next entry */
		if (i != (count - 1)) {
			xport->next = (xport_state_t *)malloc_wait(
			    sizeof (xport_state_t), 5, 0);
			(void) memset(xport->next, 0, sizeof (xport_state_t));
			xport->next->previous = xport;	/* set back link */
			xport = xport->next;
		}
	}

	/* for the metrum d-360 the last transport is used with import export */
	xport->next = NULL;	/* no next transport */
	library->transports->previous = NULL;

	/* Allocate mailbox (import/export) tables */

	BE16toH(&pg1d->first_mail, &start_element);
	BE16toH(&pg1d->num_mail, &count);
	library->range.ie_lower = start_element;
	library->range.ie_count = count;
	if (count != 0)
		library->range.ie_upper = start_element + count - 1;
	else
		library->range.ie_upper = 0;

	conlevel += 1;		/* only one import/export thread */
	library->import = (iport_state_t *)malloc_wait(
	    sizeof (iport_state_t), 5, 0);
	(void) memset(library->import, 0, sizeof (iport_state_t));

	/* store the transport used in import/export for the metrum D-360 */
	if (un->type == DT_METD28)
		library->import->xport = xport;

	for (import = library->import, i = 0; i < (int)count; i++) {
		SANITY_CHECK(import != (iport_state_t *)0);
		/* assign element number */
		import->element = start_element + i;
		import->library = library;

		mutex_lock(&import->mutex);
		/* Create only one mailbox thread */
		if (i == 0) {
			import->active_count = 1;
			import->first = (robo_event_t *)malloc_wait(
			    sizeof (robo_event_t), 5, 0);
			(void) memset(import->first, 0, sizeof (robo_event_t));
			import->first->type = EVENT_TYPE_INTERNAL;
			import->first->status.bits = REST_FREEMEM;
			import->first->request.internal.command =
			    ROBOT_INTRL_INIT;
			if (thr_create(NULL, SM_THR_STK,
			    &import_thread, (void *) import,
			    (THR_DETACHED | THR_BOUND | THR_NEW_LWP),
			    &import->thread)) {
				DevLog(DL_SYSERR(5118));
				import->thread = (thread_t)- 1;
			}
		}
		if (i != (count - 1)) {	/* Allocate next entry */
			import->next = (iport_state_t *)malloc_wait(
			    sizeof (iport_state_t), 5, 0);
			(void) memset(import->next, 0, sizeof (iport_state_t));
			/* set back link */
			import->next->previous = import;
			import = import->next;
		}
	}

	import->next = NULL;	/* no next mailbox */
	SANITY_CHECK(library->import != (iport_state_t *)0);
	library->import->previous = NULL;

	/* allocate the audit table if needed */

	BE16toH(&pg1d->first_stor, &start_element);
	BE16toH(&pg1d->num_stor, &count);
	library->range.storage_lower = start_element;
	library->range.storage_count = count;
	library->range.storage_upper = start_element + count - 1;

	/* add for the import/export door slots */
	if (un->type == DT_ACL452)
		count += library->range.ie_count;

	DevLog(DL_DETAIL(5220), library->range.drives_count,
	    library->range.transport_count, library->range.storage_count,
	    library->range.ie_count);

	if (thr_setconcurrency(conlevel)) {
		DevLog(DL_SYSERR(5058));
	}
	/*
	 * If the audit table is the wrong length (based on the number of
	 * storage elements returned by mode-sense) or the audit bit is set,
	 * the set up for an audit.
	 */
	if ((library->audit_tab_len == 0) || un->status.b.audit) {
		int		added_more_time = FALSE;
		char	   *l_mess = un->dis_mes[DIS_MES_NORM];

		/*
		 * Audit table does not exist or is the wrong length.  This
		 * is generally a bad thing and  will force an initialize
		 * element scsi command and an audit. Both of these take a
		 * long time.
		 */
		/* tell the outside world */
		un->status.b.audit = TRUE;
		memccpy(l_mess, catgets(catfd, SET, 9022,
		    "initializing elements"),
		    '\0', DIS_MES_LEN);

		mutex_lock(&un->io_mutex);
		retry = 2;
		do {
			/*
			 * Allow 16 seconds for each storage element and 30
			 * seconds of slop.
			 */
			(void) memset(sense, 0, sizeof (sam_extended_sense_t));
			if ((err = scsi_cmd(library->open_fd, un,
			    SCMD_INIT_ELEMENT_STATUS,
			    (count << 4) + 30)) < 0) {
			TAPEALERT_SKEY(library->open_fd, un);
			GENERIC_SCSI_ERROR_PROCESSING(un,
			    library->scsi_err_tab, 0,
			    err, added_more_time, retry,
				/* code for DOWN_EQU */
			    down_library(library, SAM_STATE_CHANGE);
				mutex_unlock(&un->io_mutex);
				return (-1);
				/* MACRO for cstyle */,
				/* code for ILLREQ */
				    mutex_unlock(&un->io_mutex);
				return (-1);
				/* MACRO for cstyle */,
Beispiel #18
0
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);

    void* seg_addr = mmap((void*)seg_page_start,
                          file_end - file_page_start,
                          PFLAGS_TO_PROT(phdr->p_flags),
                          MAP_FIXED|MAP_PRIVATE,
                          fd_,
                          file_page_start);
    if (seg_addr == MAP_FAILED) {
      DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
      return false;
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}