Example 1
/*
  The system msync() only syncs the mmap'ed area to the fs cache;
  fsync() is required to really sync it to disk.
*/
int my_msync(int fd, void *addr, size_t len, int flags)
{
  msync(addr, len, flags);
  return my_sync(fd, MYF(0));
}
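
To make the comment concrete, here is a minimal usage sketch (not from the original source): after writing through an mmap'ed region, plain msync() would only reach the fs cache, so my_msync() is used to also fsync() the descriptor. The mapping length and the data written are assumptions for illustration.

#include <string.h>
#include <sys/mman.h>

/* Hypothetical caller; assumes my_msync() from above is declared. */
static int update_mapped_region(int fd, size_t length)
{
  char *map= (char*) mmap(NULL, length, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
  if (map == MAP_FAILED)
    return -1;
  memcpy(map, "header", 6);              /* modify the mapped pages */
  /* msync() alone stops at the fs cache; my_msync() adds the fsync(). */
  if (my_msync(fd, map, length, MS_SYNC))
  {
    munmap(map, length);
    return -1;
  }
  return munmap(map, length);
}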
int maria_extra(MARIA_HA *info, enum ha_extra_function function,
                void *extra_arg)
{
  int error= 0;
  ulong cache_size;
  MARIA_SHARE *share= info->s;
  my_bool block_records= share->data_file_type == BLOCK_RECORD;
  DBUG_ENTER("maria_extra");
  DBUG_PRINT("enter",("function: %d",(int) function));

  switch (function) {
  case HA_EXTRA_RESET_STATE:		/* Reset state (don't free buffers) */
    info->lastinx= ~0;			/* Detect index changes */
    info->last_search_keypage= info->cur_row.lastpos= HA_OFFSET_ERROR;
    info->page_changed= 1;
					/* Next/prev gives first/last */
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache,READ_CACHE,0,
		      (pbool) (info->lock_type != F_UNLCK),
                      (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED)
		      );
    }
    info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
		   HA_STATE_PREV_FOUND);
    break;
  case HA_EXTRA_CACHE:
    if (block_records)
      break;                                    /* Not supported */

    if (info->lock_type == F_UNLCK &&
	(share->options & HA_OPTION_PACK_RECORD))
    {
      error= 1;			/* Not possible if not locked */
      my_errno= EACCES;
      break;
    }
    if (info->s->file_map) /* Don't use cache if mmap */
      break;
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if ((share->options & HA_OPTION_COMPRESS_RECORD))
    {
      mysql_mutex_lock(&share->intern_lock);
      if (_ma_memmap_file(info))
      {
	/* We don't need MADV_SEQUENTIAL if the file is small */
	madvise((char*) share->file_map, share->state.state.data_file_length,
		share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ?
		MADV_RANDOM : MADV_SEQUENTIAL);
	mysql_mutex_unlock(&share->intern_lock);
	break;
      }
      mysql_mutex_unlock(&share->intern_lock);
    }
#endif
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      info->opt_flag&= ~WRITE_CACHE_USED;
      if ((error= end_io_cache(&info->rec_cache)))
	break;
    }
    if (!(info->opt_flag &
	  (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED)))
    {
      cache_size= (extra_arg ? *(ulong*) extra_arg :
		   my_default_record_cache_size);
      if (!(init_io_cache(&info->rec_cache, info->dfile.file,
			 (uint) MY_MIN(share->state.state.data_file_length+1,
				    cache_size),
			  READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|= READ_CACHE_USED;
	info->update&=   ~HA_STATE_ROW_CHANGED;
      }
      if (share->non_transactional_concurrent_insert)
	info->rec_cache.end_of_file= info->state->data_file_length;
    }
    break;
  case HA_EXTRA_REINIT_CACHE:
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache, READ_CACHE, info->cur_row.nextpos,
		      (pbool) (info->lock_type != F_UNLCK),
                      (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED));
      info->update&= ~HA_STATE_ROW_CHANGED;
      if (share->non_transactional_concurrent_insert)
	info->rec_cache.end_of_file= info->state->data_file_length;
    }
    break;
  case HA_EXTRA_WRITE_CACHE:
    if (info->lock_type == F_UNLCK)
    {
      error= 1;                        	/* Not possible if not locked */
      break;
    }
    if (block_records)
      break;                            /* Not supported */

    cache_size= (extra_arg ? *(ulong*) extra_arg :
		 my_default_record_cache_size);
    if (!(info->opt_flag &
	  (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) &&
	!share->state.header.uniques)
      if (!(init_io_cache(&info->rec_cache, info->dfile.file, cache_size,
                          WRITE_CACHE, info->state->data_file_length,
			  (pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|= WRITE_CACHE_USED;
	info->update&=   ~(HA_STATE_ROW_CHANGED |
                           HA_STATE_WRITE_AT_END |
                           HA_STATE_EXTEND_BLOCK);
      }
    break;
  case HA_EXTRA_PREPARE_FOR_UPDATE:
    if (info->s->data_file_type != DYNAMIC_RECORD)
      break;
    /* Remove read/write cache if dynamic rows */
    /* fall through */
  case HA_EXTRA_NO_CACHE:
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error= end_io_cache(&info->rec_cache);
      /* Sergei will insert full text index caching here */
    }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if (info->opt_flag & MEMMAP_USED)
      madvise((char*) share->file_map, share->state.state.data_file_length,
              MADV_RANDOM);
#endif
    break;
  case HA_EXTRA_FLUSH_CACHE:
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      if ((error= flush_io_cache(&info->rec_cache)))
      {
        /* Fatal error found */
        _ma_set_fatal_error(share, HA_ERR_CRASHED);
      }
    }
    break;
  case HA_EXTRA_NO_READCHECK:
    info->opt_flag&= ~READ_CHECK_USED;		/* No readcheck */
    break;
  case HA_EXTRA_READCHECK:
    info->opt_flag|= READ_CHECK_USED;
    break;
  case HA_EXTRA_KEYREAD:			/* Read only keys to record */
  case HA_EXTRA_REMEMBER_POS:
    info->opt_flag|= REMEMBER_OLD_POS;
    bmove(info->last_key.data + share->base.max_key_length*2,
	  info->last_key.data,
          info->last_key.data_length + info->last_key.ref_length);
    info->save_update=	info->update;
    info->save_lastinx= info->lastinx;
    info->save_lastpos= info->cur_row.lastpos;
    info->save_lastkey_data_length= info->last_key.data_length;
    info->save_lastkey_ref_length= info->last_key.ref_length;
    if (function == HA_EXTRA_REMEMBER_POS)
      break;
    /* fall through */
  case HA_EXTRA_KEYREAD_CHANGE_POS:
    info->opt_flag|= KEY_READ_USED;
    info->read_record= _ma_read_key_record;
    break;
  case HA_EXTRA_NO_KEYREAD:
  case HA_EXTRA_RESTORE_POS:
    if (info->opt_flag & REMEMBER_OLD_POS)
    {
      bmove(info->last_key.data,
	    info->last_key.data + share->base.max_key_length*2,
	    info->save_lastkey_data_length + info->save_lastkey_ref_length);
      info->update=	info->save_update | HA_STATE_WRITTEN;
      info->lastinx=	info->save_lastinx;
      info->cur_row.lastpos= info->save_lastpos;
      info->last_key.data_length= info->save_lastkey_data_length;
      info->last_key.ref_length= info->save_lastkey_ref_length;
      info->last_key.flag= 0;
    }
    info->read_record=	share->read_record;
    info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
    break;
  case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked against changes */
    info->lock_type= F_EXTRA_LCK; /* Simulate as locked */
    break;
  case HA_EXTRA_WAIT_LOCK:
    info->lock_wait= 0;
    break;
  case HA_EXTRA_NO_WAIT_LOCK:
    info->lock_wait= MY_SHORT_WAIT;
    break;
  case HA_EXTRA_NO_KEYS:
    /* we're going to modify pieces of the state, stall Checkpoint */
    mysql_mutex_lock(&share->intern_lock);
    if (info->lock_type == F_UNLCK)
    {
      mysql_mutex_unlock(&share->intern_lock);
      error= 1;					/* Not possible if not locked */
      break;
    }
    if (maria_is_any_key_active(share->state.key_map))
    {
      MARIA_KEYDEF *key= share->keyinfo;
      uint i;
      for (i =0 ; i < share->base.keys ; i++,key++)
      {
        if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1)
        {
          maria_clear_key_active(share->state.key_map, i);
          info->update|= HA_STATE_CHANGED;
        }
      }

      if (!share->changed)
      {
	share->changed= 1;			/* Update on close */
	share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED;
	if (!share->global_changed)
	{
	  share->global_changed= 1;
	  share->state.open_count++;
	}
      }
      if (!share->now_transactional)
        share->state.state= *info->state;
      /*
        This state write to disk must be done, even for transactional tables;
        indeed the table's share is going to be lost (there was a
        HA_EXTRA_FORCE_REOPEN before, which set share->last_version to 0),
        so the only way it leaves information (share->state.key_map)
        for posterity is by writing it to disk.
      */
      DBUG_ASSERT(!maria_in_recovery);
      error= _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
                                  MA_STATE_INFO_WRITE_FULL_INFO);
    }
    mysql_mutex_unlock(&share->intern_lock);
    break;
  case HA_EXTRA_FORCE_REOPEN:
    /*
      MySQL uses this case after it has closed all other instances
      of this table.
      However, we do a flush here for additional safety.
    */
    /** @todo consider porting these flush-es to MyISAM */
    DBUG_ASSERT(share->reopen == 1);
    error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                                 FLUSH_FORCE_WRITE, FLUSH_FORCE_WRITE);
    if (!error && share->changed)
    {
      mysql_mutex_lock(&share->intern_lock);
      error= _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET|
                                  MA_STATE_INFO_WRITE_FULL_INFO);
      mysql_mutex_unlock(&share->intern_lock);
    }
    mysql_mutex_lock(&THR_LOCK_maria);
    mysql_mutex_lock(&share->intern_lock); /* protect against Checkpoint */
    /* Safety against assert in checkpoint */
    share->bitmap.changed_not_flushed= 0;
    /* this makes the share not be re-used next time the table is opened */
    share->last_version= 0L;			/* Impossible version */
    mysql_mutex_unlock(&share->intern_lock);
    mysql_mutex_unlock(&THR_LOCK_maria);
    break;
  case HA_EXTRA_PREPARE_FOR_DROP:
    /* Signals about intent to delete this table */
    share->deleting= TRUE;
    share->global_changed= FALSE;     /* force writing changed flag */
    /* To force repair if reopened */
    share->state.open_count= 1;
    share->changed= 1;
    _ma_mark_file_changed_now(share);
    /* Fall through */
  case HA_EXTRA_PREPARE_FOR_RENAME:
  {
    my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP);
    my_bool save_global_changed;
    enum flush_type type;
    /*
      As this share is going to get last_version=0, it needs to save all
      its data/index blocks to disk, unless this is for a DROP TABLE.
      Otherwise they would be invisible to future openers, and they could
      even go to disk late and undo the work of future openers.
    */
    if (info->lock_type != F_UNLCK && !info->was_locked)
    {
      info->was_locked= info->lock_type;
      if (maria_lock_database(info, F_UNLCK))
        error= my_errno;
      info->lock_type= F_UNLCK;
    }
    /*
      We don't need to call _ma_decrement_open_count() if we are
      dropping the table, as the files will be removed anyway. If we
      are aborted before the files are removed, it's better not to
      call it, as in that case the automatic repair on open will add
      the missing index entries.
    */
    mysql_mutex_lock(&share->intern_lock);
    if (share->kfile.file >= 0 && function != HA_EXTRA_PREPARE_FOR_DROP)
      _ma_decrement_open_count(info, 0);
    if (info->trn)
    {
      _ma_remove_table_from_trnman(share, info->trn);
      /* Ensure we don't point to the deleted data in trn */
      info->state= info->state_start= &share->state.state;
    }
    /* Remove history for table */
    _ma_reset_state(info);

    type= do_flush ? FLUSH_RELEASE : FLUSH_IGNORE_CHANGED;
    save_global_changed= share->global_changed;
    share->global_changed= 1;                 /* Don't increment open count */
    mysql_mutex_unlock(&share->intern_lock);
    if (_ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                              type, type))
    {
      error=my_errno;
      share->changed= 1;
    }
    mysql_mutex_lock(&share->intern_lock);
    share->global_changed= save_global_changed;
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      if (end_io_cache(&info->rec_cache))
        error= 1;
    }
    if (share->kfile.file >= 0)
    {
      if (do_flush)
      {
        /* Save the state so that others can find it from disk. */
        if ((share->changed &&
             _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
                                  MA_STATE_INFO_WRITE_FULL_INFO)) ||
            mysql_file_sync(share->kfile.file, MYF(0)))
          error= my_errno;
      }
      else
      {
        /* Make sure the state is not written later, as the file may be closed */
        share->changed= 0;
        share->global_changed= 0;
        share->state.open_count= 0;
      }
    }
    if (share->data_file_type == BLOCK_RECORD &&
        share->bitmap.file.file >= 0)
    {
      DBUG_ASSERT(share->bitmap.non_flushable == 0 &&
                  share->bitmap.changed == 0);
      if (do_flush && my_sync(share->bitmap.file.file, MYF(0)))
        error= my_errno;
      share->bitmap.changed_not_flushed= 0;
    }
    /* last_version must be protected by intern_lock; See collect_tables() */
    share->last_version= 0L;			/* Impossible version */
    mysql_mutex_unlock(&share->intern_lock);
    break;
  }
  case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
    if (info->trn)
    {
      mysql_mutex_lock(&share->intern_lock);
      _ma_remove_table_from_trnman(share, info->trn);
      /* Ensure we don't point to the deleted data in trn */
      info->state= info->state_start= &share->state.state;
      mysql_mutex_unlock(&share->intern_lock);    
    }
    break;
  case HA_EXTRA_FLUSH:
    if (!share->temporary)
      error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                                   FLUSH_KEEP, FLUSH_KEEP);

    _ma_decrement_open_count(info, 1);
    if (share->not_flushed)
    {
      share->not_flushed= 0;
      if (_ma_sync_table_files(info))
	error= my_errno;
      if (error)
      {
	/* Fatal error found */
	share->changed= 1;
        _ma_set_fatal_error(share, HA_ERR_CRASHED);
      }
    }
    break;
  case HA_EXTRA_NORMAL:				/* These aren't in use */
    info->quick_mode= 0;
    break;
  case HA_EXTRA_QUICK:
    info->quick_mode= 1;
    break;
  case HA_EXTRA_NO_ROWS:
    if (!share->state.header.uniques)
      info->opt_flag|= OPT_NO_ROWS;
    break;
  case HA_EXTRA_PRELOAD_BUFFER_SIZE:
    info->preload_buff_size= *((ulong *) extra_arg);
    break;
  case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
  case HA_EXTRA_CHANGE_KEY_TO_DUP:
    maria_extra_keyflag(info, function);
    break;
  case HA_EXTRA_MMAP:
#ifdef HAVE_MMAP
    if (block_records)
      break;                                    /* Not supported */
    mysql_mutex_lock(&share->intern_lock);
    /*
      Memory map the data file if it is not already mapped. It is safe
      to memory map a file while other threads are using file I/O on it.
      Assigning a new address to a function pointer is an atomic
      operation. intern_lock prevents two or more mappings from being
      done at the same time.
    */
    if (!share->file_map)
    {
      if (_ma_dynmap_file(info, share->state.state.data_file_length))
      {
        DBUG_PRINT("warning",("mmap failed: errno: %d",errno));
        error= my_errno= errno;
      }
      else
      {
        share->file_read=  _ma_mmap_pread;
        share->file_write= _ma_mmap_pwrite;
      }
    }
    mysql_mutex_unlock(&share->intern_lock);
#endif
    break;
  case HA_EXTRA_MARK_AS_LOG_TABLE:
    mysql_mutex_lock(&share->intern_lock);
    share->is_log_table= TRUE;
    mysql_mutex_unlock(&share->intern_lock);
    break;
  case HA_EXTRA_KEY_CACHE:
  case HA_EXTRA_NO_KEY_CACHE:
  default:
    break;
  }
  DBUG_RETURN(error);
} /* maria_extra */
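
For context, a hedged sketch of how a caller such as the ha_maria handler layer might drive two of the cases above: request a record cache before a sequential scan and release it afterwards. Only maria_extra() and the HA_EXTRA_* constants come from the listing; the function and the cache size are illustrative.

/* Illustrative only; assumes a MARIA_HA *info from a successful maria_open(). */
static void scan_with_record_cache(MARIA_HA *info)
{
  ulong cache_size= 256 * 1024;        /* assumed size, in bytes */

  /* HA_EXTRA_CACHE sets up a READ_CACHE on the data file (see above). */
  maria_extra(info, HA_EXTRA_CACHE, &cache_size);

  /* ... sequential reads of the table would go here ... */

  /* HA_EXTRA_NO_CACHE ends the io_cache again. */
  maria_extra(info, HA_EXTRA_NO_CACHE, 0);
}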
Example 3
int my_copy(const char *from, const char *to, myf MyFlags)
{
  size_t Count;
  my_bool new_file_stat= 0; /* 1 if we could stat "to" */
  int create_flag;
  File from_file,to_file;
  uchar buff[IO_SIZE];
  MY_STAT stat_buff,new_stat_buff;
  gid_t gid;
  DBUG_ENTER("my_copy");
  DBUG_PRINT("my",("from %s to %s MyFlags %d", from, to, MyFlags));

  from_file=to_file= -1;
  DBUG_ASSERT(!(MyFlags & (MY_FNABP | MY_NABP))); /* for my_read/my_write */
  if (MyFlags & MY_HOLD_ORIGINAL_MODES)		/* Copy stat if possible */
    new_file_stat= test(my_stat((char*) to, &new_stat_buff, MYF(0)));

  if ((from_file=my_open(from,O_RDONLY | O_SHARE,MyFlags)) >= 0)
  {
    if (!my_stat(from, &stat_buff, MyFlags))
    {
      my_errno=errno;
      goto err;
    }
    if (MyFlags & MY_HOLD_ORIGINAL_MODES && new_file_stat)
      stat_buff=new_stat_buff;
    create_flag= (MyFlags & MY_DONT_OVERWRITE_FILE) ? O_EXCL : O_TRUNC;

    if ((to_file=  my_create(to,(int) stat_buff.st_mode,
			     O_WRONLY | create_flag | O_BINARY | O_SHARE,
			     MyFlags)) < 0)
      goto err;

    while ((Count=my_read(from_file, buff, sizeof(buff), MyFlags)) != 0)
    {
      if (Count == (uint) -1 ||
          my_write(to_file,buff,Count,MYF(MyFlags | MY_NABP)))
        goto err;
    }

    /* sync the destination file */
    if (MyFlags & MY_SYNC)
    {
      if (my_sync(to_file, MyFlags))
        goto err;
    }

    if (my_close(from_file,MyFlags) | my_close(to_file,MyFlags))
      DBUG_RETURN(-1);				/* Error on close */

    /* Copy modes if possible */

    if (MyFlags & MY_HOLD_ORIGINAL_MODES && !new_file_stat)
      DBUG_RETURN(0);			/* File copied, but stat info not copied */

#if !defined(__WIN__) && !defined(__NETWARE__)
    /* Refresh the new_stat_buff */
    if (!my_stat((char*) to, &new_stat_buff, MYF(0)))
    {
      my_errno= errno;
      goto err;
    }

    /* Copy modes */
    if ((stat_buff.st_mode & 07777) != (new_stat_buff.st_mode & 07777) &&
        chmod(to, stat_buff.st_mode & 07777))
    {
      my_errno= errno;
      if (MyFlags & (MY_FAE+MY_WME))
        my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL+ME_WAITTANG), from, errno);
      goto err;
    }

    /* Copy ownership */
    if (stat_buff.st_gid == new_stat_buff.st_gid)
      gid= -1;
    else
      gid= stat_buff.st_gid;
    if ((gid != (gid_t) -1 || stat_buff.st_uid != new_stat_buff.st_uid) &&
        chown(to, stat_buff.st_uid, gid))
    {
      my_errno= errno;
      if (MyFlags & (MY_FAE+MY_WME))
        my_error(EE_CHANGE_OWNERSHIP, MYF(ME_BELL+ME_WAITTANG), from, errno);
      goto err;
    }
#endif
#if !defined(VMS) && !defined(__ZTC__)
    if (MyFlags & MY_COPYTIME)
    {
      struct utimbuf timep;
      timep.actime  = stat_buff.st_atime;
      timep.modtime = stat_buff.st_mtime;
      VOID(utime((char*) to, &timep)); /* last accessed and modified times */
    }
#endif
    DBUG_RETURN(0);
  }

err:
  if (from_file >= 0) VOID(my_close(from_file,MyFlags));
  if (to_file >= 0)
  {
    VOID(my_close(to_file, MyFlags));
    /* attempt to delete the to-file we've partially written */
    VOID(my_delete(to, MyFlags));
  }
  DBUG_RETURN(-1);
} /* my_copy */
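
A small usage sketch (paths chosen for illustration): copy a file, keeping the source's timestamps and, if the target already existed, the target's original mode/owner; MY_WME asks mysys to print a message on error.

/* Hypothetical caller; "t1.frm" and "t2.frm" are placeholder paths. */
static int copy_definition_file(void)
{
  if (my_copy("t1.frm", "t2.frm",
              MYF(MY_HOLD_ORIGINAL_MODES | MY_COPYTIME | MY_WME)))
    return 1;                          /* my_errno holds the cause */
  return 0;
}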
Example 4
int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
{
  int error=0;
  ulong cache_size;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_extra");
  DBUG_PRINT("enter",("function: %d",(int) function));

  switch (function) {
  case HA_EXTRA_RESET:
    /*
      Free buffers and reset the following flags:
      EXTRA_CACHE, EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK

      If the row buffer cache is large (for dynamic tables), reduce it
      to save memory.
    */
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error=end_io_cache(&info->rec_cache);
    }
    if (share->base.blobs)
      mi_alloc_rec_buff(info, -1, &info->rec_buff);
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if (info->opt_flag & MEMMAP_USED)
      madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
#endif
    info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
    info->quick_mode=0;
    /* Fall through */

  case HA_EXTRA_RESET_STATE:		/* Reset state (don't free buffers) */
    info->lastinx= 0;			/* Use first index as def */
    info->last_search_keypage=info->lastpos= HA_OFFSET_ERROR;
    info->page_changed=1;
					/* Next/prev gives first/last */
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache,READ_CACHE,0,
		      (pbool) (info->lock_type != F_UNLCK),
		      (pbool) test(info->update & HA_STATE_ROW_CHANGED)
		      );
    }
    info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
		   HA_STATE_PREV_FOUND);
    break;
  case HA_EXTRA_CACHE:
    if (info->lock_type == F_UNLCK &&
	(share->options & HA_OPTION_PACK_RECORD))
    {
      error=1;			/* Not possible if not locked */
      my_errno=EACCES;
      break;
    }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if ((share->options & HA_OPTION_COMPRESS_RECORD))
    {
      pthread_mutex_lock(&share->intern_lock);
      if (_mi_memmap_file(info))
      {
	/* We don't need MADV_SEQUENTIAL if the file is small */
	madvise(share->file_map,share->state.state.data_file_length,
		share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ?
		MADV_RANDOM : MADV_SEQUENTIAL);
	pthread_mutex_unlock(&share->intern_lock);
	break;
      }
      pthread_mutex_unlock(&share->intern_lock);
    }
#endif
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      info->opt_flag&= ~WRITE_CACHE_USED;
      if ((error=end_io_cache(&info->rec_cache)))
	break;
    }
    if (!(info->opt_flag &
	  (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED)))
    {
      cache_size= (extra_arg ? *(ulong*) extra_arg :
		   my_default_record_cache_size);
      if (!(init_io_cache(&info->rec_cache,info->dfile,
			 (uint) min(info->state->data_file_length+1,
				    cache_size),
			  READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|=READ_CACHE_USED;
	info->update&= ~HA_STATE_ROW_CHANGED;
      }
      if (share->concurrent_insert)
	info->rec_cache.end_of_file=info->state->data_file_length;
    }
    break;
  case HA_EXTRA_REINIT_CACHE:
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache,READ_CACHE,info->nextpos,
		      (pbool) (info->lock_type != F_UNLCK),
		      (pbool) test(info->update & HA_STATE_ROW_CHANGED));
      info->update&= ~HA_STATE_ROW_CHANGED;
      if (share->concurrent_insert)
	info->rec_cache.end_of_file=info->state->data_file_length;
    }
    break;
  case HA_EXTRA_WRITE_CACHE:
    if (info->lock_type == F_UNLCK)
    {
      error=1;			/* Not possible if not locked */
      break;
    }
    cache_size= (extra_arg ? *(ulong*) extra_arg :
		 my_default_record_cache_size);
    if (!(info->opt_flag &
	  (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) &&
	!share->state.header.uniques)
      if (!(init_io_cache(&info->rec_cache,info->dfile, cache_size,
			 WRITE_CACHE,info->state->data_file_length,
			  (pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|=WRITE_CACHE_USED;
	info->update&= ~(HA_STATE_ROW_CHANGED |
			 HA_STATE_WRITE_AT_END |
			 HA_STATE_EXTEND_BLOCK);
      }
    break;
  case HA_EXTRA_PREPARE_FOR_UPDATE:
    if (info->s->data_file_type != DYNAMIC_RECORD)
      break;
    /* Remove read/write cache if dynamic rows */
    /* fall through */
  case HA_EXTRA_NO_CACHE:
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error=end_io_cache(&info->rec_cache);
      /* Sergei will insert full text index caching here */
    }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if (info->opt_flag & MEMMAP_USED)
      madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
#endif
    break;
  case HA_EXTRA_FLUSH_CACHE:
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      if ((error=flush_io_cache(&info->rec_cache)))
      {
        mi_print_error(info->s, HA_ERR_CRASHED);
	mi_mark_crashed(info);			/* Fatal error found */
      }
    }
    break;
  case HA_EXTRA_NO_READCHECK:
    info->opt_flag&= ~READ_CHECK_USED;		/* No readcheck */
    break;
  case HA_EXTRA_READCHECK:
    info->opt_flag|= READ_CHECK_USED;
    break;
  case HA_EXTRA_KEYREAD:			/* Read only keys to record */
  case HA_EXTRA_REMEMBER_POS:
    info->opt_flag |= REMEMBER_OLD_POS;
    bmove((byte*) info->lastkey+share->base.max_key_length*2,
	  (byte*) info->lastkey,info->lastkey_length);
    info->save_update=	info->update;
    info->save_lastinx= info->lastinx;
    info->save_lastpos= info->lastpos;
    info->save_lastkey_length=info->lastkey_length;
    if (function == HA_EXTRA_REMEMBER_POS)
      break;
    /* fall through */
  case HA_EXTRA_KEYREAD_CHANGE_POS:
    info->opt_flag |= KEY_READ_USED;
    info->read_record=_mi_read_key_record;
    break;
  case HA_EXTRA_NO_KEYREAD:
  case HA_EXTRA_RESTORE_POS:
    if (info->opt_flag & REMEMBER_OLD_POS)
    {
      bmove((byte*) info->lastkey,
	    (byte*) info->lastkey+share->base.max_key_length*2,
	    info->save_lastkey_length);
      info->update=	info->save_update | HA_STATE_WRITTEN;
      info->lastinx=	info->save_lastinx;
      info->lastpos=	info->save_lastpos;
      info->lastkey_length=info->save_lastkey_length;
    }
    info->read_record=	share->read_record;
    info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
    break;
  case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked against changes */
    info->lock_type= F_EXTRA_LCK; /* Simulate as locked */
    break;
  case HA_EXTRA_WAIT_LOCK:
    info->lock_wait=0;
    break;
  case HA_EXTRA_NO_WAIT_LOCK:
    info->lock_wait=MY_DONT_WAIT;
    break;
  case HA_EXTRA_NO_KEYS:
    if (info->lock_type == F_UNLCK)
    {
      error=1;					/* Not possible if not locked */
      break;
    }
    if (mi_is_any_key_active(share->state.key_map))
    {
      MI_KEYDEF *key=share->keyinfo;
      uint i;
      for (i=0 ; i < share->base.keys ; i++,key++)
      {
        if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1)
        {
          mi_clear_key_active(share->state.key_map, i);
          info->update|= HA_STATE_CHANGED;
        }
      }

      if (!share->changed)
      {
	share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED;
	share->changed=1;			/* Update on close */
	if (!share->global_changed)
	{
	  share->global_changed=1;
	  share->state.open_count++;
	}
      }
      share->state.state= *info->state;
      error=mi_state_info_write(share->kfile,&share->state,1 | 2);
    }
    break;
  case HA_EXTRA_FORCE_REOPEN:
    pthread_mutex_lock(&THR_LOCK_myisam);
    share->last_version= 0L;			/* Impossible version */
    pthread_mutex_unlock(&THR_LOCK_myisam);
    break;
  case HA_EXTRA_PREPARE_FOR_DELETE:
    pthread_mutex_lock(&THR_LOCK_myisam);
    share->last_version= 0L;			/* Impossible version */
#ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND
    /* Close the isam and data files as Win32 can't drop an open table */
    pthread_mutex_lock(&share->intern_lock);
    if (flush_key_blocks(share->key_cache, share->kfile,
			 (function == HA_EXTRA_FORCE_REOPEN ?
			  FLUSH_RELEASE : FLUSH_IGNORE_CHANGED)))
    {
      error=my_errno;
      share->changed=1;
      mi_print_error(info->s, HA_ERR_CRASHED);
      mi_mark_crashed(info);			/* Fatal error found */
    }
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error=end_io_cache(&info->rec_cache);
    }
    if (info->lock_type != F_UNLCK && ! info->was_locked)
    {
      info->was_locked=info->lock_type;
      if (mi_lock_database(info,F_UNLCK))
	error=my_errno;
      info->lock_type = F_UNLCK;
    }
    if (share->kfile >= 0)
      _mi_decrement_open_count(info);
    if (share->kfile >= 0 && my_close(share->kfile,MYF(0)))
      error=my_errno;
    {
      LIST *list_element ;
      for (list_element=myisam_open_list ;
	   list_element ;
	   list_element=list_element->next)
      {
	MI_INFO *tmpinfo=(MI_INFO*) list_element->data;
	if (tmpinfo->s == info->s)
	{
	  if (tmpinfo->dfile >= 0 && my_close(tmpinfo->dfile,MYF(0)))
	    error = my_errno;
	  tmpinfo->dfile= -1;
	}
      }
    }
    share->kfile= -1;				/* Files aren't open anymore */
    pthread_mutex_unlock(&share->intern_lock);
#endif
    pthread_mutex_unlock(&THR_LOCK_myisam);
    break;
  case HA_EXTRA_FLUSH:
    if (!share->temporary)
      flush_key_blocks(share->key_cache, share->kfile, FLUSH_KEEP);
#ifdef HAVE_PWRITE
    _mi_decrement_open_count(info);
#endif
    if (share->not_flushed)
    {
      share->not_flushed=0;
      if (my_sync(share->kfile, MYF(0)))
	error= my_errno;
      if (my_sync(info->dfile, MYF(0)))
	error= my_errno;
      if (error)
      {
	share->changed=1;
        mi_print_error(info->s, HA_ERR_CRASHED);
	mi_mark_crashed(info);			/* Fatal error found */
      }
    }
    if (share->base.blobs)
      mi_alloc_rec_buff(info, -1, &info->rec_buff);
    break;
  case HA_EXTRA_NORMAL:				/* These aren't in use */
    info->quick_mode=0;
    break;
  case HA_EXTRA_QUICK:
    info->quick_mode=1;
    break;
  case HA_EXTRA_NO_ROWS:
    if (!share->state.header.uniques)
      info->opt_flag|= OPT_NO_ROWS;
    break;
  case HA_EXTRA_PRELOAD_BUFFER_SIZE:
    info->preload_buff_size= *((ulong *) extra_arg); 
    break;
  case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
  case HA_EXTRA_CHANGE_KEY_TO_DUP:
    mi_extra_keyflag(info, function);
    break;
  case HA_EXTRA_KEY_CACHE:
  case HA_EXTRA_NO_KEY_CACHE:
  default:
    break;
  }
  {
    char tmp[1];
    tmp[0]=function;
    myisam_log_command(MI_LOG_EXTRA,info,(byte*) tmp,1,error);
  }
  DBUG_RETURN(error);
} /* mi_extra */
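
As with maria_extra() above, a sketch of one common calling pattern for this API: save the current key position, perform unrelated reads, then restore it. Only mi_extra() and the HA_EXTRA_* constants are from the listing; the surrounding function is assumed.

/* Illustrative only; assumes an MI_INFO *info from a successful mi_open(). */
static void peek_and_restore_position(MI_INFO *info)
{
  /* HA_EXTRA_REMEMBER_POS saves lastkey/lastpos (see above). */
  mi_extra(info, HA_EXTRA_REMEMBER_POS, 0);

  /* ... reads that move the current position would go here ... */

  /* HA_EXTRA_RESTORE_POS puts lastkey/lastpos back. */
  mi_extra(info, HA_EXTRA_RESTORE_POS, 0);
}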