/*
  Perform an emergency action on every open MyISAM table.

  @param flag  HA_PANIC_CLOSE  close all open tables (and the log),
               HA_PANIC_WRITE  flush key/record caches and release locks,
               HA_PANIC_READ   re-acquire locks released by a previous
                               HA_PANIC_WRITE.

  Walks the global myisam_open_list under THR_LOCK_myisam.  Returns 0 on
  success; otherwise sets my_errno to the last error seen and returns it.
*/

int mi_panic(enum ha_panic_function flag)
{
  int error=0;
  LIST *list_element,*next_open;
  MI_INFO *info;
  DBUG_ENTER("mi_panic");

  mysql_mutex_lock(&THR_LOCK_myisam);
  for (list_element=myisam_open_list ; list_element ; list_element=next_open)
  {
    next_open=list_element->next;		/* Save if close */
    info=(MI_INFO*) list_element->data;
    switch (flag) {
    case HA_PANIC_CLOSE:
      /*
        mi_close() itself takes THR_LOCK_myisam, so release it first.
        NOTE(review): the list can change while unlocked; next_open was
        saved above, but this is acknowledged as "not exactly right".
      */
      mysql_mutex_unlock(&THR_LOCK_myisam);	/* Not exactly right... */
      if (mi_close(info))
	error=my_errno();
      mysql_mutex_lock(&THR_LOCK_myisam);
      break;
    case HA_PANIC_WRITE:		/* Do this to free databases */
      if (flush_key_blocks(info->s->key_cache, keycache_thread_var(),
                           info->s->kfile, FLUSH_RELEASE))
	error=my_errno();
      if (info->opt_flag & WRITE_CACHE_USED)
	if (flush_io_cache(&info->rec_cache))
	  error=my_errno();
      if (info->opt_flag & READ_CACHE_USED)
      {
	if (flush_io_cache(&info->rec_cache))
	  error=my_errno();
	reinit_io_cache(&info->rec_cache,READ_CACHE,0,
			(pbool) (info->lock_type != F_UNLCK),1);
      }
      /* Remember the held lock in was_locked so HA_PANIC_READ can restore it */
      if (info->lock_type != F_UNLCK && ! info->was_locked)
      {
	info->was_locked=info->lock_type;
	if (mi_lock_database(info,F_UNLCK))
	  error=my_errno();
      }
      /* fall through */
    case HA_PANIC_READ:			/* Restore to before WRITE */
      if (info->was_locked)
      {
	if (mi_lock_database(info, info->was_locked))
	  error=my_errno();
	info->was_locked=0;
      }
      break;
    }
  }
  if (flag == HA_PANIC_CLOSE)
  {
    (void) mi_log(0);				/* Close log if needed */
    ft_free_stopwords();
  }
  mysql_mutex_unlock(&THR_LOCK_myisam);
  if (!error)
    DBUG_RETURN(0);
  set_my_errno(error);
  DBUG_RETURN(error);
} /* mi_panic */
/*
  Reposition an IO_CACHE to an absolute file position.

  @param info  the cache to seek in
  @param pos   absolute position in the underlying file

  If pos still lies inside the currently buffered window, only the
  read/write pointer is moved and no I/O happens.  Otherwise the buffer
  is invalidated (read) or flushed (write) and pos_in_file/seek_not_done
  are updated so the next access performs a real seek.
*/
void my_b_seek(IO_CACHE *info,my_off_t pos)
{
  my_off_t offset;
  DBUG_ENTER("my_b_seek");
  DBUG_PRINT("enter",("pos: %lu", (ulong) pos));

  /*
    TODO:
       Verify that it is OK to do seek in the non-append
       area in SEQ_READ_APPEND cache
     a) see if this always works
     b) see if there is a better way to make it work
  */
  if (info->type == SEQ_READ_APPEND)
    VOID(flush_io_cache(info));

  /*
    offset may mathematically be "negative" here (pos before the buffered
    window); the unsigned comparisons below then fail, forcing a refill.
  */
  offset=(pos - info->pos_in_file);

  if (info->type == READ_CACHE || info->type == SEQ_READ_APPEND)
  {
    /* TODO: explain why this works if pos < info->pos_in_file */
    if ((ulonglong) offset < (ulonglong) (info->read_end - info->buffer))
    {
      /* The read is in the current buffer; Reuse it */
      info->read_pos = info->buffer + offset;
      DBUG_VOID_RETURN;
    }
    else
    {
      /* Force a new read on next my_b_read */
      info->read_pos=info->read_end=info->buffer;
    }
  }
  else if (info->type == WRITE_CACHE)
  {
    /* If write is in current buffer, reuse it */
    if ((ulonglong) offset <
	(ulonglong) (info->write_end - info->write_buffer))
    {
      info->write_pos = info->write_buffer + offset;
      DBUG_VOID_RETURN;
    }
    VOID(flush_io_cache(info));
    /* Correct buffer end so that we write in increments of IO_SIZE */
    info->write_end=(info->write_buffer+info->buffer_length-
		     (pos & (IO_SIZE-1)));
  }
  info->pos_in_file=pos;
  info->seek_not_done=1;			/* Next access must seek */
  DBUG_VOID_RETURN;
}
/*
  Read a record by file position.

  @param info     open MyISAM handler
  @param buf      destination buffer for the row
  @param filepos  position of the row, or HA_OFFSET_ERROR to continue a
                  sequential scan (first record on the very first call,
                  the saved nextpos afterwards; deleted blocks skipped)

  Returns the result of the share's read_rnd method, or my_errno if a
  pending write cache could not be flushed first.
*/
int mi_rrnd(MI_INFO *info, byte *buf, register my_off_t filepos)
{
  my_bool skip_deleted;
  DBUG_ENTER("mi_rrnd");

  skip_deleted= 0;
  if (filepos == HA_OFFSET_ERROR)
  {
    /* Sequential scan: start at the first record or continue after last */
    skip_deleted= 1;
    filepos= (info->lastpos == HA_OFFSET_ERROR) ?
      info->s->pack.header_length : info->nextpos;
  }

  info->lastinx= -1;			/* Can't forward or backward */
  /* Init all but update-flag */
  info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);

  if (info->opt_flag & WRITE_CACHE_USED && flush_io_cache(&info->rec_cache))
    DBUG_RETURN(my_errno);
  DBUG_RETURN((*info->s->read_rnd)(info, buf, filepos, skip_deleted));
}
int _nisam_read_static_record(register N_INFO *info, register ulong pos, register byte *record) { int error; if (pos != NI_POS_ERROR) { if (info->opt_flag & WRITE_CACHE_USED && info->rec_cache.pos_in_file <= pos && flush_io_cache(&info->rec_cache)) return(-1); info->rec_cache.seek_not_done=1; /* We have done a seek */ error=my_pread(info->dfile,(char*) record,info->s->base.reclength, pos,MYF(MY_NABP)) != 0; if (info->s->r_locks == 0 && info->s->w_locks == 0) VOID(_nisam_writeinfo(info,0)); if (! error) { if (!*record) return(1); /* Record is deleted */ info->update|= HA_STATE_AKTIV; /* Record is read */ my_errno=HA_ERR_RECORD_DELETED; return(0); } return(-1); /* Error on read */ } VOID(_nisam_writeinfo(info,0)); /* No such record */ return(-1); } /* _nisam_read_record */
int _mi_read_static_record(register MI_INFO *info, register my_off_t pos, register uchar *record) { int error; if (pos != HA_OFFSET_ERROR) { if (info->opt_flag & WRITE_CACHE_USED && info->rec_cache.pos_in_file <= pos && flush_io_cache(&info->rec_cache)) return(-1); info->rec_cache.seek_not_done=1; /* We have done a seek */ error=info->s->file_read(info, record, info->s->base.reclength, pos,MYF(MY_NABP)) != 0; fast_mi_writeinfo(info); if (! error) { if (!*record) { my_errno=HA_ERR_RECORD_DELETED; return(1); /* Record is deleted */ } info->update|= HA_STATE_AKTIV; /* Record is read */ return(0); } return(-1); /* Error on read */ } fast_mi_writeinfo(info); /* No such record */ return(-1); }
int _mi_cmp_static_record(register MI_INFO *info, register const uchar *old) { DBUG_ENTER("_mi_cmp_static_record"); if (info->opt_flag & WRITE_CACHE_USED) { if (flush_io_cache(&info->rec_cache)) { DBUG_RETURN(-1); } info->rec_cache.seek_not_done=1; /* We have done a seek */ } if ((info->opt_flag & READ_CHECK_USED)) { /* If check isn't disabled */ info->rec_cache.seek_not_done=1; /* We have done a seek */ if (info->s->file_read(info, info->rec_buff, info->s->base.reclength, info->lastpos, MYF(MY_NABP))) DBUG_RETURN(-1); if (memcmp(info->rec_buff, old, (uint) info->s->base.reclength)) { DBUG_DUMP("read",old,info->s->base.reclength); DBUG_DUMP("disk",info->rec_buff,info->s->base.reclength); my_errno=HA_ERR_RECORD_CHANGED; /* Record have changed */ DBUG_RETURN(1); } } DBUG_RETURN(0); }
int _nisam_cmp_static_record(register N_INFO *info, register const byte *old) { DBUG_ENTER("_nisam_rectest"); /* We are going to do changes; dont let anybody disturb */ dont_break(); /* Dont allow SIGHUP or SIGINT */ if (info->opt_flag & WRITE_CACHE_USED) { if (flush_io_cache(&info->rec_cache)) { DBUG_RETURN(-1); } info->rec_cache.seek_not_done=1; /* We have done a seek */ } if ((info->opt_flag & READ_CHECK_USED)) { /* If check isn't disabled */ info->rec_cache.seek_not_done=1; /* We have done a seek */ VOID(my_seek(info->dfile,info->lastpos,MY_SEEK_SET,MYF(0))); if (my_read(info->dfile, (char*) info->rec_buff, info->s->base.reclength, MYF(MY_NABP))) DBUG_RETURN(-1); if (memcmp((byte*) info->rec_buff, (byte*) old, (uint) info->s->base.reclength)) { DBUG_DUMP("read",old,info->s->base.reclength); DBUG_DUMP("disk",info->rec_buff,info->s->base.reclength); my_errno=HA_ERR_RECORD_CHANGED; /* Record have changed */ DBUG_RETURN(1); } } DBUG_RETURN(0); }
/*
  Prepare a handler for a full sequential scan.

  Positions nextpos on the first record, clears the current index, and
  flushes a pending write cache so the scan sees all rows.

  @return 0 on success, my_errno() if the flush failed.
*/
int mi_scan_init(MI_INFO *info)
{
  DBUG_ENTER("mi_scan_init");
  info->nextpos= info->s->pack.header_length;	/* Read first record */
  info->lastinx= -1;				/* Can't forward or backward */
  if (info->opt_flag & WRITE_CACHE_USED)
  {
    if (flush_io_cache(&info->rec_cache))
      DBUG_RETURN(my_errno());
  }
  DBUG_RETURN(0);
}
/*
  Flush an Aria table's data and/or index files.

  @param flush_data_or_index    bitmap of MARIA_FLUSH_DATA and/or
                                MARIA_FLUSH_INDEX
  @param flush_type_for_data    flush type for the data file/page cache;
                                FLUSH_IGNORE_CHANGED discards dirty state
                                instead of writing it
  @param flush_type_for_index   flush type for the index file page cache

  @return 0 on success; on any failure marks the table crashed via
          _ma_set_fatal_error(HA_ERR_CRASHED) and returns 1.
*/
int _ma_flush_table_files(MARIA_HA *info, uint flush_data_or_index,
                          enum flush_type flush_type_for_data,
                          enum flush_type flush_type_for_index)
{
  int error= 0;
  MARIA_SHARE *share= info->s;
  DBUG_ENTER("_ma_flush_table_files");

  /* flush data file first because it's more critical */
  if (flush_data_or_index & MARIA_FLUSH_DATA)
  {
    if ((info->opt_flag & WRITE_CACHE_USED) &&
        flush_type_for_data != FLUSH_IGNORE_CHANGED &&
        flush_io_cache(&info->rec_cache))
      error= 1;
    if (share->data_file_type == BLOCK_RECORD)
    {
      if (flush_type_for_data != FLUSH_IGNORE_CHANGED)
      {
        if (_ma_bitmap_flush(share))
          error= 1;
      }
      else
      {
        /* Drop the dirty flags instead of writing the bitmap out */
        mysql_mutex_lock(&share->bitmap.bitmap_lock);
        share->bitmap.changed= 0;
        share->bitmap.changed_not_flushed= 0;
        mysql_mutex_unlock(&share->bitmap.bitmap_lock);
      }
      if (flush_pagecache_blocks(share->pagecache, &info->dfile,
                                 flush_type_for_data))
        error= 1;
    }
  }
  if ((flush_data_or_index & MARIA_FLUSH_INDEX) &&
      flush_pagecache_blocks(share->pagecache, &share->kfile,
                             flush_type_for_index))
    error= 1;
  if (!error)
    DBUG_RETURN(0);

  /* Any flush failure makes the table unusable */
  _ma_set_fatal_error(info->s, HA_ERR_CRASHED);
  DBUG_RETURN(1);
}
/*
  Validate and switch the current index of a handler.

  @param info  open ISAM handler
  @param inx   index number, or -1 for "keep using the last index"

  @return the resolved index number, or -1 on error
          (my_errno= HA_ERR_WRONG_INDEX for a bad index, or a failed
          write-cache flush).
*/
int _nisam_check_index(N_INFO *info, int inx)
{
  if (inx == -1)				/* Use last index */
    inx= info->lastinx;
  if (inx < 0 || inx >= (int) info->s->state.keys)
  {
    my_errno=HA_ERR_WRONG_INDEX;
    return -1;
  }
  if (info->lastinx != inx)			/* Index changed */
  {
    info->lastinx= inx;
    info->lastpos= NI_POS_ERROR;		/* No current row yet */
    info->update= ((info->update & (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED)) |
		   HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND);
  }
  if ((info->opt_flag & WRITE_CACHE_USED) && flush_io_cache(&info->rec_cache))
    return(-1);
  return(inx);
} /* ni_check_index */
/*
 * Apply a handler "extra" operation to an open MyISAM table.
 *
 * @param info      open handler instance
 * @param function  the HA_EXTRA_* operation to perform; unknown values
 *                  are ignored (default: break)
 *
 * Returns 0 on success, otherwise an error code / errno from the failing
 * sub-operation.  The operation and its result are appended to the MyISAM
 * log via myisam_log_command() before returning.
 */
int
mi_extra(MI_INFO * info, enum ha_extra_function function)
{
	int error = 0;
	MYISAM_SHARE *share = info->s;
	DBUG_ENTER("mi_extra");

	switch (function) {
	case HA_EXTRA_RESET:
		/*
		 * Free buffers and reset the following flags: EXTRA_CACHE,
		 * EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK
		 */
		if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) {
			info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED);
			error = end_io_cache(&info->rec_cache);
		}
#if defined(HAVE_MMAP) && defined(HAVE_MADVICE)
		if (info->opt_flag & MEMMAP_USED)
			madvise(share->file_map, share->state.state.data_file_length, MADV_RANDOM);
#endif
		info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS);
		info->quick_mode = 0;
		/* Fall through */
	case HA_EXTRA_RESET_STATE:	/* Reset state (don't free buffers) */
		info->lastinx = 0;	/* Use first index as def */
		info->last_search_keypage = info->lastpos = HA_OFFSET_ERROR;
		info->page_changed = 1;	/* Next/prev gives first/last */
		if (info->opt_flag & READ_CACHE_USED) {
			reinit_io_cache(&info->rec_cache, READ_CACHE, 0,
					(pbool) (info->lock_type != F_UNLCK),
					(pbool) test(info->update & HA_STATE_ROW_CHANGED));
		}
		info->update = ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND);
		break;
	case HA_EXTRA_CACHE:
		if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) {
			error = 1;	/* Not possible if not locked */
			my_errno = EACCES;
			break;
		}
#if defined(HAVE_MMAP) && defined(HAVE_MADVICE)
		if ((share->options & HA_OPTION_COMPRESS_RECORD)) {
			pthread_mutex_lock(&share->intern_lock);
			if (_mi_memmap_file(info)) {
				/*
				 * We don't need MADV_SEQUENTIAL if small
				 * file
				 */
				madvise(share->file_map, share->state.state.data_file_length,
					share->state.state.data_file_length <= RECORD_CACHE_SIZE * 16 ?
					MADV_RANDOM : MADV_SEQUENTIAL);
				pthread_mutex_unlock(&share->intern_lock);
				break;
			}
			pthread_mutex_unlock(&share->intern_lock);
		}
#endif
		if (info->opt_flag & WRITE_CACHE_USED) {
			info->opt_flag &= ~WRITE_CACHE_USED;
			if ((error = end_io_cache(&info->rec_cache)))
				break;
		}
		if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) {
			if (!(init_io_cache(&info->rec_cache, info->dfile,
					    (uint) min(info->state->data_file_length + 1,
						       my_default_record_cache_size),
					    READ_CACHE, 0L,
					    (pbool) (info->lock_type != F_UNLCK),
					    MYF(share->write_flag & MY_WAIT_IF_FULL)))) {
				info->opt_flag |= READ_CACHE_USED;
				info->update &= ~HA_STATE_ROW_CHANGED;
			}
			if (share->concurrent_insert)
				info->rec_cache.end_of_file = info->state->data_file_length;
		}
		break;
	case HA_EXTRA_REINIT_CACHE:
		if (info->opt_flag & READ_CACHE_USED) {
			reinit_io_cache(&info->rec_cache, READ_CACHE, info->nextpos,
					(pbool) (info->lock_type != F_UNLCK),
					(pbool) test(info->update & HA_STATE_ROW_CHANGED));
			info->update &= ~HA_STATE_ROW_CHANGED;
			if (share->concurrent_insert)
				info->rec_cache.end_of_file = info->state->data_file_length;
		}
		break;
	case HA_EXTRA_WRITE_CACHE:
		if (info->lock_type == F_UNLCK) {
			error = 1;	/* Not possible if not locked */
			break;
		}
		/* No write cache for tables with unique constraints */
		if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) &&
		    !share->state.header.uniques)
			if (!(init_io_cache(&info->rec_cache, info->dfile, 0,
					    WRITE_CACHE, info->state->data_file_length,
					    (pbool) (info->lock_type != F_UNLCK),
					    MYF(share->write_flag & MY_WAIT_IF_FULL)))) {
				info->opt_flag |= WRITE_CACHE_USED;
				info->update &= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK);
			}
		break;
	case HA_EXTRA_NO_CACHE:
		if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) {
			info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED);
			error = end_io_cache(&info->rec_cache);
			/* Sergei will insert full text index caching here */
		}
#if defined(HAVE_MMAP) && defined(HAVE_MADVICE)
		if (info->opt_flag & MEMMAP_USED)
			madvise(share->file_map,
				share->state.state.data_file_length, MADV_RANDOM);
#endif
		break;
	case HA_EXTRA_FLUSH_CACHE:
		if (info->opt_flag & WRITE_CACHE_USED) {
			if ((error = flush_io_cache(&info->rec_cache)))
				mi_mark_crashed(info);	/* Fatal error found */
		}
		break;
	case HA_EXTRA_NO_READCHECK:
		info->opt_flag &= ~READ_CHECK_USED;	/* No readcheck */
		break;
	case HA_EXTRA_READCHECK:
		info->opt_flag |= READ_CHECK_USED;
		break;
	case HA_EXTRA_KEYREAD:	/* Read only keys to record */
	case HA_EXTRA_REMEMBER_POS:
		/* Save the current position/key so RESTORE_POS can undo */
		info->opt_flag |= REMEMBER_OLD_POS;
		bmove((byte *) info->lastkey + share->base.max_key_length * 2,
		      (byte *) info->lastkey, info->lastkey_length);
		info->save_update = info->update;
		info->save_lastinx = info->lastinx;
		info->save_lastpos = info->lastpos;
		info->save_lastkey_length = info->lastkey_length;
		if (function == HA_EXTRA_REMEMBER_POS)
			break;
		/* fall through */
	case HA_EXTRA_KEYREAD_CHANGE_POS:
		info->opt_flag |= KEY_READ_USED;
		info->read_record = _mi_read_key_record;
		break;
	case HA_EXTRA_NO_KEYREAD:
	case HA_EXTRA_RESTORE_POS:
		if (info->opt_flag & REMEMBER_OLD_POS) {
			/* Restore what HA_EXTRA_REMEMBER_POS saved */
			bmove((byte *) info->lastkey,
			      (byte *) info->lastkey + share->base.max_key_length * 2,
			      info->save_lastkey_length);
			info->update = info->save_update | HA_STATE_WRITTEN;
			info->lastinx = info->save_lastinx;
			info->lastpos = info->save_lastpos;
			info->lastkey_length = info->save_lastkey_length;
		}
		info->read_record = share->read_record;
		info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS);
		break;
	case HA_EXTRA_NO_USER_CHANGE:	/* Database is somehow locked against changes */
		info->lock_type = F_EXTRA_LCK;	/* Simulate as locked */
		break;
	case HA_EXTRA_WAIT_LOCK:
		info->lock_wait = 0;
		break;
	case HA_EXTRA_NO_WAIT_LOCK:
		info->lock_wait = MY_DONT_WAIT;
		break;
	case HA_EXTRA_NO_KEYS:
		if (info->lock_type == F_UNLCK) {
			error = 1;	/* Not possible if not locked */
			break;
		}
		if (share->state.key_map) {
			/* Disable all keys and truncate the key file */
			share->state.key_map = 0;
			info->state->key_file_length = share->state.state.key_file_length = share->base.keystart;
			if (!share->changed) {
				share->state.changed |= STATE_CHANGED | STATE_NOT_ANALYZED;
				share->changed = 1;	/* Update on close */
				if (!share->global_changed) {
					share->global_changed = 1;
					share->state.open_count++;
				}
			}
			share->state.state = *info->state;
			error = mi_state_info_write(share->kfile, &share->state, 1 | 2);
		}
		break;
	case HA_EXTRA_FORCE_REOPEN:
		pthread_mutex_lock(&THR_LOCK_myisam);
		share->last_version = 0L;	/* Impossible version */
#ifdef __WIN__
		/*
		 * Close the isam and data files as Win32 can't drop an open
		 * table
		 */
		pthread_mutex_lock(&share->intern_lock);
		if (flush_key_blocks(share->kfile, FLUSH_RELEASE)) {
			error = my_errno;
			share->changed = 1;
			mi_mark_crashed(info);	/* Fatal error found */
		}
		if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) {
			info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED);
			error = end_io_cache(&info->rec_cache);
		}
		if (info->lock_type != F_UNLCK && !info->was_locked) {
			info->was_locked = info->lock_type;
			if (mi_lock_database(info, F_UNLCK))
				error = my_errno;
			info->lock_type = F_UNLCK;
		}
		if (share->kfile >= 0)
			_mi_decrement_open_count(info);
		if (share->kfile >= 0 && my_close(share->kfile, MYF(0)))
			error = my_errno;
		{
			/* Close the data file of every handler on this share */
			LIST *list_element;
			for (list_element = myisam_open_list; list_element; list_element = list_element->next) {
				MI_INFO *tmpinfo = (MI_INFO *) list_element->data;
				if (tmpinfo->s == info->s) {
					if (tmpinfo->dfile >= 0 && my_close(tmpinfo->dfile, MYF(0)))
						error = my_errno;
					tmpinfo->dfile = -1;
				}
			}
		}
		share->kfile = -1;	/* Files aren't open anymore */
		pthread_mutex_unlock(&share->intern_lock);
#endif
		pthread_mutex_unlock(&THR_LOCK_myisam);
		break;
	case HA_EXTRA_FLUSH:
		if (!share->temporary)
			flush_key_blocks(share->kfile, FLUSH_KEEP);
#ifdef HAVE_PWRITE
		_mi_decrement_open_count(info);
#endif
		if (share->not_flushed) {
			share->not_flushed = 0;
			/*
			 * NOTE(review): the _commit branch syncs info->dfile
			 * while the fdatasync/fsync branches sync
			 * share->dfile -- looks inconsistent; verify against
			 * upstream before changing.
			 */
#if defined(__WIN__)
			if (_commit(share->kfile))
				error = errno;
			if (_commit(info->dfile))
				error = errno;
#elif defined(HAVE_FDATASYNC)
			if (fdatasync(share->kfile))
				error = errno;
			if (fdatasync(share->dfile))
				error = errno;
#elif defined(HAVE_FSYNC)
			if (fsync(share->kfile))
				error = errno;
			if (fsync(share->dfile))
				error = errno;
#endif
			if (error) {
				share->changed = 1;
				mi_mark_crashed(info);	/* Fatal error found */
			}
		}
		if (share->base.blobs) {
			/* Re-allocate the record buffer to its base size */
			my_free(info->rec_alloc, MYF(MY_ALLOW_ZERO_PTR));
			info->rec_alloc = info->rec_buff = 0;
			mi_fix_rec_buff_for_blob(info, info->s->base.pack_reclength);
		}
		break;
	case HA_EXTRA_NORMAL:	/* These aren't in use */
		info->quick_mode = 0;
		break;
	case HA_EXTRA_QUICK:
		info->quick_mode = 1;
		break;
	case HA_EXTRA_NO_ROWS:
		if (!share->state.header.uniques)
			info->opt_flag |= OPT_NO_ROWS;
		break;
	case HA_EXTRA_KEY_CACHE:
	case HA_EXTRA_NO_KEY_CACHE:
	default:
		break;
	}
	{
		/* Log the operation and its outcome */
		char tmp [1];
		tmp[0] = function;
		myisam_log_command(MI_LOG_EXTRA, info, (byte *) tmp, 1, error);
	}
	DBUG_RETURN(error);
}				/* mi_extra */
/*
  Apply a handler "extra" operation to an open Aria table.

  @param info       open handler instance
  @param function   the HA_EXTRA_* operation to perform; unknown values
                    are ignored (default: break)
  @param extra_arg  operation-specific argument (e.g. a ulong* cache size
                    for HA_EXTRA_CACHE/HA_EXTRA_WRITE_CACHE, the preload
                    buffer size for HA_EXTRA_PRELOAD_BUFFER_SIZE)

  @return 0 on success, otherwise an error code from the failing
          sub-operation.
*/
int maria_extra(MARIA_HA *info, enum ha_extra_function function,
                void *extra_arg)
{
  int error= 0;
  ulong cache_size;
  MARIA_SHARE *share= info->s;
  my_bool block_records= share->data_file_type == BLOCK_RECORD;
  DBUG_ENTER("maria_extra");
  DBUG_PRINT("enter",("function: %d",(int) function));

  switch (function) {
  case HA_EXTRA_RESET_STATE:		/* Reset state (don't free buffers) */
    info->lastinx= ~0;			/* Detect index changes */
    info->last_search_keypage= info->cur_row.lastpos= HA_OFFSET_ERROR;
    info->page_changed= 1;		/* Next/prev gives first/last */
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache,READ_CACHE,0,
		      (pbool) (info->lock_type != F_UNLCK),
		      (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED));
    }
    info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
		   HA_STATE_PREV_FOUND);
    break;
  case HA_EXTRA_CACHE:
    if (block_records)
      break;				/* Not supported */
    if (info->lock_type == F_UNLCK &&
	(share->options & HA_OPTION_PACK_RECORD))
    {
      error= 1;				/* Not possible if not locked */
      my_errno= EACCES;
      break;
    }
    if (info->s->file_map)		/* Don't use cache if mmap */
      break;
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if ((share->options & HA_OPTION_COMPRESS_RECORD))
    {
      mysql_mutex_lock(&share->intern_lock);
      if (_ma_memmap_file(info))
      {
	/* We don't need MADV_SEQUENTIAL if small file */
	madvise((char*) share->file_map, share->state.state.data_file_length,
		share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ?
		MADV_RANDOM : MADV_SEQUENTIAL);
	mysql_mutex_unlock(&share->intern_lock);
	break;
      }
      mysql_mutex_unlock(&share->intern_lock);
    }
#endif
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      info->opt_flag&= ~WRITE_CACHE_USED;
      if ((error= end_io_cache(&info->rec_cache)))
	break;
    }
    if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED)))
    {
      /* extra_arg (if given) carries the requested cache size */
      cache_size= (extra_arg ? *(ulong*) extra_arg :
		   my_default_record_cache_size);
      if (!(init_io_cache(&info->rec_cache, info->dfile.file,
			  (uint) MY_MIN(share->state.state.data_file_length+1,
					cache_size),
			  READ_CACHE, 0L, (pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|= READ_CACHE_USED;
	info->update&= ~HA_STATE_ROW_CHANGED;
      }
      if (share->non_transactional_concurrent_insert)
	info->rec_cache.end_of_file= info->state->data_file_length;
    }
    break;
  case HA_EXTRA_REINIT_CACHE:
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache, READ_CACHE, info->cur_row.nextpos,
		      (pbool) (info->lock_type != F_UNLCK),
		      (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED));
      info->update&= ~HA_STATE_ROW_CHANGED;
      if (share->non_transactional_concurrent_insert)
	info->rec_cache.end_of_file= info->state->data_file_length;
    }
    break;
  case HA_EXTRA_WRITE_CACHE:
    if (info->lock_type == F_UNLCK)
    {
      error= 1;				/* Not possible if not locked */
      break;
    }
    if (block_records)
      break;				/* Not supported */

    cache_size= (extra_arg ? *(ulong*) extra_arg :
		 my_default_record_cache_size);
    /* No write cache for tables with unique constraints */
    if (!(info->opt_flag &
	  (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) &&
	!share->state.header.uniques)
      if (!(init_io_cache(&info->rec_cache, info->dfile.file, cache_size,
			  WRITE_CACHE, info->state->data_file_length,
			  (pbool) (info->lock_type != F_UNLCK),
			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
	info->opt_flag|= WRITE_CACHE_USED;
	info->update&= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END |
			 HA_STATE_EXTEND_BLOCK);
      }
    break;
  case HA_EXTRA_PREPARE_FOR_UPDATE:
    if (info->s->data_file_type != DYNAMIC_RECORD)
      break;
    /* Remove read/write cache if dynamic rows; fall through */
  case HA_EXTRA_NO_CACHE:
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error= end_io_cache(&info->rec_cache);
      /* Sergei will insert full text index caching here */
    }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if (info->opt_flag & MEMMAP_USED)
      madvise((char*) share->file_map, share->state.state.data_file_length,
	      MADV_RANDOM);
#endif
    break;
  case HA_EXTRA_FLUSH_CACHE:
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      if ((error= flush_io_cache(&info->rec_cache)))
      {
	/* Fatal error found */
	_ma_set_fatal_error(share, HA_ERR_CRASHED);
      }
    }
    break;
  case HA_EXTRA_NO_READCHECK:
    info->opt_flag&= ~READ_CHECK_USED;		/* No readcheck */
    break;
  case HA_EXTRA_READCHECK:
    info->opt_flag|= READ_CHECK_USED;
    break;
  case HA_EXTRA_KEYREAD:			/* Read only keys to record */
  case HA_EXTRA_REMEMBER_POS:
    /* Save the current position/key so RESTORE_POS can undo */
    info->opt_flag|= REMEMBER_OLD_POS;
    bmove(info->last_key.data + share->base.max_key_length*2,
	  info->last_key.data,
	  info->last_key.data_length + info->last_key.ref_length);
    info->save_update= info->update;
    info->save_lastinx= info->lastinx;
    info->save_lastpos= info->cur_row.lastpos;
    info->save_lastkey_data_length= info->last_key.data_length;
    info->save_lastkey_ref_length= info->last_key.ref_length;
    if (function == HA_EXTRA_REMEMBER_POS)
      break;
    /* fall through */
  case HA_EXTRA_KEYREAD_CHANGE_POS:
    info->opt_flag|= KEY_READ_USED;
    info->read_record= _ma_read_key_record;
    break;
  case HA_EXTRA_NO_KEYREAD:
  case HA_EXTRA_RESTORE_POS:
    if (info->opt_flag & REMEMBER_OLD_POS)
    {
      /* Restore what HA_EXTRA_REMEMBER_POS saved */
      bmove(info->last_key.data,
	    info->last_key.data + share->base.max_key_length*2,
	    info->save_lastkey_data_length + info->save_lastkey_ref_length);
      info->update= info->save_update | HA_STATE_WRITTEN;
      info->lastinx= info->save_lastinx;
      info->cur_row.lastpos= info->save_lastpos;
      info->last_key.data_length= info->save_lastkey_data_length;
      info->last_key.ref_length= info->save_lastkey_ref_length;
      info->last_key.flag= 0;
    }
    info->read_record= share->read_record;
    info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
    break;
  case HA_EXTRA_NO_USER_CHANGE:	/* Database is somehow locked against changes */
    info->lock_type= F_EXTRA_LCK;		/* Simulate as locked */
    break;
  case HA_EXTRA_WAIT_LOCK:
    info->lock_wait= 0;
    break;
  case HA_EXTRA_NO_WAIT_LOCK:
    info->lock_wait= MY_SHORT_WAIT;
    break;
  case HA_EXTRA_NO_KEYS:
    /* we're going to modify pieces of the state, stall Checkpoint */
    mysql_mutex_lock(&share->intern_lock);
    if (info->lock_type == F_UNLCK)
    {
      mysql_mutex_unlock(&share->intern_lock);
      error= 1;					/* Not possible if not locked */
      break;
    }
    if (maria_is_any_key_active(share->state.key_map))
    {
      MARIA_KEYDEF *key= share->keyinfo;
      uint i;
      /* Disable every non-unique, non-auto-increment key */
      for (i =0 ; i < share->base.keys ; i++,key++)
      {
	if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1)
	{
	  maria_clear_key_active(share->state.key_map, i);
	  info->update|= HA_STATE_CHANGED;
	}
      }
      if (!share->changed)
      {
	share->changed= 1;			/* Update on close */
	share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED;
	if (!share->global_changed)
	{
	  share->global_changed= 1;
	  share->state.open_count++;
	}
      }
      if (!share->now_transactional)
        share->state.state= *info->state;
      /*
        That state write to disk must be done, even for transactional
        tables; indeed the table's share is going to be lost (there was a
        HA_EXTRA_FORCE_REOPEN before, which set share->last_version to 0),
        and so the only way it leaves information (share->state.key_map)
        for the posterity is by writing it to disk.
      */
      DBUG_ASSERT(!maria_in_recovery);
      error= _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
                                  MA_STATE_INFO_WRITE_FULL_INFO);
    }
    mysql_mutex_unlock(&share->intern_lock);
    break;
  case HA_EXTRA_FORCE_REOPEN:
    /*
      MySQL uses this case after it has closed all other instances
      of this table.
      We however do a flush here for additional safety.
    */
    /** @todo consider porting these flush-es to MyISAM */
    DBUG_ASSERT(share->reopen == 1);
    error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                                 FLUSH_FORCE_WRITE, FLUSH_FORCE_WRITE);
    if (!error && share->changed)
    {
      mysql_mutex_lock(&share->intern_lock);
      error= _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET|
                                  MA_STATE_INFO_WRITE_FULL_INFO);
      mysql_mutex_unlock(&share->intern_lock);
    }
    mysql_mutex_lock(&THR_LOCK_maria);
    mysql_mutex_lock(&share->intern_lock); /* protect against Checkpoint */
    /* Safety against assert in checkpoint */
    share->bitmap.changed_not_flushed= 0;
    /* this makes the share not be re-used next time the table is opened */
    share->last_version= 0L;			/* Impossible version */
    mysql_mutex_unlock(&share->intern_lock);
    mysql_mutex_unlock(&THR_LOCK_maria);
    break;
  case HA_EXTRA_PREPARE_FOR_DROP:
    /* Signals about intent to delete this table */
    share->deleting= TRUE;
    share->global_changed= FALSE;     /* force writing changed flag */
    /* To force repair if reopened */
    share->state.open_count= 1;
    share->changed= 1;
    _ma_mark_file_changed_now(share);
    /* Fall through */
  case HA_EXTRA_PREPARE_FOR_RENAME:
  {
    my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP);
    my_bool save_global_changed;
    enum flush_type type;
    /*
      This share, to have last_version=0, needs to save all its data/index
      blocks to disk if this is not for a DROP TABLE. Otherwise they would
      be invisible to future openers; and they could even go to disk late
      and cancel the work of future openers.
    */
    if (info->lock_type != F_UNLCK && !info->was_locked)
    {
      info->was_locked= info->lock_type;
      if (maria_lock_database(info, F_UNLCK))
        error= my_errno;
      info->lock_type= F_UNLCK;
    }
    /*
      We don't need to call _mi_decrement_open_count() if we are
      dropping the table, as the files will be removed anyway. If we
      are aborted before the files is removed, it's better to not
      call it as in that case the automatic repair on open will add
      the missing index entries
    */
    mysql_mutex_lock(&share->intern_lock);
    if (share->kfile.file >= 0 && function != HA_EXTRA_PREPARE_FOR_DROP)
      _ma_decrement_open_count(info, 0);
    if (info->trn)
    {
      _ma_remove_table_from_trnman(share, info->trn);
      /* Ensure we don't point to the deleted data in trn */
      info->state= info->state_start= &share->state.state;
    }
    /* Remove history for table */
    _ma_reset_state(info);

    type= do_flush ? FLUSH_RELEASE : FLUSH_IGNORE_CHANGED;
    save_global_changed= share->global_changed;
    share->global_changed= 1;                 /* Don't increment open count */
    mysql_mutex_unlock(&share->intern_lock);
    if (_ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                              type, type))
    {
      error=my_errno;
      share->changed= 1;
    }
    mysql_mutex_lock(&share->intern_lock);
    share->global_changed= save_global_changed;
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      if (end_io_cache(&info->rec_cache))
        error= 1;
    }
    if (share->kfile.file >= 0)
    {
      if (do_flush)
      {
        /* Save the state so that others can find it from disk. */
        if ((share->changed &&
             _ma_state_info_write(share,
                                  MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
                                  MA_STATE_INFO_WRITE_FULL_INFO)) ||
            mysql_file_sync(share->kfile.file, MYF(0)))
          error= my_errno;
      }
      else
      {
        /* be sure that state is not tried for write as file may be closed */
        share->changed= 0;
        share->global_changed= 0;
        share->state.open_count= 0;
      }
    }
    if (share->data_file_type == BLOCK_RECORD &&
        share->bitmap.file.file >= 0)
    {
      DBUG_ASSERT(share->bitmap.non_flushable == 0 &&
                  share->bitmap.changed == 0);
      if (do_flush && my_sync(share->bitmap.file.file, MYF(0)))
        error= my_errno;
      share->bitmap.changed_not_flushed= 0;
    }
    /* last_version must be protected by intern_lock; See collect_tables() */
    share->last_version= 0L;			/* Impossible version */
    mysql_mutex_unlock(&share->intern_lock);
    break;
  }
  case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
    if (info->trn)
    {
      mysql_mutex_lock(&share->intern_lock);
      _ma_remove_table_from_trnman(share, info->trn);
      /* Ensure we don't point to the deleted data in trn */
      info->state= info->state_start= &share->state.state;
      mysql_mutex_unlock(&share->intern_lock);
    }
    break;
  case HA_EXTRA_FLUSH:
    if (!share->temporary)
      error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
                                   FLUSH_KEEP, FLUSH_KEEP);

    _ma_decrement_open_count(info, 1);
    if (share->not_flushed)
    {
      share->not_flushed= 0;
      if (_ma_sync_table_files(info))
        error= my_errno;
      if (error)
      {
	/* Fatal error found */
	share->changed= 1;
	_ma_set_fatal_error(share, HA_ERR_CRASHED);
      }
    }
    break;
  case HA_EXTRA_NORMAL:				/* These aren't in use */
    info->quick_mode= 0;
    break;
  case HA_EXTRA_QUICK:
    info->quick_mode= 1;
    break;
  case HA_EXTRA_NO_ROWS:
    if (!share->state.header.uniques)
      info->opt_flag|= OPT_NO_ROWS;
    break;
  case HA_EXTRA_PRELOAD_BUFFER_SIZE:
    info->preload_buff_size= *((ulong *) extra_arg);
    break;
  case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
  case HA_EXTRA_CHANGE_KEY_TO_DUP:
    maria_extra_keyflag(info, function);
    break;
  case HA_EXTRA_MMAP:
#ifdef HAVE_MMAP
    if (block_records)
      break;				/* Not supported */
    mysql_mutex_lock(&share->intern_lock);
    /*
      Memory map the data file if it is not already mapped. It is safe
      to memory map a file while other threads are using file I/O on it.
      Assigning a new address to a function pointer is an atomic
      operation. intern_lock prevents that two or more mappings are done
      at the same time.
    */
    if (!share->file_map)
    {
      if (_ma_dynmap_file(info, share->state.state.data_file_length))
      {
        DBUG_PRINT("warning",("mmap failed: errno: %d",errno));
        error= my_errno= errno;
      }
      else
      {
        share->file_read= _ma_mmap_pread;
        share->file_write= _ma_mmap_pwrite;
      }
    }
    mysql_mutex_unlock(&share->intern_lock);
#endif
    break;
  case HA_EXTRA_MARK_AS_LOG_TABLE:
    mysql_mutex_lock(&share->intern_lock);
    share->is_log_table= TRUE;
    mysql_mutex_unlock(&share->intern_lock);
    break;
  case HA_EXTRA_KEY_CACHE:
  case HA_EXTRA_NO_KEY_CACHE:
  default:
    break;
  }
  DBUG_RETURN(error);
} /* maria_extra */
/*
  Perform an emergency action on every open ISAM table.

  @param flag  HA_PANIC_CLOSE  close all open tables (and the log),
               HA_PANIC_WRITE  flush key/record caches and release locks
                               (and close files if CANT_OPEN_FILES_TWICE),
               HA_PANIC_READ   undo a previous HA_PANIC_WRITE (re-open
                               files / re-acquire locks).

  Walks the global nisam_open_list under THR_LOCK_isam.  Returns 0 on
  success; otherwise sets my_errno to the last error seen and returns -1.
*/
int nisam_panic(enum ha_panic_function flag)
{
  int error=0;
  LIST *list_element,*next_open;
  N_INFO *info;
  DBUG_ENTER("nisam_panic");

  pthread_mutex_lock(&THR_LOCK_isam);
  for (list_element=nisam_open_list ; list_element ; list_element=next_open)
  {
    next_open=list_element->next;		/* Save if close */
    info=(N_INFO*) list_element->data;
    switch (flag) {
    case HA_PANIC_CLOSE:
      /* nisam_close() takes THR_LOCK_isam itself, so release it first */
      pthread_mutex_unlock(&THR_LOCK_isam);	/* Not exactly right... */
      if (nisam_close(info))
	error=my_errno;
      pthread_mutex_lock(&THR_LOCK_isam);
      break;
    case HA_PANIC_WRITE:		/* Do this to free databases */
#ifdef CANT_OPEN_FILES_TWICE
      if (info->s->base.options & HA_OPTION_READ_ONLY_DATA)
	break;
#endif
      if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE))
	error=my_errno;
      if (info->opt_flag & WRITE_CACHE_USED)
	if (flush_io_cache(&info->rec_cache))
	  error=my_errno;
      if (info->opt_flag & READ_CACHE_USED)
      {
	if (flush_io_cache(&info->rec_cache))
	  error=my_errno;
	reinit_io_cache(&info->rec_cache,READ_CACHE,0,
			(pbool) (info->lock_type != F_UNLCK),1);
      }
#ifndef NO_LOCKING
      /* Remember held lock in was_locked so HA_PANIC_READ can restore it */
      if (info->lock_type != F_UNLCK && ! info->was_locked)
      {
	info->was_locked=info->lock_type;
	if (nisam_lock_database(info,F_UNLCK))
	  error=my_errno;
      }
#else
      {
	int save_status=info->s->w_locks;	/* Only w_locks! */
	info->s->w_locks=0;
	if (_nisam_writeinfo(info, test(info->update & HA_STATE_CHANGED)))
	  error=my_errno;
	info->s->w_locks=save_status;
	info->update&= ~HA_STATE_CHANGED;	/* Not changed */
      }
#endif /* NO_LOCKING */
#ifdef CANT_OPEN_FILES_TWICE
      if (info->s->kfile >= 0 && my_close(info->s->kfile,MYF(0)))
	error = my_errno;
      if (info->dfile >= 0 && my_close(info->dfile,MYF(0)))
	error = my_errno;
      info->s->kfile=info->dfile= -1;	/* Files aren't open anymore */
      break;
#endif
      /* fall through (when files were not closed above) */
    case HA_PANIC_READ:			/* Restore to before WRITE */
#ifdef CANT_OPEN_FILES_TWICE
      {					/* Open closed files */
	char name_buff[FN_REFLEN];
	if (info->s->kfile < 0)
	  if ((info->s->kfile= my_open(fn_format(name_buff,info->filename,"",
						 N_NAME_IEXT,4),info->mode,
				       MYF(MY_WME))) < 0)
	    error = my_errno;
	if (info->dfile < 0)
	{
	  if ((info->dfile= my_open(fn_format(name_buff,info->filename,"",
					      N_NAME_DEXT,4),info->mode,
				    MYF(MY_WME))) < 0)
	    error = my_errno;
	  info->rec_cache.file=info->dfile;
	}
      }
#endif
#ifndef NO_LOCKING
      if (info->was_locked)
      {
	if (nisam_lock_database(info, info->was_locked))
	  error=my_errno;
	info->was_locked=0;
      }
#else
      {
	/* Re-read share info with locking temporarily disabled */
	int lock_type,w_locks;
	lock_type=info->lock_type ; w_locks=info->s->w_locks;
	info->lock_type=0;
	info->s->w_locks=0;
	if (_nisam_readinfo(info,0,1))		/* Read changed data */
	  error=my_errno;
	info->lock_type=lock_type;
	info->s->w_locks=w_locks;
      }
      /* Don't use buffer when doing next */
      info->update|=HA_STATE_WRITTEN;
#endif /* NO_LOCKING */
      break;
    }
  }
  if (flag == HA_PANIC_CLOSE)
    VOID(nisam_log(0));				/* Close log if needed */
  pthread_mutex_unlock(&THR_LOCK_isam);
  if (!error)
    DBUG_RETURN(0);
  my_errno=error;
  DBUG_RETURN(-1);
} /* nisam_panic */
/*
  Close/flush/restore all open MyISAM tables for a "panic" event.

  flag == HA_PANIC_CLOSE  Close every open table, the log and stopwords.
  flag == HA_PANIC_WRITE  Flush key cache and record caches and release
                          locks so an external program can use the files.
  flag == HA_PANIC_READ   Undo HA_PANIC_WRITE: re-take the saved locks
                          (and, with CANT_OPEN_FILES_TWICE, reopen files).

  Returns 0 on success; otherwise sets my_errno to the last error seen
  and returns it.  Errors are accumulated so every table is visited.
*/

int mi_panic(enum ha_panic_function flag)
{
  int error=0;
  LIST *list_element,*next_open;
  MI_INFO *info;
  DBUG_ENTER("mi_panic");

  mysql_mutex_lock(&THR_LOCK_myisam);
  for (list_element=myisam_open_list ; list_element ; list_element=next_open)
  {
    next_open=list_element->next;               /* Save if close */
    info=(MI_INFO*) list_element->data;
    switch (flag) {
    case HA_PANIC_CLOSE:
      /*
        mi_close() takes THR_LOCK_myisam itself, so drop it around the
        call; next_open was saved above so removal of this list element
        by mi_close() is safe ("Not exactly right" w.r.t. concurrency).
      */
      mysql_mutex_unlock(&THR_LOCK_myisam);     /* Not exactly right... */
      if (mi_close(info))
        error=my_errno;
      mysql_mutex_lock(&THR_LOCK_myisam);
      break;
    case HA_PANIC_WRITE:                /* Do this to free databases */
#ifdef CANT_OPEN_FILES_TWICE
      if (info->s->options & HA_OPTION_READ_ONLY_DATA)
        break;                          /* Read-only data: nothing to flush */
#endif
      /* Write changed key blocks and drop them from this table's key cache */
      if (flush_key_blocks(info->s->key_cache, info->s->kfile, FLUSH_RELEASE))
        error=my_errno;
      if (info->opt_flag & WRITE_CACHE_USED)
        if (flush_io_cache(&info->rec_cache))
          error=my_errno;
      if (info->opt_flag & READ_CACHE_USED)
      {
        if (flush_io_cache(&info->rec_cache))
          error=my_errno;
        /* Re-init the read cache so stale buffered data is discarded */
        reinit_io_cache(&info->rec_cache,READ_CACHE,0,
                        (pbool) (info->lock_type != F_UNLCK),1);
      }
      /* Remember the current lock so HA_PANIC_READ can restore it */
      if (info->lock_type != F_UNLCK && ! info->was_locked)
      {
        info->was_locked=info->lock_type;
        if (mi_lock_database(info,F_UNLCK))
          error=my_errno;
      }
#ifdef CANT_OPEN_FILES_TWICE
      if (info->s->kfile >= 0 && mysql_file_close(info->s->kfile, MYF(0)))
        error = my_errno;
      if (info->dfile >= 0 && mysql_file_close(info->dfile, MYF(0)))
        error = my_errno;
      info->s->kfile=info->dfile= -1;   /* Files aren't open anymore */
      break;
#endif
      /* fall through: without CANT_OPEN_FILES_TWICE, WRITE also does READ */
    case HA_PANIC_READ:                 /* Restore to before WRITE */
#ifdef CANT_OPEN_FILES_TWICE
      {                                 /* Open closed files */
        char name_buff[FN_REFLEN];
        if (info->s->kfile < 0)
          if ((info->s->kfile= mysql_file_open(mi_key_file_kfile,
                                               fn_format(name_buff,
                                                         info->filename, "",
                                                         N_NAME_IEXT, 4),
                                               info->mode,
                                               MYF(MY_WME))) < 0)
            error = my_errno;
        if (info->dfile < 0)
        {
          if ((info->dfile= mysql_file_open(mi_key_file_dfile,
                                            fn_format(name_buff,
                                                      info->filename, "",
                                                      N_NAME_DEXT, 4),
                                            info->mode,
                                            MYF(MY_WME))) < 0)
            error = my_errno;
          info->rec_cache.file=info->dfile;     /* Cache must use new fd */
        }
      }
#endif
      /* Re-take the lock saved by HA_PANIC_WRITE, if any */
      if (info->was_locked)
      {
        if (mi_lock_database(info, info->was_locked))
          error=my_errno;
        info->was_locked=0;
      }
      break;
    }
  }
  if (flag == HA_PANIC_CLOSE)
  {
    (void) mi_log(0);                   /* Close log if needed */
    ft_free_stopwords();
  }
  mysql_mutex_unlock(&THR_LOCK_myisam);
  if (!error)
    DBUG_RETURN(0);
  DBUG_RETURN(my_errno=error);          /* Report the last error seen */
} /* mi_panic */
/*
  Read a fixed-length record by file position for a sequential scan.

  info                  Open ISAM table handle.
  buf                   Destination buffer for the record.
  filepos               Byte offset of the record in the data file.
  skipp_deleted_blocks  Non-zero for a forward scan that skips deleted
                        rows; enables use of the record read cache.

  Returns 0 on success, 1 (with my_errno=HA_ERR_RECORD_DELETED) if the
  record at filepos is deleted, -1 on error/EOF with my_errno set.
*/

int _nisam_read_rnd_static_record(N_INFO *info, byte *buf,
                                  register ulong filepos,
                                  int skipp_deleted_blocks)
{
  int locked,error,cache_read;
  uint cache_length;
  ISAM_SHARE *share=info->s;
  DBUG_ENTER("_nisam_read_rnd_static_record");

  cache_read=0;
  LINT_INIT(cache_length);              /* Only used when cache_read is set */
  /*
    If a write cache may hold data at or before filepos (or we are
    scanning forward), flush it so the read below sees current data.
  */
  if (info->opt_flag & WRITE_CACHE_USED &&
      (info->rec_cache.pos_in_file <= filepos || skipp_deleted_blocks) &&
      flush_io_cache(&info->rec_cache))
    DBUG_RETURN(-1);
  if (info->opt_flag & READ_CACHE_USED)
  {                                     /* Cache in use */
    if (filepos == my_b_tell(&info->rec_cache) &&
        (skipp_deleted_blocks || !filepos))
    {
      cache_read=1;                     /* Read record using cache */
      cache_length=(uint) (info->rec_cache.rc_end - info->rec_cache.rc_pos);
    }
    else
      info->rec_cache.seek_not_done=1;  /* Filepos is changed */
  }
#ifndef NO_LOCKING
  locked=0;
  if (info->lock_type == F_UNLCK)
  {
    if (filepos >= share->state.data_file_length)
    {                                   /* Test if new records */
      if (_nisam_readinfo(info,F_RDLCK,0))
        DBUG_RETURN(-1);
      locked=1;
    }
    else
    {                                   /* We don't need new info */
#ifndef UNSAFE_LOCKING
      /* Lock only if the record can't be served entirely from the cache */
      if ((! cache_read || share->base.reclength > cache_length) &&
          share->r_locks == 0 && share->w_locks == 0)
      {                                 /* record not in cache */
        if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF,
                    MYF(MY_SEEK_NOT_DONE) | info->lock_wait))
          DBUG_RETURN(-1);
        locked=1;
      }
#else
      info->tmp_lock_type=F_RDLCK;
#endif
    }
  }
#endif
  if (filepos >= share->state.data_file_length)
  {
#ifndef NO_LOCKING
    DBUG_PRINT("test",("filepos: %ld (%ld) records: %ld del: %ld",
                       filepos/share->base.reclength,filepos,
                       share->state.records, share->state.del));
    VOID(_nisam_writeinfo(info,0));     /* Release lock taken above */
#endif
    my_errno=HA_ERR_END_OF_FILE;
    DBUG_RETURN(-1);
  }
  info->lastpos= filepos;
  info->nextpos= filepos+share->base.reclength;

  if (! cache_read)                     /* No caching */
  {
    error=_nisam_read_static_record(info,filepos,buf);
    if (error > 0)
      my_errno=HA_ERR_RECORD_DELETED;   /* Positive result = deleted row */
    DBUG_RETURN(error);
  }

  /* Read record with caching */
  error=my_b_read(&info->rec_cache,(byte*) buf,share->base.reclength);
#ifndef NO_LOCKING
  if (locked)
    VOID(_nisam_writeinfo(info,0));     /* Unlock keyfile */
#endif
  if (!error)
  {
    if (!buf[0])
    {                                   /* Record is removed */
      my_errno=HA_ERR_RECORD_DELETED;
      DBUG_RETURN(1);
    }
    /* Found and may be updated */
    info->update|= HA_STATE_AKTIV | HA_STATE_KEY_CHANGED;
    DBUG_RETURN(0);
  }
  /* Cached read failed; keep my_errno from a real I/O error (-1) */
  if (info->rec_cache.error != -1 || my_errno == 0)
    my_errno=HA_ERR_WRONG_IN_RECORD;
  DBUG_RETURN(-1);                      /* Something wrong (EOF?) */
}
/*
  Apply an "extra" operation/hint to an open MyISAM table.

  info       Open table handle.
  function   Which operation to perform (see cases below).
  extra_arg  Optional argument; used as ulong* cache/buffer size by
             HA_EXTRA_CACHE, HA_EXTRA_WRITE_CACHE and
             HA_EXTRA_PRELOAD_BUFFER_SIZE.

  Returns 0 on success, non-zero on error (1 or my_errno, depending
  on the case).  Every call is appended to the MyISAM log at the end.
*/

int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
{
  int error=0;
  ulong cache_size;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_extra");
  DBUG_PRINT("enter",("function: %d",(int) function));

  switch (function) {
  case HA_EXTRA_RESET_STATE:            /* Reset state (don't free buffers) */
    info->lastinx= 0;                   /* Use first index as def */
    info->last_search_keypage=info->lastpos= HA_OFFSET_ERROR;
    info->page_changed=1;
    /* Next/prev gives first/last */
    if (info->opt_flag & READ_CACHE_USED)
    {
      reinit_io_cache(&info->rec_cache,READ_CACHE,0,
                      (pbool) (info->lock_type != F_UNLCK),
                      (pbool) test(info->update & HA_STATE_ROW_CHANGED)
                      );
    }
    info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
                   HA_STATE_PREV_FOUND);
    break;
  case HA_EXTRA_CACHE:
    /* Enable the record read cache (for sequential scans) */
    if (info->lock_type == F_UNLCK &&
        (share->options & HA_OPTION_PACK_RECORD))
    {
      error=1;                          /* Not possible if not locked */
      my_errno=EACCES;
      break;
    }
    if (info->s->file_map)              /* Don't use cache if mmap */
      break;
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if ((share->options & HA_OPTION_COMPRESS_RECORD))
    {
      mysql_mutex_lock(&share->intern_lock);
      if (_mi_memmap_file(info))
      {
        /* We don't need MADV_SEQUENTIAL if small file */
        madvise((char*) share->file_map, share->state.state.data_file_length,
                share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ?
                MADV_RANDOM : MADV_SEQUENTIAL);
        mysql_mutex_unlock(&share->intern_lock);
        break;
      }
      mysql_mutex_unlock(&share->intern_lock);
    }
#endif
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      /* Switch from write cache to read cache */
      info->opt_flag&= ~WRITE_CACHE_USED;
      if ((error=end_io_cache(&info->rec_cache)))
        break;
    }
    if (!(info->opt_flag &
          (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED)))
    {
      cache_size= (extra_arg ? *(ulong*) extra_arg :
                   my_default_record_cache_size);
      if (!(init_io_cache(&info->rec_cache,info->dfile,
                          (uint) min(info->state->data_file_length+1,
                                     cache_size),
                          READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
                          MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
        info->opt_flag|=READ_CACHE_USED;
        info->update&= ~HA_STATE_ROW_CHANGED;
      }
      if (share->concurrent_insert)
        info->rec_cache.end_of_file=info->state->data_file_length;
    }
    break;
  case HA_EXTRA_REINIT_CACHE:
    if (info->opt_flag & READ_CACHE_USED)
    {
      /* Restart the read cache at the next scan position */
      reinit_io_cache(&info->rec_cache,READ_CACHE,info->nextpos,
                      (pbool) (info->lock_type != F_UNLCK),
                      (pbool) test(info->update & HA_STATE_ROW_CHANGED));
      info->update&= ~HA_STATE_ROW_CHANGED;
      if (share->concurrent_insert)
        info->rec_cache.end_of_file=info->state->data_file_length;
    }
    break;
  case HA_EXTRA_WRITE_CACHE:
    if (info->lock_type == F_UNLCK)
    {
      error=1;                          /* Not possible if not locked */
      break;
    }
    cache_size= (extra_arg ? *(ulong*) extra_arg :
                 my_default_record_cache_size);
    /* No write cache when uniques must be checked on insert */
    if (!(info->opt_flag &
          (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) &&
        !share->state.header.uniques)
      if (!(init_io_cache(&info->rec_cache,info->dfile, cache_size,
                          WRITE_CACHE,info->state->data_file_length,
                          (pbool) (info->lock_type != F_UNLCK),
                          MYF(share->write_flag & MY_WAIT_IF_FULL))))
      {
        info->opt_flag|=WRITE_CACHE_USED;
        info->update&= ~(HA_STATE_ROW_CHANGED |
                         HA_STATE_WRITE_AT_END |
                         HA_STATE_EXTEND_BLOCK);
      }
    break;
  case HA_EXTRA_PREPARE_FOR_UPDATE:
    if (info->s->data_file_type != DYNAMIC_RECORD)
      break;
    /* Remove read/write cache if dynamic rows */
    /* fall through */
  case HA_EXTRA_NO_CACHE:
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error=end_io_cache(&info->rec_cache);
      /* Sergei will insert full text index caching here */
    }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
    if (info->opt_flag & MEMMAP_USED)
      madvise((char*) share->file_map, share->state.state.data_file_length,
              MADV_RANDOM);
#endif
    break;
  case HA_EXTRA_FLUSH_CACHE:
    if (info->opt_flag & WRITE_CACHE_USED)
    {
      if ((error=flush_io_cache(&info->rec_cache)))
      {
        mi_print_error(info->s, HA_ERR_CRASHED);
        mi_mark_crashed(info);          /* Fatal error found */
      }
    }
    break;
  case HA_EXTRA_NO_READCHECK:
    info->opt_flag&= ~READ_CHECK_USED;  /* No readcheck */
    break;
  case HA_EXTRA_READCHECK:
    info->opt_flag|= READ_CHECK_USED;
    break;
  case HA_EXTRA_KEYREAD:                /* Read only keys to record */
    /* fall through: KEYREAD first saves position like REMEMBER_POS */
  case HA_EXTRA_REMEMBER_POS:
    info->opt_flag |= REMEMBER_OLD_POS;
    /* Save last key/position/state so RESTORE_POS can bring them back */
    bmove((uchar*) info->lastkey+share->base.max_key_length*2,
          (uchar*) info->lastkey,info->lastkey_length);
    info->save_update=  info->update;
    info->save_lastinx= info->lastinx;
    info->save_lastpos= info->lastpos;
    info->save_lastkey_length=info->lastkey_length;
    if (function == HA_EXTRA_REMEMBER_POS)
      break;
    /* fall through */
  case HA_EXTRA_KEYREAD_CHANGE_POS:
    info->opt_flag |= KEY_READ_USED;
    info->read_record=_mi_read_key_record;
    break;
  case HA_EXTRA_NO_KEYREAD:
  case HA_EXTRA_RESTORE_POS:
    if (info->opt_flag & REMEMBER_OLD_POS)
    {
      /* Restore what REMEMBER_POS/KEYREAD saved */
      bmove((uchar*) info->lastkey,
            (uchar*) info->lastkey+share->base.max_key_length*2,
            info->save_lastkey_length);
      info->update=     info->save_update | HA_STATE_WRITTEN;
      info->lastinx=    info->save_lastinx;
      info->lastpos=    info->save_lastpos;
      info->lastkey_length=info->save_lastkey_length;
    }
    info->read_record=  share->read_record;
    info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
    break;
  case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked against changes */
    info->lock_type= F_EXTRA_LCK;       /* Simulate as locked */
    break;
  case HA_EXTRA_WAIT_LOCK:
    info->lock_wait=0;                  /* 0 = block until lock granted */
    break;
  case HA_EXTRA_NO_WAIT_LOCK:
    info->lock_wait=MY_DONT_WAIT;
    break;
  case HA_EXTRA_NO_KEYS:
    /* Disable all non-unique, non-auto-increment keys */
    if (info->lock_type == F_UNLCK)
    {
      error=1;                          /* Not possible if not locked */
      break;
    }
    if (mi_is_any_key_active(share->state.key_map))
    {
      MI_KEYDEF *key=share->keyinfo;
      uint i;
      for (i=0 ; i < share->base.keys ; i++,key++)
      {
        if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1)
        {
          mi_clear_key_active(share->state.key_map, i);
          info->update|= HA_STATE_CHANGED;
        }
      }

      if (!share->changed)
      {
        share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED;
        share->changed=1;               /* Update on close */
        if (!share->global_changed)
        {
          share->global_changed=1;
          share->state.open_count++;
        }
      }
      share->state.state= *info->state;
      error=mi_state_info_write(share->kfile,&share->state,1 | 2);
    }
    break;
  case HA_EXTRA_FORCE_REOPEN:
    mysql_mutex_lock(&THR_LOCK_myisam);
    share->last_version= 0L;            /* Impossible version */
    mysql_mutex_unlock(&THR_LOCK_myisam);
    break;
  case HA_EXTRA_PREPARE_FOR_DROP:
    mysql_mutex_lock(&THR_LOCK_myisam);
    share->last_version= 0L;            /* Impossible version */
#ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND
    /* Close the isam and data files as Win32 can't drop an open table */
    mysql_mutex_lock(&share->intern_lock);
    if (flush_key_blocks(share->key_cache, share->kfile,
                         (function == HA_EXTRA_FORCE_REOPEN ?
                          FLUSH_RELEASE : FLUSH_IGNORE_CHANGED)))
    {
      error=my_errno;
      share->changed=1;
      mi_print_error(info->s, HA_ERR_CRASHED);
      mi_mark_crashed(info);            /* Fatal error found */
    }
    if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
    {
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      error=end_io_cache(&info->rec_cache);
    }
    if (info->lock_type != F_UNLCK && ! info->was_locked)
    {
      info->was_locked=info->lock_type;
      if (mi_lock_database(info,F_UNLCK))
        error=my_errno;
      info->lock_type = F_UNLCK;
    }
    if (share->kfile >= 0)
      _mi_decrement_open_count(info);
    if (share->kfile >= 0 && mysql_file_close(share->kfile, MYF(0)))
      error=my_errno;
    {
      /* Close the data file of every handler that shares this table */
      LIST *list_element ;
      for (list_element=myisam_open_list ;
           list_element ;
           list_element=list_element->next)
      {
        MI_INFO *tmpinfo=(MI_INFO*) list_element->data;
        if (tmpinfo->s == info->s)
        {
          if (tmpinfo->dfile >= 0 && mysql_file_close(tmpinfo->dfile, MYF(0)))
            error = my_errno;
          tmpinfo->dfile= -1;
        }
      }
    }
    share->kfile= -1;                   /* Files aren't open anymore */
    mysql_mutex_unlock(&share->intern_lock);
#endif
    mysql_mutex_unlock(&THR_LOCK_myisam);
    break;
  case HA_EXTRA_FLUSH:
    if (!share->temporary)
      flush_key_blocks(share->key_cache, share->kfile, FLUSH_KEEP);
#ifdef HAVE_PWRITE
    _mi_decrement_open_count(info);
#endif
    if (share->not_flushed)
    {
      share->not_flushed=0;
      if (mysql_file_sync(share->kfile, MYF(0)))
        error= my_errno;
      if (mysql_file_sync(info->dfile, MYF(0)))
        error= my_errno;
      if (error)
      {
        share->changed=1;
        mi_print_error(info->s, HA_ERR_CRASHED);
        mi_mark_crashed(info);          /* Fatal error found */
      }
    }
    if (share->base.blobs)
      mi_alloc_rec_buff(info, -1, &info->rec_buff);
    break;
  case HA_EXTRA_NORMAL:                 /* These aren't in use */
    info->quick_mode=0;
    break;
  case HA_EXTRA_QUICK:
    info->quick_mode=1;
    break;
  case HA_EXTRA_NO_ROWS:
    if (!share->state.header.uniques)
      info->opt_flag|= OPT_NO_ROWS;
    break;
  case HA_EXTRA_PRELOAD_BUFFER_SIZE:
    info->preload_buff_size= *((ulong *) extra_arg);
    break;
  case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
  case HA_EXTRA_CHANGE_KEY_TO_DUP:
    mi_extra_keyflag(info, function);
    break;
  case HA_EXTRA_MMAP:
#ifdef HAVE_MMAP
    mysql_mutex_lock(&share->intern_lock);
    /*
      Memory map the data file if it is not already mapped. It is safe
      to memory map a file while other threads are using file I/O on it.
      Assigning a new address to a function pointer is an atomic
      operation. intern_lock prevents that two or more mappings are done
      at the same time.
    */
    if (!share->file_map)
    {
      if (mi_dynmap_file(info, share->state.state.data_file_length))
      {
        DBUG_PRINT("warning",("mmap failed: errno: %d",errno));
        error= my_errno= errno;
      }
    }
    mysql_mutex_unlock(&share->intern_lock);
#endif
    break;
  case HA_EXTRA_MARK_AS_LOG_TABLE:
    mysql_mutex_lock(&share->intern_lock);
    share->is_log_table= TRUE;
    mysql_mutex_unlock(&share->intern_lock);
    break;
  case HA_EXTRA_KEY_CACHE:
  case HA_EXTRA_NO_KEY_CACHE:
  default:
    break;
  }
  {
    /* Log the extra call (one byte: the function code) */
    char tmp[1];
    tmp[0]=function;
    myisam_log_command(MI_LOG_EXTRA,info,(uchar*) tmp,1,error);
  }
  DBUG_RETURN(error);
} /* mi_extra */
/*
  Read a fixed-length record by file position for a sequential scan.

  info                 Open MyISAM table handle.
  buf                  Destination buffer for the record.
  filepos              Byte offset of the record in the data file.
  skip_deleted_blocks  TRUE for a forward scan that skips deleted rows;
                       enables use of the record read cache.

  Returns 0 on success, otherwise my_errno (HA_ERR_RECORD_DELETED,
  HA_ERR_END_OF_FILE, HA_ERR_WRONG_IN_RECORD or an I/O error).
*/

int _mi_read_rnd_static_record(MI_INFO *info, uchar *buf,
                               register my_off_t filepos,
                               my_bool skip_deleted_blocks)
{
  int locked,error,cache_read;
  uint cache_length;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("_mi_read_rnd_static_record");

  cache_read=0;
  cache_length=0;
  /*
    If a write cache may hold data at or before filepos (or we are
    scanning forward), flush it so the read below sees current data.
  */
  if (info->opt_flag & WRITE_CACHE_USED &&
      (info->rec_cache.pos_in_file <= filepos || skip_deleted_blocks) &&
      flush_io_cache(&info->rec_cache))
    DBUG_RETURN(my_errno);
  if (info->opt_flag & READ_CACHE_USED)
  {                                     /* Cache in use */
    if (filepos == my_b_tell(&info->rec_cache) &&
        (skip_deleted_blocks || !filepos))
    {
      cache_read=1;                     /* Read record using cache */
      cache_length=(uint) (info->rec_cache.read_end - info->rec_cache.read_pos);
    }
    else
      info->rec_cache.seek_not_done=1;  /* Filepos is changed */
  }
  locked=0;
  if (info->lock_type == F_UNLCK)
  {
    if (filepos >= info->state->data_file_length)
    {                                   /* Test if new records */
      if (_mi_readinfo(info,F_RDLCK,0))
        DBUG_RETURN(my_errno);
      locked=1;
    }
    else
    {                                   /* We don't need new info */
#ifndef UNSAFE_LOCKING
      /* Lock only if the record can't be served entirely from the cache */
      if ((! cache_read || share->base.reclength > cache_length) &&
          share->tot_locks == 0)
      {                                 /* record not in cache */
        if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF,
                    MYF(MY_SEEK_NOT_DONE) | info->lock_wait))
          DBUG_RETURN(my_errno);
        locked=1;
      }
#else
      info->tmp_lock_type=F_RDLCK;
#endif
    }
  }
  if (filepos >= info->state->data_file_length)
  {
    DBUG_PRINT("test",("filepos: %ld (%ld) records: %ld del: %ld",
                       (long) filepos/share->base.reclength, (long) filepos,
                       (long) info->state->records, (long) info->state->del));
    fast_mi_writeinfo(info);            /* Release lock taken above */
    DBUG_RETURN(my_errno=HA_ERR_END_OF_FILE);
  }
  info->lastpos= filepos;
  /* pack_reclength includes the fill bytes skipped below */
  info->nextpos= filepos+share->base.pack_reclength;

  if (! cache_read)                     /* No caching */
  {
    if ((error=_mi_read_static_record(info,filepos,buf)))
    {
      if (error > 0)
        error=my_errno=HA_ERR_RECORD_DELETED;   /* Positive = deleted row */
      else
        error=my_errno;
    }
    DBUG_RETURN(error);
  }

  /*
    Read record with caching. If my_b_read() returns TRUE, less than the
    requested bytes have been read. In this case rec_cache.error is
    either -1 for a read error, or contains the number of bytes copied
    into the buffer.
  */
  error=my_b_read(&info->rec_cache,(uchar*) buf,share->base.reclength);
  if (info->s->base.pack_reclength != info->s->base.reclength && !error)
  {
    char tmp[8];                        /* Skip fill bytes */
    error=my_b_read(&info->rec_cache,(uchar*) tmp,
                    info->s->base.pack_reclength - info->s->base.reclength);
  }
  if (locked)
    (void) _mi_writeinfo(info,0);       /* Unlock keyfile */
  if (!error)
  {
    if (!buf[0])
    {                                   /* Record is removed */
      DBUG_RETURN(my_errno=HA_ERR_RECORD_DELETED);
    }
    /* Found and may be updated */
    info->update|= HA_STATE_AKTIV | HA_STATE_KEY_CHANGED;
    DBUG_RETURN(0);
  }
  /* error is TRUE. my_errno should be set if rec_cache.error == -1 */
  if (info->rec_cache.error != -1 || my_errno == 0)
  {
    /*
      If we could not get a full record, we either have a broken record,
      or are at end of file.
    */
    if (info->rec_cache.error == 0)
      my_errno= HA_ERR_END_OF_FILE;
    else
      my_errno= HA_ERR_WRONG_IN_RECORD;
  }
  DBUG_RETURN(my_errno);                /* Something wrong (EOF?) */
}