int mi_delete_all_rows(MI_INFO *info)
{
  uint i;
  MYISAM_SHARE *share=info->s;
  MI_STATE_INFO *state=&share->state;
  DBUG_ENTER("mi_delete_all_rows");

  if (share->options & HA_OPTION_READ_ONLY_DATA)
  {
    DBUG_RETURN(my_errno=EACCES);
  }
  if (_mi_readinfo(info,F_WRLCK,1))
    DBUG_RETURN(my_errno);
  if (_mi_mark_file_changed(info))
    goto err;

  info->state->records=info->state->del=state->split=0;
  state->dellink = HA_OFFSET_ERROR;
  state->sortkey= (ushort) ~0;
  info->state->key_file_length=share->base.keystart;
  info->state->data_file_length=0;
  info->state->empty=info->state->key_empty=0;
  info->state->checksum=0;

  for (i=share->base.max_key_block_length/MI_MIN_KEY_BLOCK_LENGTH ; i-- ; )
    state->key_del[i]= HA_OFFSET_ERROR;
  for (i=0 ; i < share->base.keys ; i++)
    state->key_root[i]= HA_OFFSET_ERROR;

  myisam_log_command(MI_LOG_DELETE_ALL,info,(uchar*) 0,0,0);
  /*
    If we are using delayed keys or if the user has done changes to the
    tables since it was locked then there may be key blocks in the key cache
  */
  flush_key_blocks(share->key_cache, share->kfile, FLUSH_IGNORE_CHANGED);
#ifdef HAVE_MMAP
  if (share->file_map)
    mi_munmap_file(info);
#endif
  if (mysql_file_chsize(info->dfile, 0, 0, MYF(MY_WME)) ||
      mysql_file_chsize(share->kfile, share->base.keystart, 0, MYF(MY_WME)))
    goto err;
  (void) _mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE);
#ifdef HAVE_MMAP
  /* Map again */
  if (share->file_map)
    mi_dynmap_file(info, (my_off_t) 0);
#endif
  allow_break();                        /* Allow SIGHUP & SIGINT */
  DBUG_RETURN(0);

err:
  {
    int save_errno=my_errno;
    (void) _mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE);
    info->update|=HA_STATE_WRITTEN;     /* Buffer changed */
    allow_break();                      /* Allow SIGHUP & SIGINT */
    DBUG_RETURN(my_errno=save_errno);
  }
} /* mi_delete_all_rows */
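/*
  Illustrative sketch, not part of the original source: one way a caller
  might truncate a table with mi_delete_all_rows().  The table name, the
  use of 0 for mi_open()'s open flags and the reduced error handling are
  assumptions made for the example only.
*/
#ifdef MI_EXAMPLE_DELETE_ALL
static int example_truncate(const char *table_name)
{
  MI_INFO *info;
  int error;

  if (!(info= mi_open(table_name, O_RDWR, 0)))  /* flags are an assumption */
    return my_errno;

  /* Take a write lock, drop all rows and index entries, then release */
  if (!(error= mi_lock_database(info, F_WRLCK)))
  {
    error= mi_delete_all_rows(info);
    if (mi_lock_database(info, F_UNLCK) && !error)
      error= my_errno;
  }
  if (mi_close(info) && !error)
    error= my_errno;
  return error;
}
#endif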
int mi_panic(enum ha_panic_function flag) { int error=0; LIST *list_element,*next_open; MI_INFO *info; DBUG_ENTER("mi_panic"); mysql_mutex_lock(&THR_LOCK_myisam); for (list_element=myisam_open_list ; list_element ; list_element=next_open) { next_open=list_element->next; /* Save if close */ info=(MI_INFO*) list_element->data; switch (flag) { case HA_PANIC_CLOSE: mysql_mutex_unlock(&THR_LOCK_myisam); /* Not exactly right... */ if (mi_close(info)) error=my_errno(); mysql_mutex_lock(&THR_LOCK_myisam); break; case HA_PANIC_WRITE: /* Do this to free databases */ if (flush_key_blocks(info->s->key_cache, keycache_thread_var(), info->s->kfile, FLUSH_RELEASE)) error=my_errno(); if (info->opt_flag & WRITE_CACHE_USED) if (flush_io_cache(&info->rec_cache)) error=my_errno(); if (info->opt_flag & READ_CACHE_USED) { if (flush_io_cache(&info->rec_cache)) error=my_errno(); reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK),1); } if (info->lock_type != F_UNLCK && ! info->was_locked) { info->was_locked=info->lock_type; if (mi_lock_database(info,F_UNLCK)) error=my_errno(); } case HA_PANIC_READ: /* Restore to before WRITE */ if (info->was_locked) { if (mi_lock_database(info, info->was_locked)) error=my_errno(); info->was_locked=0; } break; } } if (flag == HA_PANIC_CLOSE) { (void) mi_log(0); /* Close log if neaded */ ft_free_stopwords(); } mysql_mutex_unlock(&THR_LOCK_myisam); if (!error) DBUG_RETURN(0); set_my_errno(error); DBUG_RETURN(error); } /* mi_panic */
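/*
  Illustrative sketch, not part of the original source: the panic calls a
  server could issue around an external copy of the table files.
  HA_PANIC_WRITE flushes key blocks and record caches and releases locks;
  HA_PANIC_READ re-takes the locks saved by the write panic.  Assumes no
  other thread modifies the tables in between.
*/
#ifdef MI_EXAMPLE_PANIC
static int example_flush_for_backup(void)
{
  int error;

  if ((error= mi_panic(HA_PANIC_WRITE)))   /* flush all open MyISAM tables */
    return error;
  /* ... external copy of the data and index files would happen here ... */
  return mi_panic(HA_PANIC_READ);          /* restore the saved locks */
}
#endif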
int _mi_test_if_changed(register MI_INFO *info)
{
  MYISAM_SHARE *share=info->s;
  if (share->state.process != share->last_process ||
      share->state.unique != info->last_unique ||
      share->state.update_count != info->last_loop)
  {                                             /* Keyfile has changed */
    DBUG_PRINT("info",("index file changed"));
    if (share->state.process != share->this_process)
      VOID(flush_key_blocks(share->kfile,FLUSH_RELEASE));
    share->last_process=share->state.process;
    info->last_unique= share->state.unique;
    info->last_loop= share->state.update_count;
    info->update|= HA_STATE_WRITTEN;            /* Must use file on next */
    info->data_changed= 1;                      /* For mi_is_changed */
    return 1;
  }
  return (!(info->update & HA_STATE_AKTIV) ||
          (info->update & (HA_STATE_WRITTEN | HA_STATE_DELETED |
                           HA_STATE_KEY_CHANGED)));
} /* _mi_test_if_changed */
int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) { int error=0; ulong cache_size; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_extra"); DBUG_PRINT("enter",("function: %d",(int) function)); switch (function) { case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */ info->lastinx= 0; /* Use first index as def */ info->last_search_keypage=info->lastpos= HA_OFFSET_ERROR; info->page_changed=1; /* Next/prev gives first/last */ if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED) ); } info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND); break; case HA_EXTRA_CACHE: if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) { error=1; /* Not possibly if not locked */ my_errno=EACCES; break; } if (info->s->file_map) /* Don't use cache if mmap */ break; #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if ((share->options & HA_OPTION_COMPRESS_RECORD)) { mysql_mutex_lock(&share->intern_lock); if (_mi_memmap_file(info)) { /* We don't nead MADV_SEQUENTIAL if small file */ madvise((char*) share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ? MADV_RANDOM : MADV_SEQUENTIAL); mysql_mutex_unlock(&share->intern_lock); break; } mysql_mutex_unlock(&share->intern_lock); } #endif if (info->opt_flag & WRITE_CACHE_USED) { info->opt_flag&= ~WRITE_CACHE_USED; if ((error=end_io_cache(&info->rec_cache))) break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) { cache_size= (extra_arg ? *(ulong*) extra_arg : my_default_record_cache_size); if (!(init_io_cache(&info->rec_cache,info->dfile, (uint) min(info->state->data_file_length+1, cache_size), READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|=READ_CACHE_USED; info->update&= ~HA_STATE_ROW_CHANGED; } if (share->concurrent_insert) info->rec_cache.end_of_file=info->state->data_file_length; } break; case HA_EXTRA_REINIT_CACHE: if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache,READ_CACHE,info->nextpos, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED)); info->update&= ~HA_STATE_ROW_CHANGED; if (share->concurrent_insert) info->rec_cache.end_of_file=info->state->data_file_length; } break; case HA_EXTRA_WRITE_CACHE: if (info->lock_type == F_UNLCK) { error=1; /* Not possibly if not locked */ break; } cache_size= (extra_arg ? 
*(ulong*) extra_arg : my_default_record_cache_size); if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) && !share->state.header.uniques) if (!(init_io_cache(&info->rec_cache,info->dfile, cache_size, WRITE_CACHE,info->state->data_file_length, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|=WRITE_CACHE_USED; info->update&= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK); } break; case HA_EXTRA_PREPARE_FOR_UPDATE: if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); error=end_io_cache(&info->rec_cache); /* Sergei will insert full text index caching here */ } #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if (info->opt_flag & MEMMAP_USED) madvise((char*) share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: if (info->opt_flag & WRITE_CACHE_USED) { if ((error=flush_io_cache(&info->rec_cache))) { mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } } break; case HA_EXTRA_NO_READCHECK: info->opt_flag&= ~READ_CHECK_USED; /* No readcheck */ break; case HA_EXTRA_READCHECK: info->opt_flag|= READ_CHECK_USED; break; case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag |= REMEMBER_OLD_POS; bmove((uchar*) info->lastkey+share->base.max_key_length*2, (uchar*) info->lastkey,info->lastkey_length); info->save_update= info->update; info->save_lastinx= info->lastinx; info->save_lastpos= info->lastpos; info->save_lastkey_length=info->lastkey_length; if (function == HA_EXTRA_REMEMBER_POS) break; /* fall through */ case HA_EXTRA_KEYREAD_CHANGE_POS: info->opt_flag |= KEY_READ_USED; info->read_record=_mi_read_key_record; break; case HA_EXTRA_NO_KEYREAD: case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { bmove((uchar*) info->lastkey, (uchar*) info->lastkey+share->base.max_key_length*2, info->save_lastkey_length); info->update= info->save_update | HA_STATE_WRITTEN; info->lastinx= info->save_lastinx; info->lastpos= info->save_lastpos; info->lastkey_length=info->save_lastkey_length; } info->read_record= share->read_record; info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains changes */ info->lock_type= F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: info->lock_wait=0; break; case HA_EXTRA_NO_WAIT_LOCK: info->lock_wait=MY_DONT_WAIT; break; case HA_EXTRA_NO_KEYS: if (info->lock_type == F_UNLCK) { error=1; /* Not possibly if not lock */ break; } if (mi_is_any_key_active(share->state.key_map)) { MI_KEYDEF *key=share->keyinfo; uint i; for (i=0 ; i < share->base.keys ; i++,key++) { if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1) { mi_clear_key_active(share->state.key_map, i); info->update|= HA_STATE_CHANGED; } } if (!share->changed) { share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED; share->changed=1; /* Update on close */ if (!share->global_changed) { share->global_changed=1; share->state.open_count++; } } share->state.state= *info->state; error=mi_state_info_write(share->kfile,&share->state,1 | 2); } break; case HA_EXTRA_FORCE_REOPEN: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ mysql_mutex_unlock(&THR_LOCK_myisam); break; case 
HA_EXTRA_PREPARE_FOR_DROP: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ #ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND /* Close the isam and data files as Win32 can't drop an open table */ mysql_mutex_lock(&share->intern_lock); if (flush_key_blocks(share->key_cache, share->kfile, (function == HA_EXTRA_FORCE_REOPEN ? FLUSH_RELEASE : FLUSH_IGNORE_CHANGED))) { error=my_errno; share->changed=1; mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); error=end_io_cache(&info->rec_cache); } if (info->lock_type != F_UNLCK && ! info->was_locked) { info->was_locked=info->lock_type; if (mi_lock_database(info,F_UNLCK)) error=my_errno; info->lock_type = F_UNLCK; } if (share->kfile >= 0) _mi_decrement_open_count(info); if (share->kfile >= 0 && mysql_file_close(share->kfile, MYF(0))) error=my_errno; { LIST *list_element ; for (list_element=myisam_open_list ; list_element ; list_element=list_element->next) { MI_INFO *tmpinfo=(MI_INFO*) list_element->data; if (tmpinfo->s == info->s) { if (tmpinfo->dfile >= 0 && mysql_file_close(tmpinfo->dfile, MYF(0))) error = my_errno; tmpinfo->dfile= -1; } } } share->kfile= -1; /* Files aren't open anymore */ mysql_mutex_unlock(&share->intern_lock); #endif mysql_mutex_unlock(&THR_LOCK_myisam); break; case HA_EXTRA_FLUSH: if (!share->temporary) flush_key_blocks(share->key_cache, share->kfile, FLUSH_KEEP); #ifdef HAVE_PWRITE _mi_decrement_open_count(info); #endif if (share->not_flushed) { share->not_flushed=0; if (mysql_file_sync(share->kfile, MYF(0))) error= my_errno; if (mysql_file_sync(info->dfile, MYF(0))) error= my_errno; if (error) { share->changed=1; mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } } if (share->base.blobs) mi_alloc_rec_buff(info, -1, &info->rec_buff); break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode=0; break; case HA_EXTRA_QUICK: info->quick_mode=1; break; case HA_EXTRA_NO_ROWS: if (!share->state.header.uniques) info->opt_flag|= OPT_NO_ROWS; break; case HA_EXTRA_PRELOAD_BUFFER_SIZE: info->preload_buff_size= *((ulong *) extra_arg); break; case HA_EXTRA_CHANGE_KEY_TO_UNIQUE: case HA_EXTRA_CHANGE_KEY_TO_DUP: mi_extra_keyflag(info, function); break; case HA_EXTRA_MMAP: #ifdef HAVE_MMAP mysql_mutex_lock(&share->intern_lock); /* Memory map the data file if it is not already mapped. It is safe to memory map a file while other threads are using file I/O on it. Assigning a new address to a function pointer is an atomic operation. intern_lock prevents that two or more mappings are done at the same time. */ if (!share->file_map) { if (mi_dynmap_file(info, share->state.state.data_file_length)) { DBUG_PRINT("warning",("mmap failed: errno: %d",errno)); error= my_errno= errno; } } mysql_mutex_unlock(&share->intern_lock); #endif break; case HA_EXTRA_MARK_AS_LOG_TABLE: mysql_mutex_lock(&share->intern_lock); share->is_log_table= TRUE; mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_KEY_CACHE: case HA_EXTRA_NO_KEY_CACHE: default: break; } { char tmp[1]; tmp[0]=function; myisam_log_command(MI_LOG_EXTRA,info,(uchar*) tmp,1,error); } DBUG_RETURN(error); } /* mi_extra */
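/*
  Illustrative sketch, not part of the original source: the record-cache
  bracket a caller might put around a sequential scan, using the
  HA_EXTRA_CACHE / HA_EXTRA_NO_CACHE cases handled above.  The cache size
  is an assumption for the example; passing a NULL extra_arg would fall
  back to my_default_record_cache_size.
*/
#ifdef MI_EXAMPLE_EXTRA_CACHE
static void example_cached_scan(MI_INFO *info)
{
  ulong cache_size= 256*1024;        /* assumed size for the example */

  /* Ask for a record read cache before a sequential pass over the rows */
  (void) mi_extra(info, HA_EXTRA_CACHE, &cache_size);

  /* ... sequential reads of the data file would happen here ... */

  /* Drop the cache again once the scan is finished */
  (void) mi_extra(info, HA_EXTRA_NO_CACHE, 0);
}
#endif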
int mi_extra(MI_INFO * info, enum ha_extra_function function) { int error = 0; MYISAM_SHARE *share = info->s; DBUG_ENTER("mi_extra"); switch (function) { case HA_EXTRA_RESET: /* * Free buffers and reset the following flags: EXTRA_CACHE, * EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK */ if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = end_io_cache(&info->rec_cache); } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if (info->opt_flag & MEMMAP_USED) madvise(share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS); info->quick_mode = 0; /* Fall through */ case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */ info->lastinx = 0; /* Use first index as def */ info->last_search_keypage = info->lastpos = HA_OFFSET_ERROR; info->page_changed = 1; /* Next/prev gives first/last */ if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache, READ_CACHE, 0, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED) ); } info->update = ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND); break; case HA_EXTRA_CACHE: if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) { error = 1; /* Not possibly if not locked */ my_errno = EACCES; break; } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if ((share->options & HA_OPTION_COMPRESS_RECORD)) { pthread_mutex_lock(&share->intern_lock); if (_mi_memmap_file(info)) { /* * We don't nead MADV_SEQUENTIAL if small * file */ madvise(share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE * 16 ? MADV_RANDOM : MADV_SEQUENTIAL); pthread_mutex_unlock(&share->intern_lock); break; } pthread_mutex_unlock(&share->intern_lock); } #endif if (info->opt_flag & WRITE_CACHE_USED) { info->opt_flag &= ~WRITE_CACHE_USED; if ((error = end_io_cache(&info->rec_cache))) break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) { if (!(init_io_cache(&info->rec_cache, info->dfile, (uint) min(info->state->data_file_length + 1, my_default_record_cache_size), READ_CACHE, 0L, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag |= READ_CACHE_USED; info->update &= ~HA_STATE_ROW_CHANGED; } if (share->concurrent_insert) info->rec_cache.end_of_file = info->state->data_file_length; } break; case HA_EXTRA_REINIT_CACHE: if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache, READ_CACHE, info->nextpos, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED)); info->update &= ~HA_STATE_ROW_CHANGED; if (share->concurrent_insert) info->rec_cache.end_of_file = info->state->data_file_length; } break; case HA_EXTRA_WRITE_CACHE: if (info->lock_type == F_UNLCK) { error = 1; /* Not possibly if not locked */ break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) && !share->state.header.uniques) if (!(init_io_cache(&info->rec_cache, info->dfile, 0, WRITE_CACHE, info->state->data_file_length, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag |= WRITE_CACHE_USED; info->update &= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK); } break; case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = 
end_io_cache(&info->rec_cache); /* Sergei will insert full text index caching here */ } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if (info->opt_flag & MEMMAP_USED) madvise(share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: if (info->opt_flag & WRITE_CACHE_USED) { if ((error = flush_io_cache(&info->rec_cache))) mi_mark_crashed(info); /* Fatal error found */ } break; case HA_EXTRA_NO_READCHECK: info->opt_flag &= ~READ_CHECK_USED; /* No readcheck */ break; case HA_EXTRA_READCHECK: info->opt_flag |= READ_CHECK_USED; break; case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag |= REMEMBER_OLD_POS; bmove((byte *) info->lastkey + share->base.max_key_length * 2, (byte *) info->lastkey, info->lastkey_length); info->save_update = info->update; info->save_lastinx = info->lastinx; info->save_lastpos = info->lastpos; info->save_lastkey_length = info->lastkey_length; if (function == HA_EXTRA_REMEMBER_POS) break; /* fall through */ case HA_EXTRA_KEYREAD_CHANGE_POS: info->opt_flag |= KEY_READ_USED; info->read_record = _mi_read_key_record; break; case HA_EXTRA_NO_KEYREAD: case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { bmove((byte *) info->lastkey, (byte *) info->lastkey + share->base.max_key_length * 2, info->save_lastkey_length); info->update = info->save_update | HA_STATE_WRITTEN; info->lastinx = info->save_lastinx; info->lastpos = info->save_lastpos; info->lastkey_length = info->save_lastkey_length; } info->read_record = share->read_record; info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains * changes */ info->lock_type = F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: info->lock_wait = 0; break; case HA_EXTRA_NO_WAIT_LOCK: info->lock_wait = MY_DONT_WAIT; break; case HA_EXTRA_NO_KEYS: if (info->lock_type == F_UNLCK) { error = 1; /* Not possibly if not lock */ break; } if (share->state.key_map) { share->state.key_map = 0; info->state->key_file_length = share->state.state.key_file_length = share->base.keystart; if (!share->changed) { share->state.changed |= STATE_CHANGED | STATE_NOT_ANALYZED; share->changed = 1; /* Update on close */ if (!share->global_changed) { share->global_changed = 1; share->state.open_count++; } } share->state.state = *info->state; error = mi_state_info_write(share->kfile, &share->state, 1 | 2); } break; case HA_EXTRA_FORCE_REOPEN: pthread_mutex_lock(&THR_LOCK_myisam); share->last_version = 0L; /* Impossible version */ #ifdef __WIN__ /* * Close the isam and data files as Win32 can't drop an open * table */ pthread_mutex_lock(&share->intern_lock); if (flush_key_blocks(share->kfile, FLUSH_RELEASE)) { error = my_errno; share->changed = 1; mi_mark_crashed(info); /* Fatal error found */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = end_io_cache(&info->rec_cache); } if (info->lock_type != F_UNLCK && !info->was_locked) { info->was_locked = info->lock_type; if (mi_lock_database(info, F_UNLCK)) error = my_errno; info->lock_type = F_UNLCK; } if (share->kfile >= 0) _mi_decrement_open_count(info); if (share->kfile >= 0 && my_close(share->kfile, MYF(0))) error = my_errno; { LIST *list_element; for (list_element = myisam_open_list; list_element; list_element = list_element->next) { MI_INFO *tmpinfo = (MI_INFO *) list_element->data; if (tmpinfo->s == info->s) { if (tmpinfo->dfile >= 0 && 
my_close(tmpinfo->dfile, MYF(0))) error = my_errno; tmpinfo->dfile = -1; } } } share->kfile = -1; /* Files aren't open anymore */ pthread_mutex_unlock(&share->intern_lock); #endif pthread_mutex_unlock(&THR_LOCK_myisam); break; case HA_EXTRA_FLUSH: if (!share->temporary) flush_key_blocks(share->kfile, FLUSH_KEEP); #ifdef HAVE_PWRITE _mi_decrement_open_count(info); #endif if (share->not_flushed) { share->not_flushed = 0; #if defined(__WIN__) if (_commit(share->kfile)) error = errno; if (_commit(info->dfile)) error = errno; #elif defined(HAVE_FDATASYNC) if (fdatasync(share->kfile)) error = errno; if (fdatasync(share->dfile)) error = errno; #elif defined(HAVE_FSYNC) if (fsync(share->kfile)) error = errno; if (fsync(share->dfile)) error = errno; #endif if (error) { share->changed = 1; mi_mark_crashed(info); /* Fatal error found */ } } if (share->base.blobs) { my_free(info->rec_alloc, MYF(MY_ALLOW_ZERO_PTR)); info->rec_alloc = info->rec_buff = 0; mi_fix_rec_buff_for_blob(info, info->s->base.pack_reclength); } break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode = 0; break; case HA_EXTRA_QUICK: info->quick_mode = 1; break; case HA_EXTRA_NO_ROWS: if (!share->state.header.uniques) info->opt_flag |= OPT_NO_ROWS; break; case HA_EXTRA_KEY_CACHE: case HA_EXTRA_NO_KEY_CACHE: default: break; } { char tmp [1]; tmp[0] = function; myisam_log_command(MI_LOG_EXTRA, info, (byte *) tmp, 1, error); } DBUG_RETURN(error); } /* mi_extra */
int mi_close(register MI_INFO *info) { int error=0,flag; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_close"); DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u", (long) info, (uint) share->reopen, (uint) share->tot_locks)); pthread_mutex_lock(&THR_LOCK_myisam); if (info->lock_type == F_EXTRA_LCK) info->lock_type=F_UNLCK; /* HA_EXTRA_NO_USER_CHANGE */ if (share->reopen == 1 && share->kfile >= 0) _mi_decrement_open_count(info); if (info->lock_type != F_UNLCK) { if (mi_lock_database(info,F_UNLCK)) error=my_errno; } pthread_mutex_lock(&share->intern_lock); if (share->options & HA_OPTION_READ_ONLY_DATA) { share->r_locks--; share->tot_locks--; } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) error=my_errno; info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); } flag= !--share->reopen; myisam_open_list=list_delete(myisam_open_list,&info->open_list); pthread_mutex_unlock(&share->intern_lock); my_free(mi_get_rec_buff_ptr(info, info->rec_buff), MYF(MY_ALLOW_ZERO_PTR)); if (flag) { if (share->kfile >= 0 && flush_key_blocks(share->key_cache, share->kfile, share->temporary ? FLUSH_IGNORE_CHANGED : FLUSH_RELEASE)) error=my_errno; if (share->kfile >= 0) { /* If we are crashed, we can safely flush the current state as it will not change the crashed state. We can NOT write the state in other cases as other threads may be using the file at this point */ if (share->mode != O_RDONLY && mi_is_crashed(info)) mi_state_info_write(share->kfile, &share->state, 1); if (my_close(share->kfile,MYF(0))) error = my_errno; } #ifdef HAVE_MMAP if (share->file_map) _mi_unmap_file(info); #endif if (share->decode_trees) { my_free((uchar*) share->decode_trees,MYF(0)); my_free((uchar*) share->decode_tables,MYF(0)); } #ifdef THREAD thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->intern_lock)); { int i,keys; keys = share->state.header.keys; VOID(rwlock_destroy(&share->mmap_lock)); for(i=0; i<keys; i++) { VOID(rwlock_destroy(&share->key_root_lock[i])); } } #endif my_free((uchar*) info->s,MYF(0)); } pthread_mutex_unlock(&THR_LOCK_myisam); if (info->ftparser_param) { my_free((uchar*)info->ftparser_param, MYF(0)); info->ftparser_param= 0; } if (info->dfile >= 0 && my_close(info->dfile,MYF(0))) error = my_errno; myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error); my_free((uchar*) info,MYF(0)); if (error) { DBUG_RETURN(my_errno=error); } DBUG_RETURN(0); } /* mi_close */
int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
  uint i;
  ulong length, block_length= 0;
  uchar *buff= NULL;
  MYISAM_SHARE* share= info->s;
  uint keys= share->state.header.keys;
  MI_KEYDEF *keyinfo= share->keyinfo;
  my_off_t key_file_length= share->state.state.key_file_length;
  my_off_t pos= share->base.keystart;
  DBUG_ENTER("mi_preload");

  if (!keys || !mi_is_any_key_active(key_map) || key_file_length == pos)
    DBUG_RETURN(0);

  block_length= keyinfo[0].block_length;

  if (ignore_leaves)
  {
    /* Check whether all indexes use the same block size */
    for (i= 1 ; i < keys ; i++)
    {
      if (keyinfo[i].block_length != block_length)
        DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE);
    }
  }
  else
    block_length= share->key_cache->key_cache_block_size;

  length= info->preload_buff_size/block_length * block_length;
  set_if_bigger(length, block_length);

  if (!(buff= (uchar *) my_malloc(length, MYF(MY_WME))))
    DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM);

  if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_RELEASE))
    goto err;

  do
  {
    /* Read the next block of index file into the preload buffer */
    if ((my_off_t) length > (key_file_length-pos))
      length= (ulong) (key_file_length-pos);
    if (mysql_file_pread(share->kfile, (uchar*) buff, length, pos,
                         MYF(MY_FAE|MY_FNABP)))
      goto err;

    if (ignore_leaves)
    {
      uchar *end= buff+length;
      do
      {
        if (mi_test_if_nod(buff))
        {
          if (key_cache_insert(share->key_cache,
                               share->kfile, pos, DFLT_INIT_HITS,
                               (uchar*) buff, block_length))
            goto err;
        }
        pos+= block_length;
      }
      while ((buff+= block_length) != end);
      buff= end-length;
    }
    else
    {
      if (key_cache_insert(share->key_cache,
                           share->kfile, pos, DFLT_INIT_HITS,
                           (uchar*) buff, length))
        goto err;
      pos+= length;
    }
  }
  while (pos != key_file_length);

  my_free(buff);
  DBUG_RETURN(0);

err:
  my_free(buff);
  DBUG_RETURN(my_errno= errno);
}
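/*
  Illustrative sketch, not part of the original source: preloading every
  index of an open table into its key cache, as LOAD INDEX INTO CACHE
  does.  The buffer size, the all-ones key map and ignore_leaves=0 are
  assumptions made for the example.
*/
#ifdef MI_EXAMPLE_PRELOAD
static int example_preload_all_keys(MI_INFO *info)
{
  ulong buff_size= 1024*1024;                  /* assumed preload buffer size */

  (void) mi_extra(info, HA_EXTRA_PRELOAD_BUFFER_SIZE, &buff_size);
  return mi_preload(info, ~(ulonglong) 0, 0);  /* all keys, leaves included */
}
#endif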
int nisam_panic(enum ha_panic_function flag) { int error=0; LIST *list_element,*next_open; N_INFO *info; DBUG_ENTER("nisam_panic"); pthread_mutex_lock(&THR_LOCK_isam); for (list_element=nisam_open_list ; list_element ; list_element=next_open) { next_open=list_element->next; /* Save if close */ info=(N_INFO*) list_element->data; switch (flag) { case HA_PANIC_CLOSE: pthread_mutex_unlock(&THR_LOCK_isam); /* Not exactly right... */ if (nisam_close(info)) error=my_errno; pthread_mutex_lock(&THR_LOCK_isam); break; case HA_PANIC_WRITE: /* Do this to free databases */ #ifdef CANT_OPEN_FILES_TWICE if (info->s->base.options & HA_OPTION_READ_ONLY_DATA) break; #endif if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE)) error=my_errno; if (info->opt_flag & WRITE_CACHE_USED) if (flush_io_cache(&info->rec_cache)) error=my_errno; if (info->opt_flag & READ_CACHE_USED) { if (flush_io_cache(&info->rec_cache)) error=my_errno; reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK),1); } #ifndef NO_LOCKING if (info->lock_type != F_UNLCK && ! info->was_locked) { info->was_locked=info->lock_type; if (nisam_lock_database(info,F_UNLCK)) error=my_errno; } #else { int save_status=info->s->w_locks; /* Only w_locks! */ info->s->w_locks=0; if (_nisam_writeinfo(info, test(info->update & HA_STATE_CHANGED))) error=my_errno; info->s->w_locks=save_status; info->update&= ~HA_STATE_CHANGED; /* Not changed */ } #endif /* NO_LOCKING */ #ifdef CANT_OPEN_FILES_TWICE if (info->s->kfile >= 0 && my_close(info->s->kfile,MYF(0))) error = my_errno; if (info->dfile >= 0 && my_close(info->dfile,MYF(0))) error = my_errno; info->s->kfile=info->dfile= -1; /* Files aren't open anymore */ break; #endif case HA_PANIC_READ: /* Restore to before WRITE */ #ifdef CANT_OPEN_FILES_TWICE { /* Open closed files */ char name_buff[FN_REFLEN]; if (info->s->kfile < 0) if ((info->s->kfile= my_open(fn_format(name_buff,info->filename,"", N_NAME_IEXT,4),info->mode, MYF(MY_WME))) < 0) error = my_errno; if (info->dfile < 0) { if ((info->dfile= my_open(fn_format(name_buff,info->filename,"", N_NAME_DEXT,4),info->mode, MYF(MY_WME))) < 0) error = my_errno; info->rec_cache.file=info->dfile; } } #endif #ifndef NO_LOCKING if (info->was_locked) { if (nisam_lock_database(info, info->was_locked)) error=my_errno; info->was_locked=0; } #else { int lock_type,w_locks; lock_type=info->lock_type ; w_locks=info->s->w_locks; info->lock_type=0; info->s->w_locks=0; if (_nisam_readinfo(info,0,1)) /* Read changed data */ error=my_errno; info->lock_type=lock_type; info->s->w_locks=w_locks; } /* Don't use buffer when doing next */ info->update|=HA_STATE_WRITTEN; #endif /* NO_LOCKING */ break; } } if (flag == HA_PANIC_CLOSE) VOID(nisam_log(0)); /* Close log if neaded */ pthread_mutex_unlock(&THR_LOCK_isam); if (!error) DBUG_RETURN(0); my_errno=error; DBUG_RETURN(-1); } /* nisam_panic */
int mi_lock_database(MI_INFO *info, int lock_type) { int error; uint count; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_lock_database"); DBUG_PRINT("enter",("lock_type: %d old lock %d r_locks: %u w_locks: %u " "global_changed: %d open_count: %u name: '%s'", lock_type, info->lock_type, share->r_locks, share->w_locks, share->global_changed, share->state.open_count, share->index_file_name)); if (share->options & HA_OPTION_READ_ONLY_DATA || info->lock_type == lock_type) DBUG_RETURN(0); if (lock_type == F_EXTRA_LCK) /* Used by TMP tables */ { ++share->w_locks; ++share->tot_locks; info->lock_type= lock_type; info->s->in_use= list_add(info->s->in_use, &info->in_use); DBUG_RETURN(0); } error= 0; mysql_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { switch (lock_type) { case F_UNLCK: ftparser_call_deinitializer(info); if (info->lock_type == F_RDLCK) count= --share->r_locks; else count= --share->w_locks; --share->tot_locks; if (info->lock_type == F_WRLCK && !share->w_locks && !share->delay_key_write && flush_key_blocks(share->key_cache, keycache_thread_var(), share->kfile,FLUSH_KEEP)) { error=my_errno(); mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Mark that table must be checked */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) { error=my_errno(); mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); } } if (!count) { DBUG_PRINT("info",("changed: %u w_locks: %u", (uint) share->changed, share->w_locks)); if (share->changed && !share->w_locks) { if ((info->s->mmaped_length != info->s->state.state.data_file_length) && (info->s->nonmmaped_inserts > MAX_NONMAPPED_INSERTS)) { if (info->s->concurrent_insert) mysql_rwlock_wrlock(&info->s->mmap_lock); mi_remap_file(info, info->s->state.state.data_file_length); info->s->nonmmaped_inserts= 0; if (info->s->concurrent_insert) mysql_rwlock_unlock(&info->s->mmap_lock); } share->state.process= share->last_process=share->this_process; share->state.unique= info->last_unique= info->this_unique; share->state.update_count= info->last_loop= ++info->this_loop; if (mi_state_info_write(share->kfile, &share->state, 1)) error=my_errno(); share->changed=0; if (myisam_flush) { if (mysql_file_sync(share->kfile, MYF(0))) error= my_errno(); if (mysql_file_sync(info->dfile, MYF(0))) error= my_errno(); } else share->not_flushed=1; if (error) { mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); } } if (info->lock_type != F_EXTRA_LCK) { if (share->r_locks) { /* Only read locks left */ if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno(); } else if (!share->w_locks) { /* No more locks */ if (my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno(); } } } info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); info->lock_type= F_UNLCK; info->s->in_use= list_delete(info->s->in_use, &info->in_use); break; case F_RDLCK: if (info->lock_type == F_WRLCK) { /* Change RW to READONLY mysqld does not turn write locks to read locks, so we're never here in mysqld. 
*/ if (share->w_locks == 1) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(MY_SEEK_NOT_DONE))) { error=my_errno(); break; } } share->w_locks--; share->r_locks++; info->lock_type=lock_type; break; } if (!share->r_locks && !share->w_locks) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { error=my_errno(); break; } if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno(); (void) my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,MYF(MY_SEEK_NOT_DONE)); set_my_errno(error); break; } } (void) _mi_test_if_changed(info); share->r_locks++; share->tot_locks++; info->lock_type=lock_type; info->s->in_use= list_add(info->s->in_use, &info->in_use); break; case F_WRLCK: if (info->lock_type == F_RDLCK) { /* Change READONLY to RW */ if (share->r_locks == 1) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(info->lock_wait | MY_SEEK_NOT_DONE))) { error=my_errno(); break; } share->r_locks--; share->w_locks++; info->lock_type=lock_type; break; } } if (!(share->options & HA_OPTION_READ_ONLY_DATA)) { if (!share->w_locks) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { error=my_errno(); break; } if (!share->r_locks) { if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno(); (void) my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE); set_my_errno(error); break; } } } } (void) _mi_test_if_changed(info); info->lock_type=lock_type; info->invalidator=info->s->invalidator; share->w_locks++; share->tot_locks++; info->s->in_use= list_add(info->s->in_use, &info->in_use); break; default: break; /* Impossible */ } } #ifdef _WIN32 else { /* Check for bad file descriptors if this table is part of a merge union. Failing to capture this may cause a crash on windows if the table is renamed and later on referenced by the merge table. */ if( info->owned_by_merge && (info->s)->kfile < 0 ) { error = HA_ERR_NO_SUCH_TABLE; } } #endif mysql_mutex_unlock(&share->intern_lock); DBUG_RETURN(error); } /* mi_lock_database */
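/*
  Illustrative sketch, not part of the original source: the lock/unlock
  bracket callers are expected to put around updates, which is what lets
  mi_lock_database() above write the state header and flush key blocks
  only when the last write lock is released.  The use of mi_write() from
  the row-level API is an assumption for the example.
*/
#ifdef MI_EXAMPLE_LOCKING
static int example_locked_insert(MI_INFO *info, uchar *record)
{
  int error;

  if ((error= mi_lock_database(info, F_WRLCK)))
    return error;
  error= mi_write(info, record);               /* any number of updates here */
  if (mi_lock_database(info, F_UNLCK) && !error)
    error= my_errno();
  return error;
}
#endif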
int mi_assign_to_key_cache(MI_INFO *info,
                           ulonglong key_map __attribute__((unused)),
                           KEY_CACHE *key_cache)
{
  int error= 0;
  MYISAM_SHARE* share= info->s;
  DBUG_ENTER("mi_assign_to_key_cache");
  DBUG_PRINT("enter",
             ("old_key_cache_handle: 0x%lx new_key_cache_handle: 0x%lx",
              (long) share->key_cache, (long) key_cache));

  /*
    Skip operation if we didn't change key cache. This can happen if we
    call this for all open instances of the same table
  */
  if (share->key_cache == key_cache)
    DBUG_RETURN(0);

  /*
    First flush all blocks for the table in the old key cache.
    This is to ensure that the disk is consistent with the data pages
    in memory (which may not be the case if the table uses delayed_key_write)

    Note that some other read thread may still fill in the key cache with
    new blocks during this call and after, but this doesn't matter as
    all threads will start using the new key cache for their next call to
    myisam library and we know that there will not be any changed blocks
    in the old key cache.
  */
  if (flush_key_blocks(share->key_cache, share->kfile, FLUSH_RELEASE))
  {
    error= my_errno;
    mi_print_error(info->s, HA_ERR_CRASHED);
    mi_mark_crashed(info);              /* Mark that table must be checked */
  }

  /*
    Flush the new key cache for this file.  This is needed to ensure
    that there are no old blocks (with outdated data) left in the new key
    cache from an earlier assign_to_keycache operation

    (This can never fail as there is never any not written data in the
    new key cache)
  */
  (void) flush_key_blocks(key_cache, share->kfile, FLUSH_RELEASE);

  /*
    ensure that setting the key cache and changing the multi_key_cache
    is done atomically
  */
  pthread_mutex_lock(&share->intern_lock);
  /*
    Tell all threads to use the new key cache
    This should be seen at the latest for the next call to a myisam function.
  */
  share->key_cache= key_cache;

  /* store the key cache in the global hash structure for future opens */
  if (multi_key_cache_set(share->unique_file_name, share->unique_name_length,
                          share->key_cache))
    error= my_errno;
  pthread_mutex_unlock(&share->intern_lock);
  DBUG_RETURN(error);
}
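/*
  Illustrative sketch, not part of the original source: moving a table's
  index blocks into a different, already initialized key cache, as
  CACHE INDEX ... IN some_cache does.  The hot_cache pointer is assumed to
  have been set up elsewhere (init_key_cache etc. is outside this sketch);
  key_map is unused by mi_assign_to_key_cache(), so all-ones is passed.
*/
#ifdef MI_EXAMPLE_ASSIGN_KEY_CACHE
static int example_assign_hot_cache(MI_INFO *info, KEY_CACHE *hot_cache)
{
  return mi_assign_to_key_cache(info, ~(ulonglong) 0, hot_cache);
}
#endif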
int mi_close(register MI_INFO *info)
{
  int error=0,flag;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_close");
  DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u",
                      (long) info, (uint) share->reopen,
                      (uint) share->tot_locks));

  mysql_mutex_lock(&THR_LOCK_myisam);
  if (info->lock_type == F_EXTRA_LCK)
    info->lock_type=F_UNLCK;                    /* HA_EXTRA_NO_USER_CHANGE */

  if (info->lock_type != F_UNLCK)
  {
    if (mi_lock_database(info,F_UNLCK))
      error=my_errno;
  }
  mysql_mutex_lock(&share->intern_lock);

  if (share->options & HA_OPTION_READ_ONLY_DATA)
  {
    share->r_locks--;
    share->tot_locks--;
  }
  if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
  {
    if (end_io_cache(&info->rec_cache))
      error=my_errno;
    info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  }
  flag= !--share->reopen;
  myisam_open_list=list_delete(myisam_open_list,&info->open_list);
  mysql_mutex_unlock(&share->intern_lock);

  my_free(mi_get_rec_buff_ptr(info, info->rec_buff));
  if (flag)
  {
    DBUG_EXECUTE_IF("crash_before_flush_keys",
                    if (share->kfile >= 0) abort(););
    if (share->kfile >= 0 &&
        flush_key_blocks(share->key_cache, share->kfile,
                         share->temporary ? FLUSH_IGNORE_CHANGED :
                         FLUSH_RELEASE))
      error=my_errno;
    if (share->kfile >= 0)
    {
      /*
        If we are crashed, we can safely flush the current state as it will
        not change the crashed state. We can NOT write the state in other
        cases as other threads may be using the file at this point
      */
      if (share->mode != O_RDONLY && mi_is_crashed(info))
        mi_state_info_write(share->kfile, &share->state, 1);
      /* Decrement open count must be last I/O on this file. */
      _mi_decrement_open_count(info);
      if (mysql_file_close(share->kfile, MYF(0)))
        error = my_errno;
    }
#ifdef HAVE_MMAP
    if (share->file_map)
      _mi_unmap_file(info);
#endif
    if (share->decode_trees)
    {
      my_free(share->decode_trees);
      my_free(share->decode_tables);
    }
    thr_lock_delete(&share->lock);
    mysql_mutex_destroy(&share->intern_lock);
    {
      int i,keys;
      keys = share->state.header.keys;
      mysql_rwlock_destroy(&share->mmap_lock);
      for(i=0; i<keys; i++) {
        mysql_rwlock_destroy(&share->key_root_lock[i]);
      }
    }
    my_free(info->s);
  }
  mysql_mutex_unlock(&THR_LOCK_myisam);

  if (info->ftparser_param)
  {
    my_free(info->ftparser_param);
    info->ftparser_param= 0;
  }
  if (info->dfile >= 0 && mysql_file_close(info->dfile, MYF(0)))
    error = my_errno;

  myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error);
  my_free(info);
  if (error)
  {
    DBUG_RETURN(my_errno=error);
  }
  DBUG_RETURN(0);
} /* mi_close */
int mi_panic(enum ha_panic_function flag) { int error=0; LIST *list_element,*next_open; MI_INFO *info; DBUG_ENTER("mi_panic"); mysql_mutex_lock(&THR_LOCK_myisam); for (list_element=myisam_open_list ; list_element ; list_element=next_open) { next_open=list_element->next; /* Save if close */ info=(MI_INFO*) list_element->data; switch (flag) { case HA_PANIC_CLOSE: mysql_mutex_unlock(&THR_LOCK_myisam); /* Not exactly right... */ if (mi_close(info)) error=my_errno; mysql_mutex_lock(&THR_LOCK_myisam); break; case HA_PANIC_WRITE: /* Do this to free databases */ #ifdef CANT_OPEN_FILES_TWICE if (info->s->options & HA_OPTION_READ_ONLY_DATA) break; #endif if (flush_key_blocks(info->s->key_cache, info->s->kfile, FLUSH_RELEASE)) error=my_errno; if (info->opt_flag & WRITE_CACHE_USED) if (flush_io_cache(&info->rec_cache)) error=my_errno; if (info->opt_flag & READ_CACHE_USED) { if (flush_io_cache(&info->rec_cache)) error=my_errno; reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK),1); } if (info->lock_type != F_UNLCK && ! info->was_locked) { info->was_locked=info->lock_type; if (mi_lock_database(info,F_UNLCK)) error=my_errno; } #ifdef CANT_OPEN_FILES_TWICE if (info->s->kfile >= 0 && mysql_file_close(info->s->kfile, MYF(0))) error = my_errno; if (info->dfile >= 0 && mysql_file_close(info->dfile, MYF(0))) error = my_errno; info->s->kfile=info->dfile= -1; /* Files aren't open anymore */ break; #endif case HA_PANIC_READ: /* Restore to before WRITE */ #ifdef CANT_OPEN_FILES_TWICE { /* Open closed files */ char name_buff[FN_REFLEN]; if (info->s->kfile < 0) if ((info->s->kfile= mysql_file_open(mi_key_file_kfile, fn_format(name_buff, info->filename, "", N_NAME_IEXT, 4), info->mode, MYF(MY_WME))) < 0) error = my_errno; if (info->dfile < 0) { if ((info->dfile= mysql_file_open(mi_key_file_dfile, fn_format(name_buff, info->filename, "", N_NAME_DEXT, 4), info->mode, MYF(MY_WME))) < 0) error = my_errno; info->rec_cache.file=info->dfile; } } #endif if (info->was_locked) { if (mi_lock_database(info, info->was_locked)) error=my_errno; info->was_locked=0; } break; } } if (flag == HA_PANIC_CLOSE) { (void) mi_log(0); /* Close log if neaded */ ft_free_stopwords(); } mysql_mutex_unlock(&THR_LOCK_myisam); if (!error) DBUG_RETURN(0); DBUG_RETURN(my_errno=error); } /* mi_panic */
int mi_lock_database(MI_INFO *info, int lock_type) { int error; uint count; MYISAM_SHARE *share=info->s; uint flag; DBUG_ENTER("mi_lock_database"); DBUG_PRINT("info",("lock_type: %d", lock_type)); if (share->options & HA_OPTION_READ_ONLY_DATA || info->lock_type == lock_type) DBUG_RETURN(0); if (lock_type == F_EXTRA_LCK) { ++share->w_locks; ++share->tot_locks; info->lock_type= lock_type; DBUG_RETURN(0); } flag=error=0; pthread_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { switch (lock_type) { case F_UNLCK: DBUG_PRINT("info", ("old lock: %d", info->lock_type)); if (info->lock_type == F_RDLCK) count= --share->r_locks; else count= --share->w_locks; --share->tot_locks; if (info->lock_type == F_WRLCK && !share->w_locks && !share->delay_key_write && flush_key_blocks(share->kfile,FLUSH_KEEP)) { error=my_errno; mi_mark_crashed(info); /* Mark that table must be checked */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) { error=my_errno; mi_mark_crashed(info); } } if (!count) { DBUG_PRINT("info",("changed: %u w_locks: %u", (uint) share->changed, share->w_locks)); if (share->changed && !share->w_locks) { share->state.process= share->last_process=share->this_process; share->state.unique= info->last_unique= info->this_unique; share->state.update_count= info->last_loop= ++info->this_loop; if (mi_state_info_write(share->kfile, &share->state, 1)) error=my_errno; share->changed=0; if (myisam_flush) { #if defined(__WIN__) if (_commit(share->kfile)) error=errno; if (_commit(info->dfile)) error=errno; #elif defined(HAVE_FDATASYNC) if (fdatasync(share->kfile)) error=errno; if (fdatasync(share->dfile)) error=errno; #elif defined(HAVE_FSYNC) if (fsync(share->kfile)) error=errno; if (fsync(share->dfile)) error=errno; #endif } else share->not_flushed=1; if (error) mi_mark_crashed(info); } if (info->lock_type != F_EXTRA_LCK) { if (share->r_locks) { /* Only read locks left */ flag=1; if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno; } else if (!share->w_locks) { /* No more locks */ flag=1; if (my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno; } } } info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); info->lock_type= F_UNLCK; break; case F_RDLCK: if (info->lock_type == F_WRLCK) { /* Change RW to READONLY */ if (share->w_locks == 1) { flag=1; if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(MY_SEEK_NOT_DONE))) { error=my_errno; break; } } share->w_locks--; share->r_locks++; info->lock_type=lock_type; break; } if (!share->r_locks && !share->w_locks) { flag=1; if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { error=my_errno; break; } if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno; VOID(my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,MYF(MY_SEEK_NOT_DONE))); my_errno=error; break; } } VOID(_mi_test_if_changed(info)); share->r_locks++; share->tot_locks++; info->lock_type=lock_type; break; case F_WRLCK: if (info->lock_type == F_RDLCK) { /* Change READONLY to RW */ if (share->r_locks == 1) { flag=1; if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(info->lock_wait | MY_SEEK_NOT_DONE))) { error=my_errno; break; } share->r_locks--; share->w_locks++; info->lock_type=lock_type; break; } } if (!(share->options & HA_OPTION_READ_ONLY_DATA)) { if (!share->w_locks) { flag=1; if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { 
error=my_errno; break; } if (!share->r_locks) { if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno; VOID(my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)); my_errno=error; break; } } } } VOID(_mi_test_if_changed(info)); info->lock_type=lock_type; info->invalidator=info->s->invalidator; share->w_locks++; share->tot_locks++; break; default: break; /* Impossible */ } } pthread_mutex_unlock(&share->intern_lock); #if defined(FULL_LOG) || defined(_lint) lock_type|=(int) (flag << 8); /* Set bit to set if real lock */ myisam_log_command(MI_LOG_LOCK,info,(byte*) &lock_type,sizeof(lock_type), error); #endif DBUG_RETURN(error); } /* mi_lock_database */