int mi_reset(MI_INFO *info)
{
  int error= 0;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_reset");
  /*
    Free buffers and reset the following flags:
    EXTRA_CACHE, EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK

    If the row buffer cache is large (for dynamic tables), reduce it
    to save memory.
  */
  if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
  {
    info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
    error= end_io_cache(&info->rec_cache);
  }
  if (share->base.blobs)
    mi_alloc_rec_buff(info, -1, &info->rec_buff);
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
  if (info->opt_flag & MEMMAP_USED)
    madvise((char*) share->file_map, share->state.state.data_file_length,
            MADV_RANDOM);
#endif
  info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
  info->quick_mode=0;
  info->lastinx= 0;                           /* Use first index as def */
  info->last_search_keypage= info->lastpos= HA_OFFSET_ERROR;
  info->page_changed= 1;
  info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
                 HA_STATE_PREV_FOUND);
  DBUG_RETURN(error);
}
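/*
  Usage sketch (not from the original source): mi_reset() is typically
  called by the handler layer between statements to drop the record
  cache and the key-read flag set earlier with mi_extra().  The table
  name, the fixed-size record buffer and the error handling below are
  hypothetical; mi_open(), mi_extra() (three-argument form as defined
  later in this file), mi_rfirst(), mi_reset() and mi_close() are the
  public MyISAM C API.
*/
static int scan_then_reset(const char *table_name)
{
  MI_INFO *file;
  uchar record[1024];                      /* assumes reclength <= 1024 */

  if (!(file= mi_open(table_name, O_RDONLY, HA_OPEN_WAIT_IF_LOCKED)))
    return my_errno;
  (void) mi_extra(file, HA_EXTRA_KEYREAD, 0);  /* index-only reads */
  if (!mi_rfirst(file, record, 0))             /* first row of key 0 */
  {
    /* ... use the key columns in 'record' ... */
  }
  (void) mi_reset(file);                       /* back to a clean state */
  return mi_close(file);
}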
void mi_update_status(void* param)
{
  MI_INFO *info=(MI_INFO*) param;
  DBUG_ENTER("mi_update_status");
  /*
    Because someone may have closed the table we point at, we only
    update the state if it's our own state.  This isn't a problem as
    we are always pointing at our own lock or at a read lock.
    (This is enforced by thr_multi_lock.c)
  */
  if (info->state == &info->save_state)
  {
    DBUG_PRINT("info",
               ("updating status: key_file: %lu data_file: %lu rows: %lu",
                (ulong) info->state->key_file_length,
                (ulong) info->state->data_file_length,
                (ulong) info->state->records));
    if (info->state->key_file_length < info->s->state.state.key_file_length ||
        info->state->data_file_length < info->s->state.state.data_file_length)
      DBUG_PRINT("warning",("old info: key_file: %ld data_file: %ld",
                            (long) info->s->state.state.key_file_length,
                            (long) info->s->state.state.data_file_length));
    info->s->state.state= *info->state;
#ifdef HAVE_QUERY_CACHE
    DBUG_PRINT("info", ("invalidator... '%s' (status update)",
                        info->filename));
    DBUG_ASSERT(info->s->chst_invalidator != NULL);
    (*info->s->chst_invalidator)((const char *)info->filename);
#endif
  }

  info->state= &info->s->state.state;
  info->append_insert_at_end= 0;

  /*
    We have to flush the write cache here as other threads may start
    reading the table before mi_lock_database() is called
  */
  if (info->opt_flag & WRITE_CACHE_USED)
  {
    if (end_io_cache(&info->rec_cache))
    {
      mi_print_error(info->s, HA_ERR_CRASHED);
      mi_mark_crashed(info);
    }
    info->opt_flag&= ~WRITE_CACHE_USED;
  }
  DBUG_VOID_RETURN;
}
int maria_reset(MARIA_HA *info)
{
  int error= 0;
  MARIA_SHARE *share= info->s;
  DBUG_ENTER("maria_reset");
  /*
    Free buffers and reset the following flags:
    EXTRA_CACHE, EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK

    If the row buffer cache is large (for dynamic tables), reduce it
    to save memory.
  */
  if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
  {
    info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
    error= end_io_cache(&info->rec_cache);
  }
  /* Free memory used for keeping blobs */
  if (share->base.blobs)
  {
    if (info->rec_buff_size > share->base.default_rec_buff_size)
    {
      info->rec_buff_size= 1;                 /* Force realloc */
      _ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
                       share->base.default_rec_buff_size);
    }
    if (info->blob_buff_size > MARIA_SMALL_BLOB_BUFFER)
    {
      info->blob_buff_size= 1;                /* Force realloc */
      _ma_alloc_buffer(&info->blob_buff, &info->blob_buff_size,
                       MARIA_SMALL_BLOB_BUFFER);
    }
  }
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
  if (info->opt_flag & MEMMAP_USED)
    madvise((char*) share->file_map, share->state.state.data_file_length,
            MADV_RANDOM);
#endif
  info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
  info->quick_mode= 0;
  info->lastinx= ~0;                          /* detect index changes */
  info->last_search_keypage= info->cur_row.lastpos= HA_OFFSET_ERROR;
  info->page_changed= 1;
  info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND |
                 HA_STATE_PREV_FOUND);
  DBUG_RETURN(error);
}
void mi_update_status(void* param)
{
  MI_INFO *info=(MI_INFO*) param;
  /*
    Because someone may have closed the table we point at, we only
    update the state if it's our own state.  This isn't a problem as
    we are always pointing at our own lock or at a read lock.
    (This is enforced by thr_multi_lock.c)
  */
  if (info->state == &info->save_state)
  {
#ifndef DBUG_OFF
    DBUG_PRINT("info",("updating status: key_file: %ld data_file: %ld",
                       (long) info->state->key_file_length,
                       (long) info->state->data_file_length));
    if (info->state->key_file_length < info->s->state.state.key_file_length ||
        info->state->data_file_length < info->s->state.state.data_file_length)
      DBUG_PRINT("warning",("old info: key_file: %ld data_file: %ld",
                            (long) info->s->state.state.key_file_length,
                            (long) info->s->state.state.data_file_length));
#endif
    info->s->state.state= *info->state;
  }
  info->state= &info->s->state.state;
  info->append_insert_at_end= 0;

  /*
    We have to flush the write cache here as other threads may start
    reading the table before mi_lock_database() is called
  */
  if (info->opt_flag & WRITE_CACHE_USED)
  {
    if (end_io_cache(&info->rec_cache))
    {
      mi_print_error(info->s, HA_ERR_CRASHED);
      mi_mark_crashed(info);
    }
    info->opt_flag&= ~WRITE_CACHE_USED;
  }
}
int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) { int error=0; ulong cache_size; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_extra"); DBUG_PRINT("enter",("function: %d",(int) function)); switch (function) { case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */ info->lastinx= 0; /* Use first index as def */ info->last_search_keypage=info->lastpos= HA_OFFSET_ERROR; info->page_changed=1; /* Next/prev gives first/last */ if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED) ); } info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND); break; case HA_EXTRA_CACHE: if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) { error=1; /* Not possibly if not locked */ my_errno=EACCES; break; } if (info->s->file_map) /* Don't use cache if mmap */ break; #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if ((share->options & HA_OPTION_COMPRESS_RECORD)) { mysql_mutex_lock(&share->intern_lock); if (_mi_memmap_file(info)) { /* We don't nead MADV_SEQUENTIAL if small file */ madvise((char*) share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ? MADV_RANDOM : MADV_SEQUENTIAL); mysql_mutex_unlock(&share->intern_lock); break; } mysql_mutex_unlock(&share->intern_lock); } #endif if (info->opt_flag & WRITE_CACHE_USED) { info->opt_flag&= ~WRITE_CACHE_USED; if ((error=end_io_cache(&info->rec_cache))) break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) { cache_size= (extra_arg ? *(ulong*) extra_arg : my_default_record_cache_size); if (!(init_io_cache(&info->rec_cache,info->dfile, (uint) min(info->state->data_file_length+1, cache_size), READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|=READ_CACHE_USED; info->update&= ~HA_STATE_ROW_CHANGED; } if (share->concurrent_insert) info->rec_cache.end_of_file=info->state->data_file_length; } break; case HA_EXTRA_REINIT_CACHE: if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache,READ_CACHE,info->nextpos, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED)); info->update&= ~HA_STATE_ROW_CHANGED; if (share->concurrent_insert) info->rec_cache.end_of_file=info->state->data_file_length; } break; case HA_EXTRA_WRITE_CACHE: if (info->lock_type == F_UNLCK) { error=1; /* Not possibly if not locked */ break; } cache_size= (extra_arg ? 
*(ulong*) extra_arg : my_default_record_cache_size); if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) && !share->state.header.uniques) if (!(init_io_cache(&info->rec_cache,info->dfile, cache_size, WRITE_CACHE,info->state->data_file_length, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|=WRITE_CACHE_USED; info->update&= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK); } break; case HA_EXTRA_PREPARE_FOR_UPDATE: if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); error=end_io_cache(&info->rec_cache); /* Sergei will insert full text index caching here */ } #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if (info->opt_flag & MEMMAP_USED) madvise((char*) share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: if (info->opt_flag & WRITE_CACHE_USED) { if ((error=flush_io_cache(&info->rec_cache))) { mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } } break; case HA_EXTRA_NO_READCHECK: info->opt_flag&= ~READ_CHECK_USED; /* No readcheck */ break; case HA_EXTRA_READCHECK: info->opt_flag|= READ_CHECK_USED; break; case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag |= REMEMBER_OLD_POS; bmove((uchar*) info->lastkey+share->base.max_key_length*2, (uchar*) info->lastkey,info->lastkey_length); info->save_update= info->update; info->save_lastinx= info->lastinx; info->save_lastpos= info->lastpos; info->save_lastkey_length=info->lastkey_length; if (function == HA_EXTRA_REMEMBER_POS) break; /* fall through */ case HA_EXTRA_KEYREAD_CHANGE_POS: info->opt_flag |= KEY_READ_USED; info->read_record=_mi_read_key_record; break; case HA_EXTRA_NO_KEYREAD: case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { bmove((uchar*) info->lastkey, (uchar*) info->lastkey+share->base.max_key_length*2, info->save_lastkey_length); info->update= info->save_update | HA_STATE_WRITTEN; info->lastinx= info->save_lastinx; info->lastpos= info->save_lastpos; info->lastkey_length=info->save_lastkey_length; } info->read_record= share->read_record; info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains changes */ info->lock_type= F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: info->lock_wait=0; break; case HA_EXTRA_NO_WAIT_LOCK: info->lock_wait=MY_DONT_WAIT; break; case HA_EXTRA_NO_KEYS: if (info->lock_type == F_UNLCK) { error=1; /* Not possibly if not lock */ break; } if (mi_is_any_key_active(share->state.key_map)) { MI_KEYDEF *key=share->keyinfo; uint i; for (i=0 ; i < share->base.keys ; i++,key++) { if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1) { mi_clear_key_active(share->state.key_map, i); info->update|= HA_STATE_CHANGED; } } if (!share->changed) { share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED; share->changed=1; /* Update on close */ if (!share->global_changed) { share->global_changed=1; share->state.open_count++; } } share->state.state= *info->state; error=mi_state_info_write(share->kfile,&share->state,1 | 2); } break; case HA_EXTRA_FORCE_REOPEN: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ mysql_mutex_unlock(&THR_LOCK_myisam); break; case 
HA_EXTRA_PREPARE_FOR_DROP: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ #ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND /* Close the isam and data files as Win32 can't drop an open table */ mysql_mutex_lock(&share->intern_lock); if (flush_key_blocks(share->key_cache, share->kfile, (function == HA_EXTRA_FORCE_REOPEN ? FLUSH_RELEASE : FLUSH_IGNORE_CHANGED))) { error=my_errno; share->changed=1; mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); error=end_io_cache(&info->rec_cache); } if (info->lock_type != F_UNLCK && ! info->was_locked) { info->was_locked=info->lock_type; if (mi_lock_database(info,F_UNLCK)) error=my_errno; info->lock_type = F_UNLCK; } if (share->kfile >= 0) _mi_decrement_open_count(info); if (share->kfile >= 0 && mysql_file_close(share->kfile, MYF(0))) error=my_errno; { LIST *list_element ; for (list_element=myisam_open_list ; list_element ; list_element=list_element->next) { MI_INFO *tmpinfo=(MI_INFO*) list_element->data; if (tmpinfo->s == info->s) { if (tmpinfo->dfile >= 0 && mysql_file_close(tmpinfo->dfile, MYF(0))) error = my_errno; tmpinfo->dfile= -1; } } } share->kfile= -1; /* Files aren't open anymore */ mysql_mutex_unlock(&share->intern_lock); #endif mysql_mutex_unlock(&THR_LOCK_myisam); break; case HA_EXTRA_FLUSH: if (!share->temporary) flush_key_blocks(share->key_cache, share->kfile, FLUSH_KEEP); #ifdef HAVE_PWRITE _mi_decrement_open_count(info); #endif if (share->not_flushed) { share->not_flushed=0; if (mysql_file_sync(share->kfile, MYF(0))) error= my_errno; if (mysql_file_sync(info->dfile, MYF(0))) error= my_errno; if (error) { share->changed=1; mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Fatal error found */ } } if (share->base.blobs) mi_alloc_rec_buff(info, -1, &info->rec_buff); break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode=0; break; case HA_EXTRA_QUICK: info->quick_mode=1; break; case HA_EXTRA_NO_ROWS: if (!share->state.header.uniques) info->opt_flag|= OPT_NO_ROWS; break; case HA_EXTRA_PRELOAD_BUFFER_SIZE: info->preload_buff_size= *((ulong *) extra_arg); break; case HA_EXTRA_CHANGE_KEY_TO_UNIQUE: case HA_EXTRA_CHANGE_KEY_TO_DUP: mi_extra_keyflag(info, function); break; case HA_EXTRA_MMAP: #ifdef HAVE_MMAP mysql_mutex_lock(&share->intern_lock); /* Memory map the data file if it is not already mapped. It is safe to memory map a file while other threads are using file I/O on it. Assigning a new address to a function pointer is an atomic operation. intern_lock prevents that two or more mappings are done at the same time. */ if (!share->file_map) { if (mi_dynmap_file(info, share->state.state.data_file_length)) { DBUG_PRINT("warning",("mmap failed: errno: %d",errno)); error= my_errno= errno; } } mysql_mutex_unlock(&share->intern_lock); #endif break; case HA_EXTRA_MARK_AS_LOG_TABLE: mysql_mutex_lock(&share->intern_lock); share->is_log_table= TRUE; mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_KEY_CACHE: case HA_EXTRA_NO_KEY_CACHE: default: break; } { char tmp[1]; tmp[0]=function; myisam_log_command(MI_LOG_EXTRA,info,(uchar*) tmp,1,error); } DBUG_RETURN(error); } /* mi_extra */
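/*
  Usage sketch (not from the original source): a sequential scan
  bracketed by HA_EXTRA_CACHE / HA_EXTRA_NO_CACHE, which is how the
  handler layer normally drives the record cache set up in mi_extra()
  above.  The cache size, the caller-supplied record buffer and the
  end-of-file handling are hypothetical simplifications; the table must
  be locked for HA_EXTRA_CACHE to succeed on packed records.
*/
static int cached_full_scan(MI_INFO *file, uchar *record)
{
  int error;
  ulong cache_size= 256*1024;                 /* arbitrary example size */

  if (mi_lock_database(file, F_RDLCK))
    return my_errno;
  (void) mi_extra(file, HA_EXTRA_CACHE, &cache_size);
  mi_scan_init(file);
  while (!(error= mi_scan(file, record)))
  {
    /* ... process one row ... */
  }
  if (my_errno == HA_ERR_END_OF_FILE)         /* normal end of scan */
    error= 0;
  (void) mi_extra(file, HA_EXTRA_NO_CACHE, 0);
  (void) mi_lock_database(file, F_UNLCK);
  return error;
}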
int mi_extra(MI_INFO * info, enum ha_extra_function function) { int error = 0; MYISAM_SHARE *share = info->s; DBUG_ENTER("mi_extra"); switch (function) { case HA_EXTRA_RESET: /* * Free buffers and reset the following flags: EXTRA_CACHE, * EXTRA_WRITE_CACHE, EXTRA_KEYREAD, EXTRA_QUICK */ if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = end_io_cache(&info->rec_cache); } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if (info->opt_flag & MEMMAP_USED) madvise(share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS); info->quick_mode = 0; /* Fall through */ case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */ info->lastinx = 0; /* Use first index as def */ info->last_search_keypage = info->lastpos = HA_OFFSET_ERROR; info->page_changed = 1; /* Next/prev gives first/last */ if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache, READ_CACHE, 0, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED) ); } info->update = ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND); break; case HA_EXTRA_CACHE: if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) { error = 1; /* Not possibly if not locked */ my_errno = EACCES; break; } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if ((share->options & HA_OPTION_COMPRESS_RECORD)) { pthread_mutex_lock(&share->intern_lock); if (_mi_memmap_file(info)) { /* * We don't nead MADV_SEQUENTIAL if small * file */ madvise(share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE * 16 ? MADV_RANDOM : MADV_SEQUENTIAL); pthread_mutex_unlock(&share->intern_lock); break; } pthread_mutex_unlock(&share->intern_lock); } #endif if (info->opt_flag & WRITE_CACHE_USED) { info->opt_flag &= ~WRITE_CACHE_USED; if ((error = end_io_cache(&info->rec_cache))) break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) { if (!(init_io_cache(&info->rec_cache, info->dfile, (uint) min(info->state->data_file_length + 1, my_default_record_cache_size), READ_CACHE, 0L, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag |= READ_CACHE_USED; info->update &= ~HA_STATE_ROW_CHANGED; } if (share->concurrent_insert) info->rec_cache.end_of_file = info->state->data_file_length; } break; case HA_EXTRA_REINIT_CACHE: if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache, READ_CACHE, info->nextpos, (pbool) (info->lock_type != F_UNLCK), (pbool) test(info->update & HA_STATE_ROW_CHANGED)); info->update &= ~HA_STATE_ROW_CHANGED; if (share->concurrent_insert) info->rec_cache.end_of_file = info->state->data_file_length; } break; case HA_EXTRA_WRITE_CACHE: if (info->lock_type == F_UNLCK) { error = 1; /* Not possibly if not locked */ break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) && !share->state.header.uniques) if (!(init_io_cache(&info->rec_cache, info->dfile, 0, WRITE_CACHE, info->state->data_file_length, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag |= WRITE_CACHE_USED; info->update &= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK); } break; case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = 
end_io_cache(&info->rec_cache); /* Sergei will insert full text index caching here */ } #if defined(HAVE_MMAP) && defined(HAVE_MADVICE) if (info->opt_flag & MEMMAP_USED) madvise(share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: if (info->opt_flag & WRITE_CACHE_USED) { if ((error = flush_io_cache(&info->rec_cache))) mi_mark_crashed(info); /* Fatal error found */ } break; case HA_EXTRA_NO_READCHECK: info->opt_flag &= ~READ_CHECK_USED; /* No readcheck */ break; case HA_EXTRA_READCHECK: info->opt_flag |= READ_CHECK_USED; break; case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag |= REMEMBER_OLD_POS; bmove((byte *) info->lastkey + share->base.max_key_length * 2, (byte *) info->lastkey, info->lastkey_length); info->save_update = info->update; info->save_lastinx = info->lastinx; info->save_lastpos = info->lastpos; info->save_lastkey_length = info->lastkey_length; if (function == HA_EXTRA_REMEMBER_POS) break; /* fall through */ case HA_EXTRA_KEYREAD_CHANGE_POS: info->opt_flag |= KEY_READ_USED; info->read_record = _mi_read_key_record; break; case HA_EXTRA_NO_KEYREAD: case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { bmove((byte *) info->lastkey, (byte *) info->lastkey + share->base.max_key_length * 2, info->save_lastkey_length); info->update = info->save_update | HA_STATE_WRITTEN; info->lastinx = info->save_lastinx; info->lastpos = info->save_lastpos; info->lastkey_length = info->save_lastkey_length; } info->read_record = share->read_record; info->opt_flag &= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains * changes */ info->lock_type = F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: info->lock_wait = 0; break; case HA_EXTRA_NO_WAIT_LOCK: info->lock_wait = MY_DONT_WAIT; break; case HA_EXTRA_NO_KEYS: if (info->lock_type == F_UNLCK) { error = 1; /* Not possibly if not lock */ break; } if (share->state.key_map) { share->state.key_map = 0; info->state->key_file_length = share->state.state.key_file_length = share->base.keystart; if (!share->changed) { share->state.changed |= STATE_CHANGED | STATE_NOT_ANALYZED; share->changed = 1; /* Update on close */ if (!share->global_changed) { share->global_changed = 1; share->state.open_count++; } } share->state.state = *info->state; error = mi_state_info_write(share->kfile, &share->state, 1 | 2); } break; case HA_EXTRA_FORCE_REOPEN: pthread_mutex_lock(&THR_LOCK_myisam); share->last_version = 0L; /* Impossible version */ #ifdef __WIN__ /* * Close the isam and data files as Win32 can't drop an open * table */ pthread_mutex_lock(&share->intern_lock); if (flush_key_blocks(share->kfile, FLUSH_RELEASE)) { error = my_errno; share->changed = 1; mi_mark_crashed(info); /* Fatal error found */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag &= ~(READ_CACHE_USED | WRITE_CACHE_USED); error = end_io_cache(&info->rec_cache); } if (info->lock_type != F_UNLCK && !info->was_locked) { info->was_locked = info->lock_type; if (mi_lock_database(info, F_UNLCK)) error = my_errno; info->lock_type = F_UNLCK; } if (share->kfile >= 0) _mi_decrement_open_count(info); if (share->kfile >= 0 && my_close(share->kfile, MYF(0))) error = my_errno; { LIST *list_element; for (list_element = myisam_open_list; list_element; list_element = list_element->next) { MI_INFO *tmpinfo = (MI_INFO *) list_element->data; if (tmpinfo->s == info->s) { if (tmpinfo->dfile >= 0 && 
my_close(tmpinfo->dfile, MYF(0))) error = my_errno; tmpinfo->dfile = -1; } } } share->kfile = -1; /* Files aren't open anymore */ pthread_mutex_unlock(&share->intern_lock); #endif pthread_mutex_unlock(&THR_LOCK_myisam); break; case HA_EXTRA_FLUSH: if (!share->temporary) flush_key_blocks(share->kfile, FLUSH_KEEP); #ifdef HAVE_PWRITE _mi_decrement_open_count(info); #endif if (share->not_flushed) { share->not_flushed = 0; #if defined(__WIN__) if (_commit(share->kfile)) error = errno; if (_commit(info->dfile)) error = errno; #elif defined(HAVE_FDATASYNC) if (fdatasync(share->kfile)) error = errno; if (fdatasync(share->dfile)) error = errno; #elif defined(HAVE_FSYNC) if (fsync(share->kfile)) error = errno; if (fsync(share->dfile)) error = errno; #endif if (error) { share->changed = 1; mi_mark_crashed(info); /* Fatal error found */ } } if (share->base.blobs) { my_free(info->rec_alloc, MYF(MY_ALLOW_ZERO_PTR)); info->rec_alloc = info->rec_buff = 0; mi_fix_rec_buff_for_blob(info, info->s->base.pack_reclength); } break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode = 0; break; case HA_EXTRA_QUICK: info->quick_mode = 1; break; case HA_EXTRA_NO_ROWS: if (!share->state.header.uniques) info->opt_flag |= OPT_NO_ROWS; break; case HA_EXTRA_KEY_CACHE: case HA_EXTRA_NO_KEY_CACHE: default: break; } { char tmp [1]; tmp[0] = function; myisam_log_command(MI_LOG_EXTRA, info, (byte *) tmp, 1, error); } DBUG_RETURN(error); } /* mi_extra */
int maria_extra(MARIA_HA *info, enum ha_extra_function function, void *extra_arg) { int error= 0; ulong cache_size; MARIA_SHARE *share= info->s; my_bool block_records= share->data_file_type == BLOCK_RECORD; DBUG_ENTER("maria_extra"); DBUG_PRINT("enter",("function: %d",(int) function)); switch (function) { case HA_EXTRA_RESET_STATE: /* Reset state (don't free buffers) */ info->lastinx= ~0; /* Detect index changes */ info->last_search_keypage= info->cur_row.lastpos= HA_OFFSET_ERROR; info->page_changed= 1; /* Next/prev gives first/last */ if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache,READ_CACHE,0, (pbool) (info->lock_type != F_UNLCK), (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED) ); } info->update= ((info->update & HA_STATE_CHANGED) | HA_STATE_NEXT_FOUND | HA_STATE_PREV_FOUND); break; case HA_EXTRA_CACHE: if (block_records) break; /* Not supported */ if (info->lock_type == F_UNLCK && (share->options & HA_OPTION_PACK_RECORD)) { error= 1; /* Not possibly if not locked */ my_errno= EACCES; break; } if (info->s->file_map) /* Don't use cache if mmap */ break; #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if ((share->options & HA_OPTION_COMPRESS_RECORD)) { mysql_mutex_lock(&share->intern_lock); if (_ma_memmap_file(info)) { /* We don't nead MADV_SEQUENTIAL if small file */ madvise((char*) share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ? MADV_RANDOM : MADV_SEQUENTIAL); mysql_mutex_unlock(&share->intern_lock); break; } mysql_mutex_unlock(&share->intern_lock); } #endif if (info->opt_flag & WRITE_CACHE_USED) { info->opt_flag&= ~WRITE_CACHE_USED; if ((error= end_io_cache(&info->rec_cache))) break; } if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | MEMMAP_USED))) { cache_size= (extra_arg ? *(ulong*) extra_arg : my_default_record_cache_size); if (!(init_io_cache(&info->rec_cache, info->dfile.file, (uint) MY_MIN(share->state.state.data_file_length+1, cache_size), READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|= READ_CACHE_USED; info->update&= ~HA_STATE_ROW_CHANGED; } if (share->non_transactional_concurrent_insert) info->rec_cache.end_of_file= info->state->data_file_length; } break; case HA_EXTRA_REINIT_CACHE: if (info->opt_flag & READ_CACHE_USED) { reinit_io_cache(&info->rec_cache, READ_CACHE, info->cur_row.nextpos, (pbool) (info->lock_type != F_UNLCK), (pbool) MY_TEST(info->update & HA_STATE_ROW_CHANGED)); info->update&= ~HA_STATE_ROW_CHANGED; if (share->non_transactional_concurrent_insert) info->rec_cache.end_of_file= info->state->data_file_length; } break; case HA_EXTRA_WRITE_CACHE: if (info->lock_type == F_UNLCK) { error= 1; /* Not possibly if not locked */ break; } if (block_records) break; /* Not supported */ cache_size= (extra_arg ? 
*(ulong*) extra_arg : my_default_record_cache_size); if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED | OPT_NO_ROWS)) && !share->state.header.uniques) if (!(init_io_cache(&info->rec_cache, info->dfile.file, cache_size, WRITE_CACHE, info->state->data_file_length, (pbool) (info->lock_type != F_UNLCK), MYF(share->write_flag & MY_WAIT_IF_FULL)))) { info->opt_flag|= WRITE_CACHE_USED; info->update&= ~(HA_STATE_ROW_CHANGED | HA_STATE_WRITE_AT_END | HA_STATE_EXTEND_BLOCK); } break; case HA_EXTRA_PREPARE_FOR_UPDATE: if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); error= end_io_cache(&info->rec_cache); /* Sergei will insert full text index caching here */ } #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if (info->opt_flag & MEMMAP_USED) madvise((char*) share->file_map, share->state.state.data_file_length, MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: if (info->opt_flag & WRITE_CACHE_USED) { if ((error= flush_io_cache(&info->rec_cache))) { /* Fatal error found */ _ma_set_fatal_error(share, HA_ERR_CRASHED); } } break; case HA_EXTRA_NO_READCHECK: info->opt_flag&= ~READ_CHECK_USED; /* No readcheck */ break; case HA_EXTRA_READCHECK: info->opt_flag|= READ_CHECK_USED; break; case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag|= REMEMBER_OLD_POS; bmove(info->last_key.data + share->base.max_key_length*2, info->last_key.data, info->last_key.data_length + info->last_key.ref_length); info->save_update= info->update; info->save_lastinx= info->lastinx; info->save_lastpos= info->cur_row.lastpos; info->save_lastkey_data_length= info->last_key.data_length; info->save_lastkey_ref_length= info->last_key.ref_length; if (function == HA_EXTRA_REMEMBER_POS) break; /* fall through */ case HA_EXTRA_KEYREAD_CHANGE_POS: info->opt_flag|= KEY_READ_USED; info->read_record= _ma_read_key_record; break; case HA_EXTRA_NO_KEYREAD: case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { bmove(info->last_key.data, info->last_key.data + share->base.max_key_length*2, info->save_lastkey_data_length + info->save_lastkey_ref_length); info->update= info->save_update | HA_STATE_WRITTEN; info->lastinx= info->save_lastinx; info->cur_row.lastpos= info->save_lastpos; info->last_key.data_length= info->save_lastkey_data_length; info->last_key.ref_length= info->save_lastkey_ref_length; info->last_key.flag= 0; } info->read_record= share->read_record; info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains changes */ info->lock_type= F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: info->lock_wait= 0; break; case HA_EXTRA_NO_WAIT_LOCK: info->lock_wait= MY_SHORT_WAIT; break; case HA_EXTRA_NO_KEYS: /* we're going to modify pieces of the state, stall Checkpoint */ mysql_mutex_lock(&share->intern_lock); if (info->lock_type == F_UNLCK) { mysql_mutex_unlock(&share->intern_lock); error= 1; /* Not possibly if not lock */ break; } if (maria_is_any_key_active(share->state.key_map)) { MARIA_KEYDEF *key= share->keyinfo; uint i; for (i =0 ; i < share->base.keys ; i++,key++) { if (!(key->flag & HA_NOSAME) && info->s->base.auto_key != i+1) { maria_clear_key_active(share->state.key_map, i); info->update|= HA_STATE_CHANGED; } } if (!share->changed) { share->changed= 1; /* Update on close */ 
share->state.changed|= STATE_CHANGED | STATE_NOT_ANALYZED; if (!share->global_changed) { share->global_changed= 1; share->state.open_count++; } } if (!share->now_transactional) share->state.state= *info->state; /* That state write to disk must be done, even for transactional tables; indeed the table's share is going to be lost (there was a HA_EXTRA_FORCE_REOPEN before, which set share->last_version to 0), and so the only way it leaves information (share->state.key_map) for the posterity is by writing it to disk. */ DBUG_ASSERT(!maria_in_recovery); error= _ma_state_info_write(share, MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET | MA_STATE_INFO_WRITE_FULL_INFO); } mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_FORCE_REOPEN: /* MySQL uses this case after it has closed all other instances of this table. We however do a flush here for additional safety. */ /** @todo consider porting these flush-es to MyISAM */ DBUG_ASSERT(share->reopen == 1); error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX, FLUSH_FORCE_WRITE, FLUSH_FORCE_WRITE); if (!error && share->changed) { mysql_mutex_lock(&share->intern_lock); error= _ma_state_info_write(share, MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET| MA_STATE_INFO_WRITE_FULL_INFO); mysql_mutex_unlock(&share->intern_lock); } mysql_mutex_lock(&THR_LOCK_maria); mysql_mutex_lock(&share->intern_lock); /* protect against Checkpoint */ /* Safety against assert in checkpoint */ share->bitmap.changed_not_flushed= 0; /* this makes the share not be re-used next time the table is opened */ share->last_version= 0L; /* Impossible version */ mysql_mutex_unlock(&share->intern_lock); mysql_mutex_unlock(&THR_LOCK_maria); break; case HA_EXTRA_PREPARE_FOR_DROP: /* Signals about intent to delete this table */ share->deleting= TRUE; share->global_changed= FALSE; /* force writing changed flag */ /* To force repair if reopened */ share->state.open_count= 1; share->changed= 1; _ma_mark_file_changed_now(share); /* Fall trough */ case HA_EXTRA_PREPARE_FOR_RENAME: { my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP); my_bool save_global_changed; enum flush_type type; /* This share, to have last_version=0, needs to save all its data/index blocks to disk if this is not for a DROP TABLE. Otherwise they would be invisible to future openers; and they could even go to disk late and cancel the work of future openers. */ if (info->lock_type != F_UNLCK && !info->was_locked) { info->was_locked= info->lock_type; if (maria_lock_database(info, F_UNLCK)) error= my_errno; info->lock_type= F_UNLCK; } /* We don't need to call _mi_decrement_open_count() if we are dropping the table, as the files will be removed anyway. If we are aborted before the files is removed, it's better to not call it as in that case the automatic repair on open will add the missing index entries */ mysql_mutex_lock(&share->intern_lock); if (share->kfile.file >= 0 && function != HA_EXTRA_PREPARE_FOR_DROP) _ma_decrement_open_count(info, 0); if (info->trn) { _ma_remove_table_from_trnman(share, info->trn); /* Ensure we don't point to the deleted data in trn */ info->state= info->state_start= &share->state.state; } /* Remove history for table */ _ma_reset_state(info); type= do_flush ? 
FLUSH_RELEASE : FLUSH_IGNORE_CHANGED; save_global_changed= share->global_changed; share->global_changed= 1; /* Don't increment open count */ mysql_mutex_unlock(&share->intern_lock); if (_ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX, type, type)) { error=my_errno; share->changed= 1; } mysql_mutex_lock(&share->intern_lock); share->global_changed= save_global_changed; if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); if (end_io_cache(&info->rec_cache)) error= 1; } if (share->kfile.file >= 0) { if (do_flush) { /* Save the state so that others can find it from disk. */ if ((share->changed && _ma_state_info_write(share, MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET | MA_STATE_INFO_WRITE_FULL_INFO)) || mysql_file_sync(share->kfile.file, MYF(0))) error= my_errno; } else { /* be sure that state is not tried for write as file may be closed */ share->changed= 0; share->global_changed= 0; share->state.open_count= 0; } } if (share->data_file_type == BLOCK_RECORD && share->bitmap.file.file >= 0) { DBUG_ASSERT(share->bitmap.non_flushable == 0 && share->bitmap.changed == 0); if (do_flush && my_sync(share->bitmap.file.file, MYF(0))) error= my_errno; share->bitmap.changed_not_flushed= 0; } /* last_version must be protected by intern_lock; See collect_tables() */ share->last_version= 0L; /* Impossible version */ mysql_mutex_unlock(&share->intern_lock); break; } case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE: if (info->trn) { mysql_mutex_lock(&share->intern_lock); _ma_remove_table_from_trnman(share, info->trn); /* Ensure we don't point to the deleted data in trn */ info->state= info->state_start= &share->state.state; mysql_mutex_unlock(&share->intern_lock); } break; case HA_EXTRA_FLUSH: if (!share->temporary) error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX, FLUSH_KEEP, FLUSH_KEEP); _ma_decrement_open_count(info, 1); if (share->not_flushed) { share->not_flushed= 0; if (_ma_sync_table_files(info)) error= my_errno; if (error) { /* Fatal error found */ share->changed= 1; _ma_set_fatal_error(share, HA_ERR_CRASHED); } } break; case HA_EXTRA_NORMAL: /* Theese isn't in use */ info->quick_mode= 0; break; case HA_EXTRA_QUICK: info->quick_mode= 1; break; case HA_EXTRA_NO_ROWS: if (!share->state.header.uniques) info->opt_flag|= OPT_NO_ROWS; break; case HA_EXTRA_PRELOAD_BUFFER_SIZE: info->preload_buff_size= *((ulong *) extra_arg); break; case HA_EXTRA_CHANGE_KEY_TO_UNIQUE: case HA_EXTRA_CHANGE_KEY_TO_DUP: maria_extra_keyflag(info, function); break; case HA_EXTRA_MMAP: #ifdef HAVE_MMAP if (block_records) break; /* Not supported */ mysql_mutex_lock(&share->intern_lock); /* Memory map the data file if it is not already mapped. It is safe to memory map a file while other threads are using file I/O on it. Assigning a new address to a function pointer is an atomic operation. intern_lock prevents that two or more mappings are done at the same time. */ if (!share->file_map) { if (_ma_dynmap_file(info, share->state.state.data_file_length)) { DBUG_PRINT("warning",("mmap failed: errno: %d",errno)); error= my_errno= errno; } else { share->file_read= _ma_mmap_pread; share->file_write= _ma_mmap_pwrite; } } mysql_mutex_unlock(&share->intern_lock); #endif break; case HA_EXTRA_MARK_AS_LOG_TABLE: mysql_mutex_lock(&share->intern_lock); share->is_log_table= TRUE; mysql_mutex_unlock(&share->intern_lock); break; case HA_EXTRA_KEY_CACHE: case HA_EXTRA_NO_KEY_CACHE: default: break; } DBUG_RETURN(error); } /* maria_extra */
MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
  int save_errno,errpos=0;
  uint files= 0, i, dir_length, length, UNINIT_VAR(key_parts), min_keys= 0;
  ulonglong file_offset=0;
  char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
  MYRG_INFO *m_info=0;
  File fd;
  IO_CACHE file;
  MI_INFO *isam=0;
  uint found_merge_insert_method= 0;
  size_t name_buff_length;
  my_bool bad_children= FALSE;
  DBUG_ENTER("myrg_open");

  memset(&file, 0, sizeof(file));
  if ((fd= mysql_file_open(rg_key_file_MRG,
                           fn_format(name_buff, name, "", MYRG_NAME_EXT,
                                     MY_UNPACK_FILENAME|MY_APPEND_EXT),
                           O_RDONLY | O_SHARE, MYF(0))) < 0)
    goto err;
  errpos=1;
  if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0,
                    MYF(MY_WME | MY_NABP)))
    goto err;
  errpos=2;
  dir_length=dirname_part(name_buff, name, &name_buff_length);
  while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
  {
    if ((end=buff+length)[-1] == '\n')
      end[-1]='\0';
    if (buff[0] && buff[0] != '#')
      files++;
  }

  my_b_seek(&file, 0);
  while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
  {
    if ((end=buff+length)[-1] == '\n')
      *--end='\0';
    if (!buff[0])
      continue;                               /* Skip empty lines */
    if (buff[0] == '#')
    {
      if (!strncmp(buff+1,"INSERT_METHOD=",14))
      {                                       /* Lookup insert method */
        int tmp= find_type(buff + 15, &merge_insert_method, FIND_TYPE_BASIC);
        found_merge_insert_method = (uint) (tmp >= 0 ? tmp : 0);
      }
      continue;                               /* Skip comments */
    }

    if (!has_path(buff))
    {
      (void) strmake(name_buff+dir_length,buff,
                     sizeof(name_buff)-1-dir_length);
      (void) cleanup_dirname(buff,name_buff);
    }
    else
      fn_format(buff, buff, "", "", 0);
    if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0))))
    {
      if (handle_locking & HA_OPEN_FOR_REPAIR)
      {
        myrg_print_wrong_table(buff);
        bad_children= TRUE;
        continue;
      }
      goto bad_children;
    }
    if (!m_info)                              /* First file */
    {
      key_parts=isam->s->base.key_parts;
      if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO) +
                                           files*sizeof(MYRG_TABLE) +
                                           key_parts*sizeof(long),
                                           MYF(MY_WME|MY_ZEROFILL))))
        goto err;
      DBUG_ASSERT(files);
      m_info->open_tables=(MYRG_TABLE *) (m_info+1);
      m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
      m_info->tables= files;
      files= 0;
      m_info->reclength=isam->s->base.reclength;
      min_keys= isam->s->base.keys;
      errpos=3;
    }
    m_info->open_tables[files].table= isam;
    m_info->open_tables[files].file_offset=(my_off_t) file_offset;
    file_offset+=isam->state->data_file_length;
    files++;
    if (m_info->reclength != isam->s->base.reclength)
    {
      if (handle_locking & HA_OPEN_FOR_REPAIR)
      {
        myrg_print_wrong_table(buff);
        bad_children= TRUE;
        continue;
      }
      goto bad_children;
    }
    m_info->options|= isam->s->options;
    m_info->records+= isam->state->records;
    m_info->del+= isam->state->del;
    m_info->data_file_length+= isam->state->data_file_length;
    if (min_keys > isam->s->base.keys)
      min_keys= isam->s->base.keys;
    for (i=0; i < key_parts; i++)
      m_info->rec_per_key_part[i]+= (isam->s->state.rec_per_key_part[i] /
                                     m_info->tables);
  }

  if (bad_children)
    goto bad_children;
  if (!m_info && !(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO),
                                                  MYF(MY_WME | MY_ZEROFILL))))
    goto err;
  /* Don't mark table readonly, for ALTER TABLE ... UNION=(...) to work */
  m_info->options&= ~(HA_OPTION_COMPRESS_RECORD | HA_OPTION_READ_ONLY_DATA);
  m_info->merge_insert_method= found_merge_insert_method;

  if (sizeof(my_off_t) == 4 && file_offset > (ulonglong) (ulong) ~0L)
  {
    my_errno=HA_ERR_RECORD_FILE_FULL;
    goto err;
  }
  m_info->keys= min_keys;
  memset(&m_info->by_key, 0, sizeof(m_info->by_key));

  /* this works ok if the table list is empty */
  m_info->end_table=m_info->open_tables+files;
  m_info->last_used_table=m_info->open_tables;
  m_info->children_attached= TRUE;

  (void) mysql_file_close(fd, MYF(0));
  end_io_cache(&file);
  mysql_mutex_init(rg_key_mutex_MYRG_INFO_mutex,
                   &m_info->mutex, MY_MUTEX_INIT_FAST);
  m_info->open_list.data=(void*) m_info;
  mysql_mutex_lock(&THR_LOCK_open);
  myrg_open_list=list_add(myrg_open_list,&m_info->open_list);
  mysql_mutex_unlock(&THR_LOCK_open);
  DBUG_RETURN(m_info);

bad_children:
  my_errno= HA_ERR_WRONG_MRG_TABLE_DEF;
err:
  save_errno=my_errno;
  switch (errpos) {
  case 3:
    while (files)
      (void) mi_close(m_info->open_tables[--files].table);
    my_free(m_info);
    /* Fall through */
  case 2:
    end_io_cache(&file);
    /* Fall through */
  case 1:
    (void) mysql_file_close(fd, MYF(0));
  }
  my_errno=save_errno;
  DBUG_RETURN (NULL);
}
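/*
  Usage sketch (not from the original source): opening a MERGE table
  with myrg_open() and walking its rows sequentially.  The table name,
  the fixed-size record buffer and the end-of-scan handling are
  hypothetical; passing HA_OFFSET_ERROR to myrg_rrnd() asks for the
  next row, which is how the handler layer drives a table scan.
*/
static int scan_merge_table(const char *merge_table)
{
  MYRG_INFO *merge;
  uchar record[1024];                         /* assumes reclength <= 1024 */
  int error;

  if (!(merge= myrg_open(merge_table, O_RDONLY, 0)))
    return my_errno;
  while (!(error= myrg_rrnd(merge, record, HA_OFFSET_ERROR)))
  {
    /* ... process one row from whichever child it came from ... */
  }
  if (error == HA_ERR_END_OF_FILE)            /* normal end of scan */
    error= 0;
  (void) myrg_close(merge);
  return error;
}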
MYRG_INFO *myrg_parent_open(const char *parent_name,
                            int (*callback)(void*, const char*),
                            void *callback_param)
{
  MYRG_INFO *UNINIT_VAR(m_info);
  int rc;
  int errpos;
  int save_errno;
  int insert_method;
  uint length;
  uint child_count;
  File fd;
  IO_CACHE file_cache;
  char parent_name_buff[FN_REFLEN * 2];
  char child_name_buff[FN_REFLEN];
  DBUG_ENTER("myrg_parent_open");

  rc= 1;
  errpos= 0;
  memset(&file_cache, 0, sizeof(file_cache));

  /* Open MERGE meta file. */
  if ((fd= mysql_file_open(rg_key_file_MRG,
                           fn_format(parent_name_buff, parent_name,
                                     "", MYRG_NAME_EXT,
                                     MY_UNPACK_FILENAME|MY_APPEND_EXT),
                           O_RDONLY | O_SHARE, MYF(0))) < 0)
    goto err; /* purecov: inspected */
  errpos= 1;

  if (init_io_cache(&file_cache, fd, 4 * IO_SIZE, READ_CACHE,
                    0, 0, MYF(MY_WME | MY_NABP)))
    goto err; /* purecov: inspected */
  errpos= 2;

  /* Count children. Determine insert method. */
  child_count= 0;
  insert_method= 0;
  while ((length= my_b_gets(&file_cache, child_name_buff, FN_REFLEN - 1)))
  {
    /* Remove line terminator. */
    if (child_name_buff[length - 1] == '\n')
      child_name_buff[--length]= '\0';

    /* Skip empty lines. */
    if (!child_name_buff[0])
      continue; /* purecov: inspected */

    /* Skip comments, but evaluate insert method. */
    if (child_name_buff[0] == '#')
    {
      if (!strncmp(child_name_buff + 1, "INSERT_METHOD=", 14))
      {
        /* Compare buffer with global methods list: merge_insert_method. */
        insert_method= find_type(child_name_buff + 15,
                                 &merge_insert_method, FIND_TYPE_BASIC);
      }
      continue;
    }

    /* Count the child. */
    child_count++;
  }

  /* Allocate MERGE parent table structure. */
  if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO) +
                                       child_count * sizeof(MYRG_TABLE),
                                       MYF(MY_WME | MY_ZEROFILL))))
    goto err; /* purecov: inspected */
  errpos= 3;
  m_info->open_tables= (MYRG_TABLE*) (m_info + 1);
  m_info->tables= child_count;
  m_info->merge_insert_method= insert_method > 0 ? insert_method : 0;
  /* This works even if the table list is empty. */
  m_info->end_table= m_info->open_tables + child_count;
  if (!child_count)
  {
    /* Do not attach/detach an empty child list. */
    m_info->children_attached= TRUE;
  }

  /* Call callback for each child. */
  my_b_seek(&file_cache, 0);
  while ((length= my_b_gets(&file_cache, child_name_buff, FN_REFLEN - 1)))
  {
    /* Remove line terminator. */
    if (child_name_buff[length - 1] == '\n')
      child_name_buff[--length]= '\0';

    /* Skip empty lines and comments. */
    if (!child_name_buff[0] || (child_name_buff[0] == '#'))
      continue;

    DBUG_PRINT("info", ("child: '%s'", child_name_buff));

    /* Callback registers child with handler table. */
    if ((rc= (*callback)(callback_param, child_name_buff)))
      goto err; /* purecov: inspected */
  }

  end_io_cache(&file_cache);
  (void) mysql_file_close(fd, MYF(0));
  mysql_mutex_init(rg_key_mutex_MYRG_INFO_mutex,
                   &m_info->mutex, MY_MUTEX_INIT_FAST);

  m_info->open_list.data= (void*) m_info;
  mysql_mutex_lock(&THR_LOCK_open);
  myrg_open_list= list_add(myrg_open_list, &m_info->open_list);
  mysql_mutex_unlock(&THR_LOCK_open);

  DBUG_RETURN(m_info);

  /* purecov: begin inspected */
err:
  save_errno= my_errno;
  switch (errpos) {
  case 3:
    my_free(m_info);
    /* Fall through */
  case 2:
    end_io_cache(&file_cache);
    /* Fall through */
  case 1:
    (void) mysql_file_close(fd, MYF(0));
  }
  my_errno= save_errno;
  DBUG_RETURN (NULL);
  /* purecov: end */
}
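/*
  Usage sketch (not from the original source): myrg_parent_open() does
  not open the children itself, it only invokes the callback once per
  child name read from the .MRG file.  The callback below is
  hypothetical and simply prints the names; in the server the callback
  attaches each child to the handler's child list.
*/
static int print_child_name(void *param, const char *child_name)
{
  /* 'param' is whatever was passed as callback_param; here a FILE*. */
  FILE *out= (FILE*) param;
  return fprintf(out, "child: %s\n", child_name) < 0;  /* non-zero aborts */
}

static MYRG_INFO *list_merge_children(const char *parent_name)
{
  /* Children are not attached; only their names are reported. */
  return myrg_parent_open(parent_name, print_child_name, stdout);
}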
int mi_close(register MI_INFO *info)
{
  int error=0,flag;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_close");
  DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u",
                      (long) info, (uint) share->reopen,
                      (uint) share->tot_locks));

  pthread_mutex_lock(&THR_LOCK_myisam);
  if (info->lock_type == F_EXTRA_LCK)
    info->lock_type=F_UNLCK;                  /* HA_EXTRA_NO_USER_CHANGE */

  if (share->reopen == 1 && share->kfile >= 0)
    _mi_decrement_open_count(info);

  if (info->lock_type != F_UNLCK)
  {
    if (mi_lock_database(info,F_UNLCK))
      error=my_errno;
  }
  pthread_mutex_lock(&share->intern_lock);

  if (share->options & HA_OPTION_READ_ONLY_DATA)
  {
    share->r_locks--;
    share->tot_locks--;
  }
  if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
  {
    if (end_io_cache(&info->rec_cache))
      error=my_errno;
    info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  }
  flag= !--share->reopen;
  myisam_open_list=list_delete(myisam_open_list,&info->open_list);
  pthread_mutex_unlock(&share->intern_lock);

  my_free(mi_get_rec_buff_ptr(info, info->rec_buff), MYF(MY_ALLOW_ZERO_PTR));

  if (flag)
  {
    if (share->kfile >= 0 &&
        flush_key_blocks(share->key_cache, share->kfile,
                         share->temporary ? FLUSH_IGNORE_CHANGED :
                         FLUSH_RELEASE))
      error=my_errno;
    if (share->kfile >= 0)
    {
      /*
        If we are crashed, we can safely flush the current state as it will
        not change the crashed state. We can NOT write the state in other
        cases as other threads may be using the file at this point
      */
      if (share->mode != O_RDONLY && mi_is_crashed(info))
        mi_state_info_write(share->kfile, &share->state, 1);
      if (my_close(share->kfile,MYF(0)))
        error = my_errno;
    }
#ifdef HAVE_MMAP
    if (share->file_map)
      _mi_unmap_file(info);
#endif
    if (share->decode_trees)
    {
      my_free((uchar*) share->decode_trees,MYF(0));
      my_free((uchar*) share->decode_tables,MYF(0));
    }
#ifdef THREAD
    thr_lock_delete(&share->lock);
    VOID(pthread_mutex_destroy(&share->intern_lock));
    {
      int i,keys;
      keys = share->state.header.keys;
      VOID(rwlock_destroy(&share->mmap_lock));
      for(i=0; i<keys; i++) {
        VOID(rwlock_destroy(&share->key_root_lock[i]));
      }
    }
#endif
    my_free((uchar*) info->s,MYF(0));
  }
  pthread_mutex_unlock(&THR_LOCK_myisam);
  if (info->ftparser_param)
  {
    my_free((uchar*)info->ftparser_param, MYF(0));
    info->ftparser_param= 0;
  }
  if (info->dfile >= 0 && my_close(info->dfile,MYF(0)))
    error = my_errno;

  myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error);
  my_free((uchar*) info,MYF(0));
  if (error)
  {
    DBUG_RETURN(my_errno=error);
  }
  DBUG_RETURN(0);
} /* mi_close */
int mi_lock_database(MI_INFO *info, int lock_type) { int error; uint count; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_lock_database"); DBUG_PRINT("enter",("lock_type: %d old lock %d r_locks: %u w_locks: %u " "global_changed: %d open_count: %u name: '%s'", lock_type, info->lock_type, share->r_locks, share->w_locks, share->global_changed, share->state.open_count, share->index_file_name)); if (share->options & HA_OPTION_READ_ONLY_DATA || info->lock_type == lock_type) DBUG_RETURN(0); if (lock_type == F_EXTRA_LCK) /* Used by TMP tables */ { ++share->w_locks; ++share->tot_locks; info->lock_type= lock_type; info->s->in_use= list_add(info->s->in_use, &info->in_use); DBUG_RETURN(0); } error= 0; mysql_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { switch (lock_type) { case F_UNLCK: ftparser_call_deinitializer(info); if (info->lock_type == F_RDLCK) count= --share->r_locks; else count= --share->w_locks; --share->tot_locks; if (info->lock_type == F_WRLCK && !share->w_locks && !share->delay_key_write && flush_key_blocks(share->key_cache, keycache_thread_var(), share->kfile,FLUSH_KEEP)) { error=my_errno(); mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); /* Mark that table must be checked */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) { error=my_errno(); mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); } } if (!count) { DBUG_PRINT("info",("changed: %u w_locks: %u", (uint) share->changed, share->w_locks)); if (share->changed && !share->w_locks) { if ((info->s->mmaped_length != info->s->state.state.data_file_length) && (info->s->nonmmaped_inserts > MAX_NONMAPPED_INSERTS)) { if (info->s->concurrent_insert) mysql_rwlock_wrlock(&info->s->mmap_lock); mi_remap_file(info, info->s->state.state.data_file_length); info->s->nonmmaped_inserts= 0; if (info->s->concurrent_insert) mysql_rwlock_unlock(&info->s->mmap_lock); } share->state.process= share->last_process=share->this_process; share->state.unique= info->last_unique= info->this_unique; share->state.update_count= info->last_loop= ++info->this_loop; if (mi_state_info_write(share->kfile, &share->state, 1)) error=my_errno(); share->changed=0; if (myisam_flush) { if (mysql_file_sync(share->kfile, MYF(0))) error= my_errno(); if (mysql_file_sync(info->dfile, MYF(0))) error= my_errno(); } else share->not_flushed=1; if (error) { mi_print_error(info->s, HA_ERR_CRASHED); mi_mark_crashed(info); } } if (info->lock_type != F_EXTRA_LCK) { if (share->r_locks) { /* Only read locks left */ if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno(); } else if (!share->w_locks) { /* No more locks */ if (my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error) error=my_errno(); } } } info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); info->lock_type= F_UNLCK; info->s->in_use= list_delete(info->s->in_use, &info->in_use); break; case F_RDLCK: if (info->lock_type == F_WRLCK) { /* Change RW to READONLY mysqld does not turn write locks to read locks, so we're never here in mysqld. 
*/ if (share->w_locks == 1) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(MY_SEEK_NOT_DONE))) { error=my_errno(); break; } } share->w_locks--; share->r_locks++; info->lock_type=lock_type; break; } if (!share->r_locks && !share->w_locks) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { error=my_errno(); break; } if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno(); (void) my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,MYF(MY_SEEK_NOT_DONE)); set_my_errno(error); break; } } (void) _mi_test_if_changed(info); share->r_locks++; share->tot_locks++; info->lock_type=lock_type; info->s->in_use= list_add(info->s->in_use, &info->in_use); break; case F_WRLCK: if (info->lock_type == F_RDLCK) { /* Change READONLY to RW */ if (share->r_locks == 1) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, MYF(info->lock_wait | MY_SEEK_NOT_DONE))) { error=my_errno(); break; } share->r_locks--; share->w_locks++; info->lock_type=lock_type; break; } } if (!(share->options & HA_OPTION_READ_ONLY_DATA)) { if (!share->w_locks) { if (my_lock(share->kfile,lock_type,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE)) { error=my_errno(); break; } if (!share->r_locks) { if (mi_state_info_read_dsk(share->kfile, &share->state, 1)) { error=my_errno(); (void) my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF, info->lock_wait | MY_SEEK_NOT_DONE); set_my_errno(error); break; } } } } (void) _mi_test_if_changed(info); info->lock_type=lock_type; info->invalidator=info->s->invalidator; share->w_locks++; share->tot_locks++; info->s->in_use= list_add(info->s->in_use, &info->in_use); break; default: break; /* Impossible */ } } #ifdef _WIN32 else { /* Check for bad file descriptors if this table is part of a merge union. Failing to capture this may cause a crash on windows if the table is renamed and later on referenced by the merge table. */ if( info->owned_by_merge && (info->s)->kfile < 0 ) { error = HA_ERR_NO_SUCH_TABLE; } } #endif mysql_mutex_unlock(&share->intern_lock); DBUG_RETURN(error); } /* mi_lock_database */
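/*
  Usage sketch (not from the original source): a write lock taken and
  released around a batch of inserts, which exercises the F_WRLCK and
  F_UNLCK cases of mi_lock_database() above.  The record buffer and row
  count are hypothetical; on the last unlock the share state is written
  back to the index file and the key blocks are flushed.
*/
static int insert_batch(MI_INFO *file, uchar *record, uint count)
{
  uint i;
  int error= 0;

  if (mi_lock_database(file, F_WRLCK))        /* re-reads state if needed */
    return my_errno();
  for (i= 0; i < count && !error; i++)
  {
    /* ... fill 'record' with the data for row i ... */
    error= mi_write(file, record);
  }
  if (mi_lock_database(file, F_UNLCK) && !error)
    error= my_errno();
  return error;
}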
static int examine_log(my_string file_name, char **table_names)
{
  uint command,result,files_open;
  ulong access_time,length;
  my_off_t filepos;
  int lock_command,mi_result;
  char isam_file_name[FN_REFLEN],llbuff[21],llbuff2[21];
  uchar head[20];
  gptr buff;
  struct test_if_open_param open_param;
  IO_CACHE cache;
  File file;
  FILE *write_file;
  enum ha_extra_function extra_command;
  TREE tree;
  struct file_info file_info,*curr_file_info;
  DBUG_ENTER("examine_log");

  if ((file=my_open(file_name,O_RDONLY,MYF(MY_WME))) < 0)
    DBUG_RETURN(1);
  write_file=0;
  if (write_filename)
  {
    if (!(write_file=my_fopen(write_filename,O_WRONLY,MYF(MY_WME))))
    {
      my_close(file,MYF(0));
      DBUG_RETURN(1);
    }
  }

  init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0));
  bzero((gptr) com_count,sizeof(com_count));
  init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
            (tree_element_free) file_info_free, NULL);
  VOID(init_key_cache(KEY_CACHE_SIZE));

  files_open=0;
  access_time=0;
  while (access_time++ != number_of_commands &&
         !my_b_read(&cache,(byte*) head,9))
  {
    isamlog_filepos=my_b_tell(&cache)-9L;
    file_info.filenr= mi_uint2korr(head+1);
    isamlog_process=file_info.process=(long) mi_uint4korr(head+3);
    if (!opt_processes)
      file_info.process=0;
    result= mi_uint2korr(head+7);
    if ((curr_file_info=(struct file_info*) tree_search(&tree,&file_info)))
    {
      curr_file_info->accessed=access_time;
      if (update && curr_file_info->used && curr_file_info->closed)
      {
        if (reopen_closed_file(&tree,curr_file_info))
        {
          command=sizeof(com_count)/sizeof(com_count[0][0])/3;
          result=0;
          goto com_err;
        }
      }
    }
    command=(uint) head[0];
    if (command < sizeof(com_count)/sizeof(com_count[0][0])/3 &&
        (!table_names[0] || (curr_file_info && curr_file_info->used)))
    {
      com_count[command][0]++;
      if (result)
        com_count[command][1]++;
    }
    switch ((enum myisam_log_commands) command) {
    case MI_LOG_OPEN:
      if (!table_names[0])
      {
        com_count[command][0]--;                /* Must be counted explicitly */
        if (result)
          com_count[command][1]--;
      }

      if (curr_file_info)
        printf("\nWarning: %s is opened with same process and filenumber\nMaybe you should use the -P option ?\n",
               curr_file_info->show_name);
      if (my_b_read(&cache,(byte*) head,2))
        goto err;
      file_info.name=0;
      file_info.show_name=0;
      file_info.record=0;
      if (read_string(&cache,(gptr*) &file_info.name,
                      (uint) mi_uint2korr(head)))
        goto err;
      {
        uint i;
        char *pos,*to;

        /* Convert old DOS-style '\\' separators to the new '/' format */
        for (pos=file_info.name; (pos=strchr(pos,'\\')) ; pos++)
          *pos= '/';

        pos=file_info.name;
        for (i=0 ; i < prefix_remove ; i++)
        {
          char *next;
          if (!(next=strchr(pos,'/')))
            break;
          pos=next+1;
        }
        to=isam_file_name;
        if (filepath)
          to=convert_dirname(isam_file_name,filepath,NullS);
        strmov(to,pos);
        fn_ext(isam_file_name)[0]=0;            /* Remove extension */
      }
      open_param.name=file_info.name;
      open_param.max_id=0;
      VOID(tree_walk(&tree,(tree_walk_action) test_if_open,
                     (void*) &open_param, left_root_right));
      file_info.id=open_param.max_id+1;
      /*
        In the line below +10 is added to accommodate the '<' and '>' chars
        plus the terminating '\0', leaving room for 7 digits.  It is
        improbable that the same table has that many entries in the table
        cache.  The additional space is needed for the sprintf call a few
        lines below.
      */
      file_info.show_name=my_memdup(isam_file_name,
                                    (uint) strlen(isam_file_name)+10,
                                    MYF(MY_WME));
      if (file_info.id > 1)
        sprintf(strend(file_info.show_name),"<%d>",file_info.id);
      file_info.closed=1;
      file_info.accessed=access_time;
      file_info.used=1;
      if (table_names[0])
      {
        char **name;
        file_info.used=0;
        for (name=table_names ; *name ; name++)
        {
          if (!strcmp(*name,isam_file_name))
            file_info.used=1;                   /* Update/log only this */
        }
      }
      if (update && file_info.used)
      {
        if (files_open >= max_files)
        {
          if (close_some_file(&tree))
            goto com_err;
          files_open--;
        }
        if (!(file_info.isam= mi_open(isam_file_name,O_RDWR,
                                      HA_OPEN_WAIT_IF_LOCKED)))
          goto com_err;
        if (!(file_info.record=my_malloc(file_info.isam->s->base.reclength,
                                         MYF(MY_WME))))
          goto end;
        files_open++;
        file_info.closed=0;
        if (opt_myisam_with_debug)
          file_info.isam->s->rnd= 0;
        else
          file_info.isam->s->rnd= isamlog_process;
      }
      VOID(tree_insert(&tree,(gptr) &file_info,0));
      if (file_info.used)
      {
        if (verbose && !record_pos_file)
          printf_log("%s: open -> %d",file_info.show_name, file_info.filenr);
        com_count[command][0]++;
        if (result)
          com_count[command][1]++;
      }
      break;
    case MI_LOG_CLOSE:
      if (verbose && !record_pos_file &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
        printf_log("%s: %s -> %d",FILENAME(curr_file_info),
                   command_name[command],result);
      if (curr_file_info)
      {
        if (!curr_file_info->closed)
          files_open--;
        VOID(tree_delete(&tree,(gptr) curr_file_info));
      }
      break;
    case MI_LOG_EXTRA:
      if (my_b_read(&cache,(byte*) head,1))
        goto err;
      extra_command=(enum ha_extra_function) head[0];
      if (verbose && !record_pos_file &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
        printf_log("%s: %s(%d) -> %d",FILENAME(curr_file_info),
                   command_name[command], (int) extra_command,result);
      if (update && curr_file_info && !curr_file_info->closed)
      {
        if (mi_extra(curr_file_info->isam, extra_command, 0) != (int) result)
        {
          fflush(stdout);
          VOID(fprintf(stderr,
                       "Warning: error %d, expected %d on command %s at %s\n",
                       my_errno,result,command_name[command],
                       llstr(isamlog_filepos,llbuff)));
          fflush(stderr);
        }
      }
      break;
    case MI_LOG_DELETE:
      if (my_b_read(&cache,(byte*) head,8))
        goto err;
      filepos=mi_sizekorr(head);
      if (verbose &&
          (!record_pos_file ||
           ((record_pos == filepos || record_pos == NO_FILEPOS) &&
            !cmp_filename(curr_file_info,record_pos_file))) &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
        printf_log("%s: %s at %ld -> %d",FILENAME(curr_file_info),
                   command_name[command],(long) filepos,result);
      if (update && curr_file_info && !curr_file_info->closed)
      {
        if (mi_rrnd(curr_file_info->isam,curr_file_info->record,filepos))
        {
          if (!recover)
            goto com_err;
          if (verbose)
            printf_log("error: Didn't find row to delete with mi_rrnd");
          com_count[command][2]++;              /* Mark error */
        }
        mi_result=mi_delete(curr_file_info->isam,curr_file_info->record);
        if ((mi_result == 0 && result) ||
            (mi_result && (uint) my_errno != result))
        {
          if (!recover)
            goto com_err;
          if (mi_result)
            com_count[command][2]++;            /* Mark error */
          if (verbose)
            printf_log("error: Got result %d from mi_delete instead of %d",
                       mi_result, result);
        }
      }
      break;
    case MI_LOG_WRITE:
    case MI_LOG_UPDATE:
      if (my_b_read(&cache,(byte*) head,12))
        goto err;
      filepos=mi_sizekorr(head);
      length=mi_uint4korr(head+8);
      buff=0;
      if (read_string(&cache,&buff,(uint) length))
        goto err;
      if ((!record_pos_file ||
           ((record_pos == filepos || record_pos == NO_FILEPOS) &&
            !cmp_filename(curr_file_info,record_pos_file))) &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
      {
        if (write_file &&
            (my_fwrite(write_file,buff,length,
                       MYF(MY_WAIT_IF_FULL | MY_NABP))))
          goto end;
        if (verbose)
          printf_log("%s: %s at %ld, length=%ld -> %d",
                     FILENAME(curr_file_info),
                     command_name[command], filepos,length,result);
      }
      if (update && curr_file_info && !curr_file_info->closed)
      {
        if (curr_file_info->isam->s->base.blobs)
          fix_blob_pointers(curr_file_info->isam,buff);
        if ((enum myisam_log_commands) command == MI_LOG_UPDATE)
        {
          if (mi_rrnd(curr_file_info->isam,curr_file_info->record,filepos))
          {
            if (!recover)
            {
              result=0;
              goto com_err;
            }
            if (verbose)
              printf_log("error: Didn't find row to update with mi_rrnd");
            if (recover == 1 || result ||
                find_record_with_key(curr_file_info,buff))
            {
              com_count[command][2]++;          /* Mark error */
              break;
            }
          }
          mi_result=mi_update(curr_file_info->isam,curr_file_info->record,
                              buff);
          if ((mi_result == 0 && result) ||
              (mi_result && (uint) my_errno != result))
          {
            if (!recover)
              goto com_err;
            if (verbose)
              printf_log("error: Got result %d from mi_update instead of %d",
                         mi_result, result);
            if (mi_result)
              com_count[command][2]++;          /* Mark error */
          }
        }
        else
        {
          mi_result=mi_write(curr_file_info->isam,buff);
          if ((mi_result == 0 && result) ||
              (mi_result && (uint) my_errno != result))
          {
            if (!recover)
              goto com_err;
            if (verbose)
              printf_log("error: Got result %d from mi_write instead of %d",
                         mi_result, result);
            if (mi_result)
              com_count[command][2]++;          /* Mark error */
          }
          if (!recover && filepos != curr_file_info->isam->lastpos)
          {
            printf("error: Wrote at position: %s, should have been %s",
                   llstr(curr_file_info->isam->lastpos,llbuff),
                   llstr(filepos,llbuff2));
            goto end;
          }
        }
      }
      my_free(buff,MYF(0));
      break;
    case MI_LOG_LOCK:
      if (my_b_read(&cache,(byte*) head,sizeof(lock_command)))
        goto err;
      memcpy_fixed(&lock_command,head,sizeof(lock_command));
      if (verbose && !record_pos_file &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
        printf_log("%s: %s(%d) -> %d\n",FILENAME(curr_file_info),
                   command_name[command],lock_command,result);
      if (update && curr_file_info && !curr_file_info->closed)
      {
        if (mi_lock_database(curr_file_info->isam,lock_command) !=
            (int) result)
          goto com_err;
      }
      break;
    case MI_LOG_DELETE_ALL:
      if (verbose && !record_pos_file &&
          (!table_names[0] || (curr_file_info && curr_file_info->used)))
        printf_log("%s: %s -> %d\n",FILENAME(curr_file_info),
                   command_name[command],result);
      break;
    default:
      fflush(stdout);
      VOID(fprintf(stderr,
                   "Error: found unknown command %d in logfile, aborted\n",
                   command));
      fflush(stderr);
      goto end;
    }
  }
  end_key_cache();
  delete_tree(&tree);
  VOID(end_io_cache(&cache));
  VOID(my_close(file,MYF(0)));
  if (write_file && my_fclose(write_file,MYF(MY_WME)))
    DBUG_RETURN(1);
  DBUG_RETURN(0);

err:
  fflush(stdout);
  VOID(fprintf(stderr,"Got error %d when reading from logfile\n",my_errno));
  fflush(stderr);
  goto end;
com_err:
  fflush(stdout);
  VOID(fprintf(stderr,"Got error %d, expected %d on command %s at %s\n",
               my_errno,result,command_name[command],
               llstr(isamlog_filepos,llbuff)));
  fflush(stderr);
end:
  end_key_cache();
  delete_tree(&tree);
  VOID(end_io_cache(&cache));
  VOID(my_close(file,MYF(0)));
  if (write_file)
    VOID(my_fclose(write_file,MYF(MY_WME)));
  DBUG_RETURN(1);
}
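/*
  Illustrative sketch, not part of the original source: the fixed 9-byte
  header that examine_log() decodes from each log entry, inferred from the
  head[0] / mi_uint2korr(head+1) / mi_uint4korr(head+3) / mi_uint2korr(head+7)
  reads above.  The struct and helper names are hypothetical and only
  document the layout:

    byte 0       command            (enum myisam_log_commands)
    bytes 1-2    file number        (mi_uint2korr)
    bytes 3-6    process id         (mi_uint4korr)
    bytes 7-8    result/error code  (mi_uint2korr)

  Commands such as MI_LOG_DELETE and MI_LOG_WRITE/MI_LOG_UPDATE are followed
  by command-specific data (file position, record length, row image) which
  examine_log() reads separately.
*/
struct log_head_sketch
{
  uint  command;
  uint  filenr;
  ulong process;
  uint  result;
};

static void decode_log_head_sketch(const uchar *head,
                                   struct log_head_sketch *out)
{
  out->command= (uint) head[0];
  out->filenr=  mi_uint2korr(head+1);
  out->process= (ulong) mi_uint4korr(head+3);
  out->result=  mi_uint2korr(head+7);
}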
int mi_close(register MI_INFO *info)
{
  int error=0,flag;
  MYISAM_SHARE *share=info->s;
  DBUG_ENTER("mi_close");
  DBUG_PRINT("enter",("base: 0x%lx  reopen: %u  locks: %u",
                      (long) info, (uint) share->reopen,
                      (uint) share->tot_locks));

  mysql_mutex_lock(&THR_LOCK_myisam);
  if (info->lock_type == F_EXTRA_LCK)
    info->lock_type=F_UNLCK;                    /* HA_EXTRA_NO_USER_CHANGE */

  if (info->lock_type != F_UNLCK)
  {
    if (mi_lock_database(info,F_UNLCK))
      error=my_errno;
  }
  mysql_mutex_lock(&share->intern_lock);

  if (share->options & HA_OPTION_READ_ONLY_DATA)
  {
    share->r_locks--;
    share->tot_locks--;
  }
  if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
  {
    if (end_io_cache(&info->rec_cache))
      error=my_errno;
    info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  }
  flag= !--share->reopen;
  myisam_open_list=list_delete(myisam_open_list,&info->open_list);
  mysql_mutex_unlock(&share->intern_lock);

  my_free(mi_get_rec_buff_ptr(info, info->rec_buff));

  if (flag)
  {
    DBUG_EXECUTE_IF("crash_before_flush_keys",
                    if (share->kfile >= 0) abort(););
    if (share->kfile >= 0 &&
        flush_key_blocks(share->key_cache, share->kfile,
                         share->temporary ? FLUSH_IGNORE_CHANGED :
                         FLUSH_RELEASE))
      error=my_errno;
    if (share->kfile >= 0)
    {
      /*
        If we are crashed, we can safely flush the current state as it will
        not change the crashed state.  We can NOT write the state in other
        cases as other threads may be using the file at this point.
      */
      if (share->mode != O_RDONLY && mi_is_crashed(info))
        mi_state_info_write(share->kfile, &share->state, 1);
      /* Decrementing the open count must be the last I/O on this file. */
      _mi_decrement_open_count(info);
      if (mysql_file_close(share->kfile, MYF(0)))
        error= my_errno;
    }
#ifdef HAVE_MMAP
    if (share->file_map)
      _mi_unmap_file(info);
#endif
    if (share->decode_trees)
    {
      my_free(share->decode_trees);
      my_free(share->decode_tables);
    }
    thr_lock_delete(&share->lock);
    mysql_mutex_destroy(&share->intern_lock);
    {
      int i,keys;
      keys= share->state.header.keys;
      mysql_rwlock_destroy(&share->mmap_lock);
      for (i=0; i < keys; i++)
        mysql_rwlock_destroy(&share->key_root_lock[i]);
    }
    my_free(info->s);
  }
  mysql_mutex_unlock(&THR_LOCK_myisam);
  /*
    Release the per-handler resources and free the handler itself; this
    epilogue follows the upstream mi_close().
  */
  if (info->ftparser_param)
  {
    my_free(info->ftparser_param);
    info->ftparser_param= 0;
  }
  if (info->dfile >= 0 && mysql_file_close(info->dfile, MYF(0)))
    error= my_errno;

  myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error);
  my_free(info);
  if (error)
  {
    DBUG_RETURN(my_errno= error);
  }
  DBUG_RETURN(0);
} /* mi_close */
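/*
  Minimal usage sketch, not from the original file, assuming the usual
  declarations from the MyISAM headers: a handler returned by mi_open() must
  always be released with mi_close(), and an explicit lock taken with
  mi_lock_database() is dropped with F_UNLCK first (mi_close() also unlocks
  any remaining non-F_UNLCK lock itself).  Function name and flow are
  illustrative only.
*/
static int open_and_close_sketch(const char *table_name)
{
  MI_INFO *handler;

  if (!(handler= mi_open(table_name, O_RDONLY, HA_OPEN_WAIT_IF_LOCKED)))
    return my_errno;                    /* open failed */

  if (mi_lock_database(handler, F_RDLCK))
  {
    (void) mi_close(handler);           /* still frees the handler */
    return my_errno;
  }

  /* ... reads via mi_rrnd()/mi_rkey() would go here ... */

  (void) mi_lock_database(handler, F_UNLCK);
  return mi_close(handler);             /* flushes key blocks on last close */
}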
int mi_lock_database(MI_INFO *info, int lock_type)
{
  int error;
  uint count;
  MYISAM_SHARE *share=info->s;
  uint flag;
  DBUG_ENTER("mi_lock_database");
  DBUG_PRINT("info",("lock_type: %d", lock_type));

  if (share->options & HA_OPTION_READ_ONLY_DATA ||
      info->lock_type == lock_type)
    DBUG_RETURN(0);
  if (lock_type == F_EXTRA_LCK)
  {
    ++share->w_locks;
    ++share->tot_locks;
    info->lock_type= lock_type;
    DBUG_RETURN(0);
  }

  flag=error=0;
  pthread_mutex_lock(&share->intern_lock);
  if (share->kfile >= 0)                /* May only be false on windows */
  {
    switch (lock_type) {
    case F_UNLCK:
      DBUG_PRINT("info", ("old lock: %d", info->lock_type));
      if (info->lock_type == F_RDLCK)
        count= --share->r_locks;
      else
        count= --share->w_locks;
      --share->tot_locks;
      if (info->lock_type == F_WRLCK && !share->w_locks &&
          !share->delay_key_write &&
          flush_key_blocks(share->kfile,FLUSH_KEEP))
      {
        error=my_errno;
        mi_mark_crashed(info);          /* Mark that table must be checked */
      }
      if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
      {
        if (end_io_cache(&info->rec_cache))
        {
          error=my_errno;
          mi_mark_crashed(info);
        }
      }
      if (!count)
      {
        DBUG_PRINT("info",("changed: %u  w_locks: %u",
                           (uint) share->changed, share->w_locks));
        if (share->changed && !share->w_locks)
        {
          share->state.process= share->last_process= share->this_process;
          share->state.unique= info->last_unique= info->this_unique;
          share->state.update_count= info->last_loop= ++info->this_loop;
          if (mi_state_info_write(share->kfile, &share->state, 1))
            error=my_errno;
          share->changed=0;
          if (myisam_flush)
          {
#if defined(__WIN__)
            if (_commit(share->kfile))
              error=errno;
            if (_commit(info->dfile))
              error=errno;
#elif defined(HAVE_FDATASYNC)
            if (fdatasync(share->kfile))
              error=errno;
            if (fdatasync(info->dfile))
              error=errno;
#elif defined(HAVE_FSYNC)
            if (fsync(share->kfile))
              error=errno;
            if (fsync(info->dfile))
              error=errno;
#endif
          }
          else
            share->not_flushed=1;
          if (error)
            mi_mark_crashed(info);
        }
        if (info->lock_type != F_EXTRA_LCK)
        {
          if (share->r_locks)
          {                             /* Only read locks left */
            flag=1;
            if (my_lock(share->kfile,F_RDLCK,0L,F_TO_EOF,
                        MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error)
              error=my_errno;
          }
          else if (!share->w_locks)
          {                             /* No more locks */
            flag=1;
            if (my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,
                        MYF(MY_WME | MY_SEEK_NOT_DONE)) && !error)
              error=my_errno;
          }
        }
      }
      info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
      info->lock_type= F_UNLCK;
      break;
    case F_RDLCK:
      if (info->lock_type == F_WRLCK)
      {                                 /* Change RW to READONLY */
        if (share->w_locks == 1)
        {
          flag=1;
          if (my_lock(share->kfile,lock_type,0L,F_TO_EOF,
                      MYF(MY_SEEK_NOT_DONE)))
          {
            error=my_errno;
            break;
          }
        }
        share->w_locks--;
        share->r_locks++;
        info->lock_type=lock_type;
        break;
      }
      if (!share->r_locks && !share->w_locks)
      {
        flag=1;
        if (my_lock(share->kfile,lock_type,0L,F_TO_EOF,
                    info->lock_wait | MY_SEEK_NOT_DONE))
        {
          error=my_errno;
          break;
        }
        if (mi_state_info_read_dsk(share->kfile, &share->state, 1))
        {
          error=my_errno;
          VOID(my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,
                       MYF(MY_SEEK_NOT_DONE)));
          my_errno=error;
          break;
        }
      }
      VOID(_mi_test_if_changed(info));
      share->r_locks++;
      share->tot_locks++;
      info->lock_type=lock_type;
      break;
    case F_WRLCK:
      if (info->lock_type == F_RDLCK)
      {                                 /* Change READONLY to RW */
        if (share->r_locks == 1)
        {
          flag=1;
          if (my_lock(share->kfile,lock_type,0L,F_TO_EOF,
                      MYF(info->lock_wait | MY_SEEK_NOT_DONE)))
          {
            error=my_errno;
            break;
          }
          share->r_locks--;
          share->w_locks++;
          info->lock_type=lock_type;
          break;
        }
      }
      if (!(share->options & HA_OPTION_READ_ONLY_DATA))
      {
        if (!share->w_locks)
        {
          flag=1;
          if (my_lock(share->kfile,lock_type,0L,F_TO_EOF,
                      info->lock_wait | MY_SEEK_NOT_DONE))
          {
            error=my_errno;
            break;
          }
          if (!share->r_locks)
          {
            if (mi_state_info_read_dsk(share->kfile, &share->state, 1))
            {
              error=my_errno;
              VOID(my_lock(share->kfile,F_UNLCK,0L,F_TO_EOF,
                           info->lock_wait | MY_SEEK_NOT_DONE));
              my_errno=error;
              break;
            }
          }
        }
      }
      VOID(_mi_test_if_changed(info));
      info->lock_type=lock_type;
      info->invalidator=info->s->invalidator;
      share->w_locks++;
      share->tot_locks++;
      break;
    default:
      break;                            /* Impossible */
    }
  }
  pthread_mutex_unlock(&share->intern_lock);
#if defined(FULL_LOG) || defined(_lint)
  lock_type|=(int) (flag << 8);         /* Set extra bit if a real lock was taken */
  myisam_log_command(MI_LOG_LOCK,info,(byte*) &lock_type,sizeof(lock_type),
                     error);
#endif
  DBUG_RETURN(error);
} /* mi_lock_database */
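/*
  Hedged sketch, not part of the original source, of the lock transitions
  handled above, using only calls shown in this file.  The fcntl lock on
  share->kfile is taken or released only when the shared counters
  (share->r_locks / share->w_locks) cross their boundaries, so additional
  locks from handlers on the same share are counter updates only.  The
  function name and the two-handler scenario are illustrative assumptions.
*/
static int lock_transition_sketch(MI_INFO *a, MI_INFO *b)
{
  int error;

  if ((error= mi_lock_database(a, F_RDLCK)))   /* first reader: real file lock */
    return error;
  if ((error= mi_lock_database(b, F_RDLCK)))   /* second reader: counter only */
    return error;

  /* Releasing b first keeps a's upgrade on the share->r_locks == 1
     fast path in the F_WRLCK case above. */
  if ((error= mi_lock_database(b, F_UNLCK)))
    return error;
  if ((error= mi_lock_database(a, F_WRLCK)))   /* READONLY -> RW upgrade */
    return error;

  return mi_lock_database(a, F_UNLCK);         /* writes state if changed */
}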
MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
  int save_errno,i,errpos;
  uint files,dir_length,length,key_parts;
  ulonglong file_offset;
  char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
  MYRG_INFO info,*m_info;
  File fd;
  IO_CACHE file;
  MI_INFO *isam,*last_isam;
  DBUG_ENTER("myrg_open");
  LINT_INIT(last_isam);
  LINT_INIT(m_info);

  isam=0;
  errpos=files=0;
  bzero((gptr) &info,sizeof(info));
  bzero((char*) &file,sizeof(file));
  if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT,4),
                  O_RDONLY | O_SHARE,MYF(0))) < 0 ||
      init_io_cache(&file, fd, IO_SIZE, READ_CACHE, 0, 0,
                    MYF(MY_WME | MY_NABP)))
    goto err;
  errpos=1;
  dir_length=dirname_part(name_buff,name);
  info.reclength=0;
  while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
  {
    if ((end=buff+length)[-1] == '\n')
      end[-1]='\0';
    if (!buff[0])
      continue;                         /* Skip empty lines */
    if (buff[0] == '#')
    {
      if (!strncmp(buff+1,"INSERT_METHOD=",14))
      {                                 /* Lookup insert method */
        int tmp=find_type(buff+15,&merge_insert_method,2);
        info.merge_insert_method= (uint) (tmp >= 0 ? tmp : 0);
      }
      continue;                         /* Skip comments */
    }

    if (!test_if_hard_path(buff))
    {
      VOID(strmake(name_buff+dir_length,buff,
                   sizeof(name_buff)-1-dir_length));
      VOID(cleanup_dirname(buff,name_buff));
    }
    if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0))))
      goto err;
    files++;
    last_isam=isam;
    if (info.reclength && info.reclength != isam->s->base.reclength)
    {
      my_errno=HA_ERR_WRONG_MRG_TABLE_DEF;
      goto err;
    }
    info.reclength=isam->s->base.reclength;
  }
  key_parts=(isam ? isam->s->base.key_parts : 0);
  if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO)+
                                       files*sizeof(MYRG_TABLE)+
                                       sizeof(long)*key_parts,
                                       MYF(MY_WME))))
    goto err;
  *m_info=info;
  m_info->tables=files;
  if (files)
  {
    m_info->open_tables=(MYRG_TABLE *) (m_info+1);
    m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
    bzero((char*) m_info->rec_per_key_part,sizeof(long)*key_parts);
  }
  else
  {
    m_info->open_tables=0;
    m_info->rec_per_key_part=0;
  }
  errpos=2;

  for (i=files ; i-- > 0 ; )
  {
    uint j;
    m_info->open_tables[i].table=isam;
    m_info->options|=isam->s->options;
    m_info->records+=isam->state->records;
    m_info->del+=isam->state->del;
    m_info->data_file_length+=isam->state->data_file_length;
    for (j=0; j < key_parts; j++)
      m_info->rec_per_key_part[j]+=isam->s->state.rec_per_key_part[j] / files;
    if (i)
      isam=(MI_INFO*) (isam->open_list.next->data);
  }
  /* Don't mark table readonly, for ALTER TABLE ... UNION=(...) to work */
  m_info->options&= ~(HA_OPTION_COMPRESS_RECORD | HA_OPTION_READ_ONLY_DATA);

  /* Fix fileinfo for easier debugging (actually set by rrnd) */
  file_offset=0;
  for (i=0 ; (uint) i < files ; i++)
  {
    m_info->open_tables[i].file_offset=(my_off_t) file_offset;
    file_offset+=m_info->open_tables[i].table->state->data_file_length;
  }
  if (sizeof(my_off_t) == 4 && file_offset > (ulonglong) (ulong) ~0L)
  {
    my_errno=HA_ERR_RECORD_FILE_FULL;
    goto err;
  }
  m_info->keys= files ? m_info->open_tables->table->s->base.keys : 0;
  bzero((char*) &m_info->by_key,sizeof(m_info->by_key));

  /* This works ok if the table list is empty */
  m_info->end_table=m_info->open_tables+files;
  m_info->last_used_table=m_info->open_tables;

  VOID(my_close(fd,MYF(0)));
  end_io_cache(&file);
  m_info->open_list.data=(void*) m_info;
  pthread_mutex_lock(&THR_LOCK_open);
  myrg_open_list=list_add(myrg_open_list,&m_info->open_list);
  pthread_mutex_unlock(&THR_LOCK_open);
  DBUG_RETURN(m_info);

err:
  save_errno=my_errno;
  switch (errpos) {
  case 2:
    my_free((char*) m_info,MYF(0));
    /* Fall through */
  case 1:
    VOID(my_close(fd,MYF(0)));
    end_io_cache(&file);
    for (i=files ; i-- > 0 ; )
    {
      isam=last_isam;
      if (i)
        last_isam=(MI_INFO*) (isam->open_list.next->data);
      mi_close(isam);
    }
  }
  my_errno=save_errno;
  DBUG_RETURN(NULL);
}
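/*
  Hedged usage sketch, not from the original source.  From the parsing loop
  above, the plain-text ".MRG" definition file that myrg_open() reads
  accepts: empty lines (skipped), "#..." comments, an optional
  "#INSERT_METHOD=..." directive, and one MyISAM table path per line, e.g.

      #INSERT_METHOD=LAST
      t1
      t2

  All underlying tables must share the same base record length, otherwise
  the open fails with HA_ERR_WRONG_MRG_TABLE_DEF.  The name is passed
  without the MYRG_NAME_EXT extension, which fn_format() appends.  The
  wrapper function below is illustrative only.
*/
static MYRG_INFO *open_merge_sketch(const char *merge_name)
{
  /* handle_locking != 0 makes the underlying mi_open() calls wait for
     locked tables (HA_OPEN_WAIT_IF_LOCKED). */
  MYRG_INFO *merge= myrg_open(merge_name, O_RDONLY, 1);

  if (!merge)
    return NULL;                /* my_errno holds the reason */
  /* ... scans via myrg_rrnd() would go here ... */
  return merge;                 /* caller releases it with myrg_close() */
}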