my_bool thr_reschedule_write_lock(THR_LOCK_DATA *data)
{
  THR_LOCK *lock=data->lock;
  DBUG_ENTER("thr_reschedule_write_lock");

  pthread_mutex_lock(&lock->mutex);
  if (!lock->read_wait.data)                    /* No waiting read locks */
  {
    pthread_mutex_unlock(&lock->mutex);
    DBUG_RETURN(0);
  }

  data->type=TL_WRITE_DELAYED;
  if (lock->update_status)
    (*lock->update_status)(data->status_param);
  if (((*data->prev)=data->next))               /* remove from lock-list */
    data->next->prev= data->prev;
  else
    lock->write.last=data->prev;

  if ((data->next=lock->write_wait.data))       /* Put first in lock_list */
    data->next->prev= &data->next;
  else
    lock->write_wait.last= &data->next;
  data->prev= &lock->write_wait.data;
  data->cond=get_cond();                        /* This was zero */
  lock->write_wait.data=data;
  free_all_read_locks(lock,0);

  pthread_mutex_unlock(&lock->mutex);
  DBUG_RETURN(thr_upgrade_write_delay_lock(data));
}
int until(int laddr1, int laddr2, int under_glob)
{
    register int cnt, status;
    register char *lp_save;
    int condition;

    if(laddr1 <= 0)
        return(ERROR5);

    if(*lp >= '0' && *lp <= '9')
        cnt = getint();
    else
        cnt = -1;

    if((condition = get_cond()) < 0)
        return(ERROR5);

    if(condition != NOT_TRUE_OR_FALSE)
        cc_reg = !condition;

    curln = laddr1;
    lp_save = lp;

    for(curln = laddr1 ; curln <= laddr2 ; ++curln)
    {
        status = OK;
        while((cnt >= 0 ? --cnt : 1) >= 0 && status == OK && cc_reg != condition)
        {
            lp = lp_save;
            status = exec_line(under_glob, 1);
        }
    }
    --curln;

    lp = "\n";          /* Force main to fetch a new line */

    return(OK);
}
static my_bool wait_for_lock(struct st_lock_list *wait, THR_LOCK_DATA *data,
                             my_bool in_wait_list)
{
  pthread_cond_t *cond=get_cond();
  struct st_my_thread_var *thread_var=my_thread_var;
  int result;

  if (!in_wait_list)
  {
    (*wait->last)=data;                         /* Wait for lock */
    data->prev= wait->last;
    wait->last= &data->next;
  }

  /* Set up control struct to allow others to abort locks */
  thread_var->current_mutex= &data->lock->mutex;
  thread_var->current_cond= cond;
  data->cond=cond;

  while (!thread_var->abort || in_wait_list)
  {
    pthread_cond_wait(cond,&data->lock->mutex);
    if (data->cond != cond)
      break;
  }
  if (data->cond || data->type == TL_UNLOCK)
  {
    if (data->cond)                             /* aborted */
    {
      if (((*data->prev)=data->next))           /* remove from wait-list */
        data->next->prev= data->prev;
      else
        wait->last=data->prev;
    }
    data->type=TL_UNLOCK;                       /* No lock */
    result=1;                                   /* Didn't get lock */
    check_locks(data->lock,"failed wait_for_lock",0);
  }
  else
  {
    result=0;
    statistic_increment(locks_waited, &THR_LOCK_lock);
    if (data->lock->get_status)
      (*data->lock->get_status)(data->status_param);
    check_locks(data->lock,"got wait_for_lock",0);
  }
  pthread_mutex_unlock(&data->lock->mutex);

  /* The following must be done after unlock of lock->mutex */
  pthread_mutex_lock(&thread_var->mutex);
  thread_var->current_mutex= 0;
  thread_var->current_cond= 0;
  pthread_mutex_unlock(&thread_var->mutex);
  return result;
}
int branch()
{
    register int n, c;
    int condition;

    n = getint();

    if((condition = get_cond()) < 0)
        return(ERROR5);

    if(condition == cc_reg || condition == NOT_TRUE_OR_FALSE)
    {
        if(n == 0)
            lp = lbuff;
        else
        {
            while(n > 1)
                if((c = x_fp ? getc(x_fp) : mgetchar()) == '\n'
                   || c == '\r' || c == EOF)
                    --n;
            lp = "\n";  /* Force main to fetch a new line */
        }
    }
    return(OK);
}
enum enum_thr_lock_result thr_lock(THR_LOCK_DATA *data, THR_LOCK_OWNER *owner, enum thr_lock_type lock_type) { THR_LOCK *lock=data->lock; enum enum_thr_lock_result result= THR_LOCK_SUCCESS; struct st_lock_list *wait_queue; THR_LOCK_DATA *lock_owner; DBUG_ENTER("thr_lock"); data->next=0; data->cond=0; /* safety */ data->type=lock_type; data->owner= owner; /* Must be reset ! */ VOID(pthread_mutex_lock(&lock->mutex)); DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d", (long) data, data->owner->info->thread_id, (long) lock, (int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? "enter read_lock" : "enter write_lock",0); if ((int) lock_type <= (int) TL_READ_NO_INSERT) { /* Request for READ lock */ if (lock->write.data) { /* We can allow a read lock even if there is already a write lock on the table in one the following cases: - This thread alread have a write lock on the table - The write lock is TL_WRITE_ALLOW_READ or TL_WRITE_DELAYED and the read lock is TL_READ_HIGH_PRIORITY or TL_READ - The write lock is TL_WRITE_CONCURRENT_INSERT or TL_WRITE_ALLOW_WRITE and the read lock is not TL_READ_NO_INSERT */ DBUG_PRINT("lock",("write locked by thread: %ld", lock->write.data->owner->info->thread_id)); if (thr_lock_owner_equal(data->owner, lock->write.data->owner) || (lock->write.data->type <= TL_WRITE_DELAYED && (((int) lock_type <= (int) TL_READ_HIGH_PRIORITY) || (lock->write.data->type != TL_WRITE_CONCURRENT_INSERT && lock->write.data->type != TL_WRITE_ALLOW_READ)))) { /* Already got a write lock */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if (lock_type == TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with old write lock",0); if (lock->get_status) (*lock->get_status)(data->status_param, 0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } if (lock->write.data->type == TL_WRITE_ONLY) { /* We are not allowed to get a READ lock in this case */ data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } } else if (!lock->write_wait.data || lock->write_wait.data->type <= TL_WRITE_LOW_PRIORITY || lock_type == TL_READ_HIGH_PRIORITY || have_old_read_lock(lock->read.data, data->owner)) { /* No important write-locks */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if (lock->get_status) (*lock->get_status)(data->status_param, 0); if (lock_type == TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with no write locks",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } /* We're here if there is an active write lock or no write lock but a high priority write waiting in the write_wait queue. In the latter case we should yield the lock to the writer. 
*/ wait_queue= &lock->read_wait; } else /* Request for WRITE lock */ { if (lock_type == TL_WRITE_DELAYED) { if (lock->write.data && lock->write.data->type == TL_WRITE_ONLY) { data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } /* if there is a TL_WRITE_ALLOW_READ lock, we have to wait for a lock (TL_WRITE_ALLOW_READ is used for ALTER TABLE in MySQL) */ if ((!lock->write.data || lock->write.data->type != TL_WRITE_ALLOW_READ) && !have_specific_lock(lock->write_wait.data,TL_WRITE_ALLOW_READ) && (lock->write.data || lock->read.data)) { /* Add delayed write lock to write_wait queue, and return at once */ (*lock->write_wait.last)=data; data->prev=lock->write_wait.last; lock->write_wait.last= &data->next; data->cond=get_cond(); /* We don't have to do get_status here as we will do it when we change the delayed lock to a real write lock */ statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } else if (lock_type == TL_WRITE_CONCURRENT_INSERT && ! lock->check_status) data->type=lock_type= thr_upgraded_concurrent_insert_lock; if (lock->write.data) /* If there is a write lock */ { if (lock->write.data->type == TL_WRITE_ONLY) { /* We are not allowed to get a lock in this case */ data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } /* The following test will not work if the old lock was a TL_WRITE_ALLOW_WRITE, TL_WRITE_ALLOW_READ or TL_WRITE_DELAYED in the same thread, but this will never happen within MySQL. */ if (thr_lock_owner_equal(data->owner, lock->write.data->owner) || (lock_type == TL_WRITE_ALLOW_WRITE && !lock->write_wait.data && lock->write.data->type == TL_WRITE_ALLOW_WRITE)) { /* We have already got a write lock or all locks are TL_WRITE_ALLOW_WRITE */ DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d", (ulong) lock->write_wait.data, lock->write.data->type)); (*lock->write.last)=data; /* Add to running fifo */ data->prev=lock->write.last; lock->write.last= &data->next; check_locks(lock,"second write lock",0); if (data->lock->get_status) (*data->lock->get_status)(data->status_param, 0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } DBUG_PRINT("lock",("write locked by thread: %ld", lock->write.data->owner->info->thread_id)); } else { DBUG_PRINT("info", ("write_wait.data: 0x%lx", (ulong) lock->write_wait.data)); if (!lock->write_wait.data) { /* no scheduled write locks */ my_bool concurrent_insert= 0; if (lock_type == TL_WRITE_CONCURRENT_INSERT) { concurrent_insert= 1; if ((*lock->check_status)(data->status_param)) { concurrent_insert= 0; data->type=lock_type= thr_upgraded_concurrent_insert_lock; } } if (!lock->read.data || (lock_type <= TL_WRITE_DELAYED && ((lock_type != TL_WRITE_CONCURRENT_INSERT && lock_type != TL_WRITE_ALLOW_WRITE) || !lock->read_no_write_count))) { (*lock->write.last)=data; /* Add as current write lock */ data->prev=lock->write.last; lock->write.last= &data->next; if (data->lock->get_status) (*data->lock->get_status)(data->status_param, concurrent_insert); check_locks(lock,"only write lock",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } DBUG_PRINT("lock",("write locked by thread: %ld type: %d", lock->read.data->owner->info->thread_id, data->type)); } wait_queue= &lock->write_wait; } /* Try to detect a trivial deadlock when using cursors: attempt to lock a table that is already locked by an open cursor within the same connection. lock_owner can be zero if we succumbed to a high priority writer in the write_wait queue. 
*/ lock_owner= lock->read.data ? lock->read.data : lock->write.data; if (lock_owner && lock_owner->owner->info == owner->info) { result= THR_LOCK_DEADLOCK; goto end; } /* Can't get lock yet; Wait for it */ DBUG_RETURN(wait_for_lock(wait_queue, data, 0)); end: pthread_mutex_unlock(&lock->mutex); DBUG_RETURN(result); }
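/*
 * Illustrative sketch, not part of the original source: a minimal caller of
 * the thr_lock() variant above.  It assumes the usual mysys helpers
 * thr_lock_init(), thr_lock_info_init(), thr_lock_data_init(), thr_unlock()
 * and thr_lock_delete() with the signatures used alongside this version of
 * thr_lock(), and that THR_LOCK_OWNER simply wraps a THR_LOCK_INFO pointer
 * (as the owner->info dereferences above suggest).  Error handling is
 * reduced to a single check.
 */
static int example_lock_roundtrip(void)
{
  THR_LOCK lock;
  THR_LOCK_DATA data;
  THR_LOCK_INFO info;
  THR_LOCK_OWNER owner;

  thr_lock_init(&lock);                      /* one THR_LOCK per table */
  thr_lock_info_init(&info);                 /* per-thread lock info */
  thr_lock_data_init(&lock, &data, NULL);    /* one THR_LOCK_DATA per handler */
  owner.info= &info;                         /* assumption: owner wraps info */

  if (thr_lock(&data, &owner, TL_READ) != THR_LOCK_SUCCESS)
    return 1;                                /* aborted or deadlock detected */
  /* ... access the table under the granted lock ... */
  thr_unlock(&data);                         /* wakes up waiting lock requests */

  thr_lock_delete(&lock);
  return 0;
}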
/*!
 * \brief Get a raster category label
 *
 * This routine looks up category <i>rast</i> in the <i>pcats</i>
 * structure and returns a pointer to a string which is the label for
 * the category. A legal pointer is always returned. If the category
 * does not exist in <i>pcats</i>, then a pointer to the empty string
 * "" is returned.
 *
 * <b>Warning:</b> The pointer that is returned points to a hidden
 * static buffer. Successive calls to Rast_get_c_cat() overwrite this
 * buffer.
 *
 * \param rast cell value
 * \param pcats pointer to Categories structure
 * \param data_type map type (CELL, FCELL, DCELL)
 *
 * \return pointer to category label
 * \return "" if category is not found
 */
char *Rast_get_cat(void *rast, struct Categories *pcats,
                   RASTER_MAP_TYPE data_type)
{
    static char label[1024];
    char *f, *l, *v;
    CELL i;
    DCELL val;
    float a[2];
    char fmt[30], value_str[30];

    if (Rast_is_null_value(rast, data_type)) {
        sprintf(label, "no data");
        return label;
    }

    /* first search the list of labels */
    *label = 0;
    val = Rast_get_d_value(rast, data_type);
    i = Rast_quant_get_cell_value(&pcats->q, val);

    G_debug(5, "Rast_get_cat(): val %lf found i %d", val, i);

    if (!Rast_is_c_null_value(&i) && i < pcats->ncats) {
        if (pcats->labels[i] != NULL)
            return pcats->labels[i];
        return label;
    }

    /* generate the label */
    if ((f = pcats->fmt) == NULL)
        return label;

    a[0] = (float)val * pcats->m1 + pcats->a1;
    a[1] = (float)val * pcats->m2 + pcats->a2;

    l = label;
    while (*f) {
        if (*f == '$') {
            f++;
            if (*f == '$')
                *l++ = *f++;
            else if (*f == '?') {
                f++;
                get_cond(&f, v = value_str, val);
                while (*v)
                    *l++ = *v++;
            }
            else if (get_fmt(&f, fmt, &i)) {
                sprintf(v = value_str, fmt, a[i]);
                while (*v)
                    *l++ = *v++;
            }
            else
                *l++ = '$';
        }
        else {
            *l++ = *f++;
        }
    }
    *l = 0;
    return label;
}
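/*
 * Illustrative sketch, not part of the original file: printing the label of
 * one cell via Rast_get_cat().  It assumes the usual GRASS raster calls
 * Rast_open_old(), Rast_read_cats(), Rast_allocate_c_buf(), Rast_get_c_row(),
 * Rast_free_cats() and Rast_close(); the function name and error handling
 * are made up for the example.
 */
#include <grass/gis.h>
#include <grass/raster.h>

void print_label_of_first_cell(const char *name, const char *mapset, int row)
{
    struct Categories cats;
    int fd = Rast_open_old(name, mapset);
    CELL *buf = Rast_allocate_c_buf();

    if (Rast_read_cats(name, mapset, &cats) < 0)
        G_fatal_error("no category file for <%s>", name);

    Rast_get_c_row(fd, buf, row);
    /* note: copy the result before calling Rast_get_cat() again, since it
       may return a pointer into its hidden static buffer (see warning above) */
    G_message("cell %d -> '%s'", buf[0],
              Rast_get_cat(&buf[0], &cats, CELL_TYPE));

    Rast_free_cats(&cats);
    G_free(buf);
    Rast_close(fd);
}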
/* * parse one line from magic file, put into magic[index++] if valid */ static int parse(RMagic *ms, struct r_magic_entry **mentryp, ut32 *nmentryp, const char *line, size_t lineno, int action) { static ut32 last_cont_level = 0; size_t i; struct r_magic_entry *me; struct r_magic *m; const char *l = line; char *t; int op; ut32 cont_level = 0; for (; *l == '>'; l++, cont_level++); if (cont_level == 0 || cont_level > last_cont_level) if (file_check_mem (ms, cont_level) == -1) return -1; last_cont_level = cont_level; #define ALLOC_CHUNK (size_t)10 #define ALLOC_INCR (size_t)200 if (cont_level != 0) { if (*nmentryp == 0) { file_error(ms, 0, "No current entry for continuation"); return -1; } me = &(*mentryp)[*nmentryp - 1]; if (me->cont_count == me->max_count) { struct r_magic *nm; size_t cnt = me->max_count + ALLOC_CHUNK; if (!(nm = realloc(me->mp, sizeof (*nm) * cnt))) { file_oomem(ms, sizeof (*nm) * cnt); return -1; } me->mp = nm; me->max_count = cnt; } m = &me->mp[me->cont_count++]; (void)memset(m, 0, sizeof (*m)); m->cont_level = cont_level; } else { if (*nmentryp == maxmagic) { struct r_magic_entry *mp; maxmagic += ALLOC_INCR; if (!(mp = realloc (*mentryp, sizeof (*mp) * maxmagic))) { file_oomem (ms, sizeof (*mp) * maxmagic); return -1; } (void)memset(&mp[*nmentryp], 0, sizeof (*mp) * ALLOC_INCR); *mentryp = mp; } me = &(*mentryp)[*nmentryp]; if (!me->mp) { if (!(m = malloc (sizeof (*m) * ALLOC_CHUNK))) { file_oomem (ms, sizeof (*m) * ALLOC_CHUNK); return -1; } me->mp = m; me->max_count = ALLOC_CHUNK; } else m = me->mp; (void)memset(m, 0, sizeof (*m)); m->cont_level = 0; me->cont_count = 1; } m->lineno = lineno; if (*l == '&') { /* m->cont_level == 0 checked below. */ ++l; /* step over */ m->flag |= OFFADD; } if (*l == '(') { ++l; /* step over */ m->flag |= INDIR; if (m->flag & OFFADD) m->flag = (m->flag & ~OFFADD) | INDIROFFADD; if (*l == '&') { /* m->cont_level == 0 checked below */ ++l; /* step over */ m->flag |= OFFADD; } } /* Indirect offsets are not valid at level 0. 
*/ if (m->cont_level == 0 && (m->flag & (OFFADD | INDIROFFADD))) if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "relative offset at level 0"); /* get offset, then skip over it */ m->offset = (ut32)strtoul(l, &t, 0); if ((l == t) && (ms->flags & R_MAGIC_CHECK)) file_magwarn(ms, "offset `%s' invalid", l); l = t; if (m->flag & INDIR) { m->in_type = FILE_LONG; m->in_offset = 0; /* * read [.lbs][+-]nnnnn) */ if (*l == '.') { l++; switch (*l) { case 'l': m->in_type = FILE_LELONG; break; case 'L': m->in_type = FILE_BELONG; break; case 'm': m->in_type = FILE_MELONG; break; case 'h': case 's': m->in_type = FILE_LESHORT; break; case 'H': case 'S': m->in_type = FILE_BESHORT; break; case 'c': case 'b': case 'C': case 'B': m->in_type = FILE_BYTE; break; case 'e': case 'f': case 'g': m->in_type = FILE_LEDOUBLE; break; case 'E': case 'F': case 'G': m->in_type = FILE_BEDOUBLE; break; default: if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "indirect offset type `%c' invalid", *l); break; } l++; } m->in_op = 0; if (*l == '~') { m->in_op |= FILE_OPINVERSE; l++; } if ((op = get_op(*l)) != -1) { m->in_op |= op; l++; } if (*l == '(') { m->in_op |= FILE_OPINDIRECT; l++; } if (isdigit((ut8)*l) || *l == '-') { m->in_offset = (int32_t)strtol(l, &t, 0); if (l == t) if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "in_offset `%s' invalid", l); l = t; } if (*l++ != ')' || ((m->in_op & FILE_OPINDIRECT) && *l++ != ')')) if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "missing ')' in indirect offset"); } EATAB; m->cond = get_cond(l, &l); if (check_cond(ms, m->cond, cont_level) == -1) return -1; EATAB; if (*l == 'u') { ++l; m->flag |= UNSIGNED; } m->type = get_type(l, &l); if (m->type == FILE_INVALID) { if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "type `%s' invalid", l); return -1; } /* New-style anding: "0 byte&0x80 =0x80 dynamically linked" */ /* New and improved: ~ & | ^ + - * / % -- exciting, isn't it? 
*/ m->mask_op = 0; if (*l == '~') { if (!MAGIC_IS_STRING (m->type)) m->mask_op |= FILE_OPINVERSE; else if (ms->flags & R_MAGIC_CHECK) file_magwarn (ms, "'~' invalid for string types"); ++l; } m->str_range = 0; m->str_flags = 0; m->num_mask = 0; if ((op = get_op (*l)) != -1) { if (!MAGIC_IS_STRING (m->type)) { ut64 val; ++l; m->mask_op |= op; val = (ut64)strtoull (l, &t, 0); l = t; m->num_mask = file_signextend (ms, m, val); eatsize (&l); } else if (op == FILE_OPDIVIDE) { int have_range = 0; while (!isspace ((ut8)*++l)) { switch (*l) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (have_range && (ms->flags & R_MAGIC_CHECK)) file_magwarn(ms, "multiple ranges"); have_range = 1; m->str_range = strtoul(l, &t, 0); if (m->str_range == 0) file_magwarn(ms, "zero range"); l = t - 1; break; case CHAR_COMPACT_BLANK: m->str_flags |= STRING_COMPACT_BLANK; break; case CHAR_COMPACT_OPTIONAL_BLANK: m->str_flags |= STRING_COMPACT_OPTIONAL_BLANK; break; case CHAR_IGNORE_LOWERCASE: m->str_flags |= STRING_IGNORE_LOWERCASE; break; case CHAR_IGNORE_UPPERCASE: m->str_flags |= STRING_IGNORE_UPPERCASE; break; case CHAR_REGEX_OFFSET_START: m->str_flags |= REGEX_OFFSET_START; break; default: if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "string extension `%c' invalid", *l); return -1; } /* allow multiple '/' for readability */ if (l[1] == '/' && !isspace ((ut8)l[2])) l++; } if (string_modifier_check(ms, m) == -1) return -1; } else { if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "invalid string op: %c", *t); return -1; } } /* * We used to set mask to all 1's here, instead let's just not do * anything if mask = 0 (unless you have a better idea) */ EATAB; switch (*l) { case '>': case '<': /* Old-style anding: "0 byte &0x80 dynamically linked" */ case '&': case '^': case '=': m->reln = *l; ++l; if (*l == '=') { /* HP compat: ignore &= etc. */ ++l; } break; case '!': m->reln = *l; ++l; break; default: m->reln = '='; /* the default relation */ if (*l == 'x' && ((isascii((ut8)l[1]) && isspace ((ut8)l[1])) || !l[1])) { m->reln = *l; ++l; } break; } /* * Grab the value part, except for an 'x' reln. */ if (m->reln != 'x' && getvalue (ms, m, &l, action)) return -1; /* * TODO finish this macro and start using it! * #define offsetcheck {if (offset > HOWMANY-1) * magwarn("offset too big"); } */ /* * Now get last part - the description */ EATAB; if (l[0] == '\b') { ++l; m->flag |= NOSPACE; } else if ((l[0] == '\\') && (l[1] == 'b')) { ++l; ++l; m->flag |= NOSPACE; } for (i = 0; (m->desc[i++] = *l++) != '\0' && i < sizeof (m->desc); ) continue; if (i == sizeof (m->desc)) { m->desc[sizeof (m->desc) - 1] = '\0'; if (ms->flags & R_MAGIC_CHECK) file_magwarn(ms, "description `%s' truncated", m->desc); } /* * We only do this check while compiling, or if any of the magic * files were not compiled. */ if (ms->flags & R_MAGIC_CHECK) if (check_format (ms, m) == -1) return -1; if (action == FILE_CHECK) file_mdump (m); m->mimetype[0] = '\0'; /* initialise MIME type to none */ if (m->cont_level == 0) ++(*nmentryp); /* make room for next */ return 0; }
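/*
 * Illustrative sketch, not from the original source: the kind of magic(5)
 * lines the parse() routine above consumes.  Leading '>' characters set the
 * continuation level, then come the offset, an optional 'u'/type[&mask],
 * the relation plus value, and the description; a parenthesised offset such
 * as (0x20.l) is an indirect offset read from the file itself.
 *
 *   0           string          \177ELF         ELF
 *   >4          byte            1               32-bit
 *   >5          byte            1               LSB
 *   >(0x20.l)   lelong          >0              program header present
 *   0           beshort&0xfff0  0xfff0          masked match example
 */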
enum enum_thr_lock_result thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, enum thr_lock_type lock_type, ulong lock_wait_timeout) { THR_LOCK *lock=data->lock; enum enum_thr_lock_result result= THR_LOCK_SUCCESS; struct st_lock_list *wait_queue; DBUG_ENTER("thr_lock"); data->next=0; data->cond=0; /* safety */ data->type=lock_type; data->owner= owner; /* Must be reset ! */ mysql_mutex_lock(&lock->mutex); DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx type: %d", (long) data, data->owner->thread_id, (long) lock, (int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? "enter read_lock" : "enter write_lock",0); if ((int) lock_type <= (int) TL_READ_NO_INSERT) { /* Request for READ lock */ if (lock->write.data) { /* We can allow a read lock even if there is already a write lock on the table if they are owned by the same thread or if they satisfy the following lock compatibility matrix: Request /------- H|++++ WRITE_ALLOW_WRITE e|+++- WRITE_CONCURRENT_INSERT l|++++ WRITE_DELAYED d |||| |||\= READ_NO_INSERT ||\ = READ_HIGH_PRIORITY |\ = READ_WITH_SHARED_LOCKS \ = READ + = Request can be satisified. - = Request cannot be satisified. READ_NO_INSERT and WRITE_ALLOW_WRITE should in principle be incompatible. However this will cause starvation of LOCK TABLE READ in InnoDB under high write load. See Bug#42147 for more information. */ DBUG_PRINT("lock",("write locked 1 by thread: 0x%lx", lock->write.data->owner->thread_id)); if (thr_lock_owner_equal(data->owner, lock->write.data->owner) || (lock->write.data->type <= TL_WRITE_DELAYED && (((int) lock_type <= (int) TL_READ_HIGH_PRIORITY) || (lock->write.data->type != TL_WRITE_CONCURRENT_INSERT)))) { /* Already got a write lock */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if (lock_type == TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with old write lock",0); if (lock->get_status) (*lock->get_status)(data->status_param, 0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } if (lock->write.data->type == TL_WRITE_ONLY) { /* We are not allowed to get a READ lock in this case */ data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } } else if (!lock->write_wait.data || lock->write_wait.data->type <= TL_WRITE_LOW_PRIORITY || lock_type == TL_READ_HIGH_PRIORITY || has_old_lock(lock->read.data, data->owner)) /* Has old read lock */ { /* No important write-locks */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if (lock->get_status) (*lock->get_status)(data->status_param, 0); if (lock_type == TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with no write locks",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } /* We're here if there is an active write lock or no write lock but a high priority write waiting in the write_wait queue. In the latter case we should yield the lock to the writer. 
*/ wait_queue= &lock->read_wait; } else /* Request for WRITE lock */ { if (lock_type == TL_WRITE_DELAYED) { if (lock->write.data && lock->write.data->type == TL_WRITE_ONLY) { data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } if (lock->write.data || lock->read.data) { /* Add delayed write lock to write_wait queue, and return at once */ (*lock->write_wait.last)=data; data->prev=lock->write_wait.last; lock->write_wait.last= &data->next; data->cond=get_cond(); /* We don't have to do get_status here as we will do it when we change the delayed lock to a real write lock */ statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } else if (lock_type == TL_WRITE_CONCURRENT_INSERT && ! lock->check_status) data->type=lock_type= thr_upgraded_concurrent_insert_lock; if (lock->write.data) /* If there is a write lock */ { if (lock->write.data->type == TL_WRITE_ONLY) { /* purecov: begin tested */ /* Allow lock owner to bypass TL_WRITE_ONLY. */ if (!thr_lock_owner_equal(data->owner, lock->write.data->owner)) { /* We are not allowed to get a lock in this case */ data->type=TL_UNLOCK; result= THR_LOCK_ABORTED; /* Can't wait for this one */ goto end; } /* purecov: end */ } /* The idea is to allow us to get a lock at once if we already have a write lock or if there is no pending write locks and if all write locks are of TL_WRITE_ALLOW_WRITE type. Note that, since lock requests for the same table are sorted in such way that requests with higher thr_lock_type value come first (with one exception (*)), lock being requested usually (**) has equal or "weaker" type than one which thread might have already acquired. *) The only exception to this rule is case when type of old lock is TL_WRITE_LOW_PRIORITY and type of new lock is changed inside of thr_lock() from TL_WRITE_CONCURRENT_INSERT to TL_WRITE since engine turns out to be not supporting concurrent inserts. Note that since TL_WRITE has the same compatibility rules as TL_WRITE_LOW_PRIORITY (their only difference is priority), it is OK to grant new lock without additional checks in such situation. **) The exceptions are situations when: - when old lock type is TL_WRITE_DELAYED But these should never happen within MySQL. Therefore it is OK to allow acquiring write lock on the table if this thread already holds some write lock on it. (INSERT INTO t1 VALUES (f1()), where f1() is stored function which tries to update t1, is an example of statement which requests two different types of write lock on the same table). */ DBUG_ASSERT(! has_old_lock(lock->write.data, data->owner) || ((lock_type <= lock->write.data->type || (lock_type == TL_WRITE && lock->write.data->type == TL_WRITE_LOW_PRIORITY)) && lock->write.data->type != TL_WRITE_DELAYED)); if ((lock_type == TL_WRITE_ALLOW_WRITE && ! 
lock->write_wait.data && lock->write.data->type == TL_WRITE_ALLOW_WRITE) || has_old_lock(lock->write.data, data->owner)) { /* We have already got a write lock or all locks are TL_WRITE_ALLOW_WRITE */ DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d", (ulong) lock->write_wait.data, lock->write.data->type)); (*lock->write.last)=data; /* Add to running fifo */ data->prev=lock->write.last; lock->write.last= &data->next; check_locks(lock,"second write lock",0); if (data->lock->get_status) (*data->lock->get_status)(data->status_param, 0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } DBUG_PRINT("lock",("write locked 2 by thread: 0x%lx", lock->write.data->owner->thread_id)); } else { DBUG_PRINT("info", ("write_wait.data: 0x%lx", (ulong) lock->write_wait.data)); if (!lock->write_wait.data) { /* no scheduled write locks */ my_bool concurrent_insert= 0; if (lock_type == TL_WRITE_CONCURRENT_INSERT) { concurrent_insert= 1; if ((*lock->check_status)(data->status_param)) { concurrent_insert= 0; data->type=lock_type= thr_upgraded_concurrent_insert_lock; } } if (!lock->read.data || (lock_type <= TL_WRITE_DELAYED && ((lock_type != TL_WRITE_CONCURRENT_INSERT && lock_type != TL_WRITE_ALLOW_WRITE) || !lock->read_no_write_count))) { (*lock->write.last)=data; /* Add as current write lock */ data->prev=lock->write.last; lock->write.last= &data->next; if (data->lock->get_status) (*data->lock->get_status)(data->status_param, concurrent_insert); check_locks(lock,"only write lock",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } DBUG_PRINT("lock",("write locked 3 by thread: 0x%lx type: %d", lock->read.data->owner->thread_id, data->type)); } wait_queue= &lock->write_wait; } /* Can't get lock yet; Wait for it */ DBUG_RETURN(wait_for_lock(wait_queue, data, 0, lock_wait_timeout)); end: mysql_mutex_unlock(&lock->mutex); DBUG_RETURN(result); }
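/*
 * Illustrative sketch, not part of thr_lock.c: the read/write compatibility
 * matrix described in the comment inside thr_lock() above, written out as a
 * predicate.  With the thr_lock_type ordering used here (TL_READ <
 * TL_READ_WITH_SHARED_LOCKS < TL_READ_HIGH_PRIORITY < TL_READ_NO_INSERT),
 * the only incompatible pair is TL_WRITE_CONCURRENT_INSERT vs.
 * TL_READ_NO_INSERT; same-owner short cuts and TL_WRITE_ONLY handling are
 * deliberately left out.
 */
static my_bool read_compatible_with_active_write(enum thr_lock_type read_type,
                                                 enum thr_lock_type write_type)
{
  if (write_type > TL_WRITE_DELAYED)         /* "real" write locks block reads */
    return 0;
  return !(write_type == TL_WRITE_CONCURRENT_INSERT &&
           read_type == TL_READ_NO_INSERT);
}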
int thr_lock(THR_LOCK_DATA *data,enum thr_lock_type lock_type) { THR_LOCK *lock=data->lock; int result=0; DBUG_ENTER("thr_lock"); data->next=0; data->cond=0; /* safety */ data->type=lock_type; data->thread=pthread_self(); /* Must be reset ! */ data->thread_id=my_thread_id(); /* Must be reset ! */ VOID(pthread_mutex_lock(&lock->mutex)); DBUG_PRINT("lock",("data: %lx thread: %ld lock: %lx type: %d", data,data->thread_id,lock,(int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? "enter read_lock" : "enter write_lock",0); if ((int) lock_type <= (int) TL_READ_NO_INSERT) { /* Request for READ lock */ if (lock->write.data) { /* We can allow a read lock even if there is already a write lock on the table in one the following cases: - This thread alread have a write lock on the table - The write lock is TL_WRITE_ALLOW_READ or TL_WRITE_DELAYED and the read lock is TL_READ_HIGH_PRIORITY or TL_READ - The write lock is TL_WRITE_CONCURRENT_INSERT or TL_WRITE_ALLOW_WRITE and the read lock is not TL_READ_NO_INSERT */ DBUG_PRINT("lock",("write locked by thread: %ld", lock->write.data->thread_id)); if (pthread_equal(data->thread,lock->write.data->thread) || (lock->write.data->type <= TL_WRITE_DELAYED && (((int) lock_type <= (int) TL_READ_HIGH_PRIORITY) || (lock->write.data->type != TL_WRITE_CONCURRENT_INSERT && lock->write.data->type != TL_WRITE_ALLOW_READ)))) { /* Already got a write lock */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if ((int) lock_type == (int) TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with old write lock",0); if (lock->get_status) (*lock->get_status)(data->status_param); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } if (lock->write.data->type == TL_WRITE_ONLY) { /* We are not allowed to get a READ lock in this case */ data->type=TL_UNLOCK; result=1; /* Can't wait for this one */ goto end; } } else if (!lock->write_wait.data || lock->write_wait.data->type <= TL_WRITE_LOW_PRIORITY || lock_type == TL_READ_HIGH_PRIORITY || have_old_read_lock(lock->read.data,data->thread)) { /* No important write-locks */ (*lock->read.last)=data; /* Add to running FIFO */ data->prev=lock->read.last; lock->read.last= &data->next; if (lock->get_status) (*lock->get_status)(data->status_param); if ((int) lock_type == (int) TL_READ_NO_INSERT) lock->read_no_write_count++; check_locks(lock,"read lock with no write locks",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } /* Can't get lock yet; Wait for it */ DBUG_RETURN(wait_for_lock(&lock->read_wait,data,0)); } else /* Request for WRITE lock */ { if (lock_type == TL_WRITE_DELAYED) { if (lock->write.data && lock->write.data->type == TL_WRITE_ONLY) { data->type=TL_UNLOCK; result=1; /* Can't wait for this one */ goto end; } /* if there is a TL_WRITE_ALLOW_READ lock, we have to wait for a lock (TL_WRITE_ALLOW_READ is used for ALTER TABLE in MySQL) */ if ((!lock->write.data || lock->write.data->type != TL_WRITE_ALLOW_READ) && !have_specific_lock(lock->write_wait.data,TL_WRITE_ALLOW_READ) && (lock->write.data || lock->read.data)) { /* Add delayed write lock to write_wait queue, and return at once */ (*lock->write_wait.last)=data; data->prev=lock->write_wait.last; lock->write_wait.last= &data->next; data->cond=get_cond(); if (lock->get_status) (*lock->get_status)(data->status_param); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } else if (lock_type == TL_WRITE_CONCURRENT_INSERT && ! 
lock->check_status) data->type=lock_type= thr_upgraded_concurrent_insert_lock; if (lock->write.data) /* If there is a write lock */ { if (lock->write.data->type == TL_WRITE_ONLY) { /* We are not allowed to get a lock in this case */ data->type=TL_UNLOCK; result=1; /* Can't wait for this one */ goto end; } /* The following test will not work if the old lock was a TL_WRITE_ALLOW_WRITE, TL_WRITE_ALLOW_READ or TL_WRITE_DELAYED in the same thread, but this will never happen within MySQL. */ if (pthread_equal(data->thread,lock->write.data->thread) || (lock_type == TL_WRITE_ALLOW_WRITE && !lock->write_wait.data && lock->write.data->type == TL_WRITE_ALLOW_WRITE)) { /* We have already got a write lock or all locks are TL_WRITE_ALLOW_WRITE */ (*lock->write.last)=data; /* Add to running fifo */ data->prev=lock->write.last; lock->write.last= &data->next; check_locks(lock,"second write lock",0); if (data->lock->get_status) (*data->lock->get_status)(data->status_param); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } DBUG_PRINT("lock",("write locked by thread: %ld", lock->write.data->thread_id)); } else { if (!lock->write_wait.data) { /* no scheduled write locks */ if (lock_type == TL_WRITE_CONCURRENT_INSERT && (*lock->check_status)(data->status_param)) data->type=lock_type= thr_upgraded_concurrent_insert_lock; if (!lock->read.data || (lock_type <= TL_WRITE_DELAYED && ((lock_type != TL_WRITE_CONCURRENT_INSERT && lock_type != TL_WRITE_ALLOW_WRITE) || !lock->read_no_write_count))) { (*lock->write.last)=data; /* Add as current write lock */ data->prev=lock->write.last; lock->write.last= &data->next; if (data->lock->get_status) (*data->lock->get_status)(data->status_param); check_locks(lock,"only write lock",0); statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } } DBUG_PRINT("lock",("write locked by thread: %ld, type: %ld", lock->read.data->thread_id,data->type)); } DBUG_RETURN(wait_for_lock(&lock->write_wait,data,0)); } end: pthread_mutex_unlock(&lock->mutex); DBUG_RETURN(result); }