static struct value *
value_of_builtin_frame_fp_reg (struct frame_info *frame, const void *baton)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);

  if (gdbarch_deprecated_fp_regnum (gdbarch) >= 0)
    /* NOTE: cagney/2003-04-24: Since the mere presence of "fp" in the
       register name table overrides this built-in $fp register, there is
       no real reason for this gdbarch_deprecated_fp_regnum trickery here.
       An architecture wanting to implement "$fp" as an alias for a raw
       register can do so by adding "fp" to the register name table (mind
       you, doing this is probably a dangerous thing).  */
    return value_of_register (gdbarch_deprecated_fp_regnum (gdbarch), frame);
  else
    {
      struct type *data_ptr_type = builtin_type (gdbarch)->builtin_data_ptr;
      struct value *val = allocate_value (data_ptr_type);
      gdb_byte *buf = value_contents_raw (val);

      if (frame == NULL)
        memset (buf, 0, TYPE_LENGTH (value_type (val)));
      else
        gdbarch_address_to_pointer (gdbarch, data_ptr_type, buf,
                                    get_frame_base_address (frame));
      return val;
    }
}

static struct value *
get_call_return_value (struct call_return_meta_info *ri)
{
  struct value *retval = NULL;
  int stack_temporaries = thread_stack_temporaries_enabled_p (inferior_ptid);

  if (TYPE_CODE (ri->value_type) == TYPE_CODE_VOID)
    retval = allocate_value (ri->value_type);
  else if (ri->struct_return_p)
    {
      if (stack_temporaries)
        {
          retval = value_from_contents_and_address (ri->value_type, NULL,
                                                    ri->struct_addr);
          push_thread_stack_temporary (inferior_ptid, retval);
        }
      else
        {
          retval = allocate_value (ri->value_type);
          read_value_memory (retval, 0, 1, ri->struct_addr,
                             value_contents_raw (retval),
                             TYPE_LENGTH (ri->value_type));
        }
    }
  else
    {
      retval = allocate_value (ri->value_type);
      gdbarch_return_value (ri->gdbarch, ri->function, ri->value_type,
                            get_current_regcache (),
                            value_contents_raw (retval), NULL);
      if (stack_temporaries && class_or_union_p (ri->value_type))
        {
          /* Values of class type returned in registers are copied onto
             the stack and their lval_type set to lval_memory.  This is
             required because further evaluation of the expression could
             potentially invoke methods on the return value requiring GDB
             to evaluate the "this" pointer.  To evaluate the this
             pointer, GDB needs the memory address of the value.  */
          value_force_lval (retval, ri->struct_addr);
          push_thread_stack_temporary (inferior_ptid, retval);
        }
    }

  gdb_assert (retval != NULL);
  return retval;
}

static struct value *
value_of_builtin_frame_pc_reg (struct frame_info *frame, const void *baton)
{
  if (gdbarch_pc_regnum (current_gdbarch) >= 0)
    return value_of_register (gdbarch_pc_regnum (current_gdbarch), frame);
  else
    {
      struct value *val = allocate_value (builtin_type_void_data_ptr);
      gdb_byte *buf = value_contents_raw (val);

      if (frame == NULL)
        memset (buf, 0, TYPE_LENGTH (value_type (val)));
      else
        gdbarch_address_to_pointer (current_gdbarch,
                                    builtin_type_void_data_ptr, buf,
                                    get_frame_pc (frame));
      return val;
    }
}

static struct value *
value_of_builtin_frame_pc_reg (struct frame_info *frame)
{
  if (PC_REGNUM >= 0)
    return value_of_register (PC_REGNUM, frame);
  else
    {
      struct value *val = allocate_value (builtin_type_void_data_ptr);
      gdb_byte *buf = value_contents_raw (val);

      if (frame == NULL)
        memset (buf, 0, TYPE_LENGTH (value_type (val)));
      else
        ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
                            get_frame_pc (frame));
      return val;
    }
}

static struct value *
value_of_builtin_frame_reg (struct frame_info *frame)
{
  struct value *val;
  gdb_byte *buf;

  build_builtin_type_frame_reg ();
  val = allocate_value (builtin_type_frame_reg);
  VALUE_LVAL (val) = not_lval;
  buf = value_contents_raw (val);
  memset (buf, 0, TYPE_LENGTH (value_type (val)));
  /* frame.base.  */
  if (frame != NULL)
    ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
                        get_frame_base (frame));
  buf += TYPE_LENGTH (builtin_type_void_data_ptr);
  /* frame.XXX.  */
  return val;
}

static struct value *
value_of_builtin_frame_pc_reg (struct frame_info *frame, const void *baton)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);

  if (gdbarch_pc_regnum (gdbarch) >= 0)
    return value_of_register (gdbarch_pc_regnum (gdbarch), frame);
  else
    {
      struct type *func_ptr_type = builtin_type (gdbarch)->builtin_func_ptr;
      struct value *val = allocate_value (func_ptr_type);
      gdb_byte *buf = value_contents_raw (val);

      gdbarch_address_to_pointer (gdbarch, func_ptr_type, buf,
                                  get_frame_pc (frame));
      return val;
    }
}

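/* Illustrative sketch (not GDB code): the common/default behaviour of
   gdbarch_address_to_pointer used above is to store the CORE_ADDR as an
   unsigned integer of pointer width in target byte order into the value's
   raw buffer.  The helper below shows that idea in isolation; the names
   store_address_sketch and big_endian_p are made up for this example.  */

#include <stddef.h>
#include <stdint.h>

static void
store_address_sketch (unsigned char *buf, size_t ptr_len,
                      uint64_t addr, int big_endian_p)
{
  for (size_t i = 0; i < ptr_len; i++)
    {
      /* Pick bytes from the least-significant end of ADDR and lay them
         out in the requested byte order.  */
      size_t shift = 8 * (big_endian_p ? ptr_len - 1 - i : i);

      buf[i] = (addr >> shift) & 0xff;
    }
}
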
static struct value *
value_of_builtin_frame_fp_reg (struct frame_info *frame)
{
  if (DEPRECATED_FP_REGNUM >= 0)
    /* NOTE: cagney/2003-04-24: Since the mere presence of "fp" in the
       register name table overrides this built-in $fp register, there is
       no real reason for this DEPRECATED_FP_REGNUM trickery here.  An
       architecture wanting to implement "$fp" as an alias for a raw
       register can do so by adding "fp" to the register name table (mind
       you, doing this is probably a dangerous thing).  */
    return value_of_register (DEPRECATED_FP_REGNUM, frame);
  else
    {
      struct value *val = allocate_value (builtin_type_void_data_ptr);
      gdb_byte *buf = value_contents_raw (val);

      if (frame == NULL)
        memset (buf, 0, TYPE_LENGTH (value_type (val)));
      else
        ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf,
                            get_frame_base_address (frame));
      return val;
    }
}

/* Evaluate a location description, starting at DATA and with length
   SIZE, to find the current location of variable VAR in the context
   of FRAME.  */

static struct value *
dwarf2_evaluate_loc_desc (struct symbol *var, struct frame_info *frame,
                          gdb_byte *data, unsigned short size,
                          struct objfile *objfile)
{
  struct gdbarch *arch = get_frame_arch (frame);
  struct value *retval;
  struct dwarf_expr_baton baton;
  struct dwarf_expr_context *ctx;

  if (size == 0)
    {
      retval = allocate_value (SYMBOL_TYPE (var));
      VALUE_LVAL (retval) = not_lval;
      set_value_optimized_out (retval, 1);
      return retval;
    }

  baton.frame = frame;
  baton.objfile = objfile;

  ctx = new_dwarf_expr_context ();
  ctx->baton = &baton;
  ctx->read_reg = dwarf_expr_read_reg;
  ctx->read_mem = dwarf_expr_read_mem;
  ctx->get_frame_base = dwarf_expr_frame_base;
  ctx->get_tls_address = dwarf_expr_tls_address;

  dwarf_expr_eval (ctx, data, size);
  if (ctx->num_pieces > 0)
    {
      int i;
      long offset = 0;
      bfd_byte *contents;

      retval = allocate_value (SYMBOL_TYPE (var));
      contents = value_contents_raw (retval);
      for (i = 0; i < ctx->num_pieces; i++)
        {
          struct dwarf_expr_piece *p = &ctx->pieces[i];

          if (p->in_reg)
            {
              bfd_byte regval[MAX_REGISTER_SIZE];
              int gdb_regnum = gdbarch_dwarf2_reg_to_regnum (arch, p->value);

              get_frame_register (frame, gdb_regnum, regval);
              memcpy (contents + offset, regval, p->size);
            }
          else /* In memory?  */
            {
              read_memory (p->value, contents + offset, p->size);
            }
          offset += p->size;
        }
    }
  else if (ctx->in_reg)
    {
      CORE_ADDR dwarf_regnum = dwarf_expr_fetch (ctx, 0);
      int gdb_regnum = gdbarch_dwarf2_reg_to_regnum (arch, dwarf_regnum);

      retval = value_from_register (SYMBOL_TYPE (var), gdb_regnum, frame);
    }
  else
    {
      CORE_ADDR address = dwarf_expr_fetch (ctx, 0);

      retval = allocate_value (SYMBOL_TYPE (var));
      VALUE_LVAL (retval) = lval_memory;
      set_value_lazy (retval, 1);
      VALUE_ADDRESS (retval) = address;
    }

  set_value_initialized (retval, ctx->initialized);

  free_dwarf_expr_context (ctx);

  return retval;
}

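/* Illustrative sketch (not GDB code): how a DW_OP_piece composite is
   assembled.  Each piece names either a register or a memory address and
   contributes SIZE bytes at an increasing offset of the result buffer,
   which is what the ctx->num_pieces loop above does with
   get_frame_register and read_memory.  The piece struct and the two fetch
   callbacks here are invented for the example.  */

#include <stddef.h>

struct piece_sketch
{
  int in_reg;                   /* Nonzero: VALUE is a register number.  */
  unsigned long long value;     /* Register number or memory address.  */
  size_t size;                  /* Number of bytes this piece covers.  */
};

static void
compose_pieces_sketch (unsigned char *contents,
                       const struct piece_sketch *pieces, int num_pieces,
                       void (*fetch_reg) (int regnum, unsigned char *buf,
                                          size_t len),
                       void (*fetch_mem) (unsigned long long addr,
                                          unsigned char *buf, size_t len))
{
  size_t offset = 0;

  for (int i = 0; i < num_pieces; i++)
    {
      const struct piece_sketch *p = &pieces[i];

      if (p->in_reg)
        fetch_reg ((int) p->value, contents + offset, p->size);
      else
        fetch_mem (p->value, contents + offset, p->size);
      offset += p->size;
    }
}
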
static void
parse_find_args (char *args, ULONGEST *max_countp,
                 char **pattern_bufp, ULONGEST *pattern_lenp,
                 CORE_ADDR *start_addrp, ULONGEST *search_space_lenp,
                 bfd_boolean big_p)
{
  /* Default to using the specified type.  */
  char size = '\0';
  ULONGEST max_count = ~(ULONGEST) 0;

  /* Buffer to hold the search pattern.  */
  char *pattern_buf;
  /* Current size of search pattern buffer.
     We realloc space as needed.  */
#define INITIAL_PATTERN_BUF_SIZE 100
  ULONGEST pattern_buf_size = INITIAL_PATTERN_BUF_SIZE;
  /* Pointer to one past the last in-use part of pattern_buf.  */
  char *pattern_buf_end;
  ULONGEST pattern_len;
  CORE_ADDR start_addr;
  ULONGEST search_space_len;
  char *s = args;
  struct cleanup *old_cleanups;
  struct value *v;

  if (args == NULL)
    error (_("Missing search parameters."));

  pattern_buf = xmalloc (pattern_buf_size);
  pattern_buf_end = pattern_buf;
  old_cleanups = make_cleanup (free_current_contents, &pattern_buf);

  /* Get search granularity and/or max count if specified.
     They may be specified in either order, together or separately.  */

  while (*s == '/')
    {
      ++s;

      while (*s != '\0' && *s != '/' && !isspace (*s))
        {
          if (isdigit (*s))
            {
              max_count = atoi (s);
              while (isdigit (*s))
                ++s;
              continue;
            }

          switch (*s)
            {
            case 'b':
            case 'h':
            case 'w':
            case 'g':
              size = *s++;
              break;
            default:
              error (_("Invalid size granularity."));
            }
        }

      while (isspace (*s))
        ++s;
    }

  /* Get the search range.  */

  v = parse_to_comma_and_eval (&s);
  start_addr = value_as_address (v);

  if (*s == ',')
    ++s;
  while (isspace (*s))
    ++s;

  if (*s == '+')
    {
      LONGEST len;

      ++s;
      v = parse_to_comma_and_eval (&s);
      len = value_as_long (v);
      if (len == 0)
        {
          printf_filtered (_("Empty search range.\n"));
          return;
        }
      if (len < 0)
        error (_("Invalid length."));
      /* Watch for overflows.  */
      if (len > CORE_ADDR_MAX
          || (start_addr + len - 1) < start_addr)
        error (_("Search space too large."));
      search_space_len = len;
    }
  else
    {
      CORE_ADDR end_addr;

      v = parse_to_comma_and_eval (&s);
      end_addr = value_as_address (v);
      if (start_addr > end_addr)
        error (_("Invalid search space, end precedes start."));
      search_space_len = end_addr - start_addr + 1;
      /* We don't support searching all of memory
         (i.e. start=0, end = 0xff..ff).
         Bail to avoid overflows later on.  */
      if (search_space_len == 0)
        error (_("Overflow in address range computation, choose smaller range."));
    }

  if (*s == ',')
    ++s;

  /* Fetch the search string.  */

  while (*s != '\0')
    {
      LONGEST x;
      int val_bytes;

      while (isspace (*s))
        ++s;

      v = parse_to_comma_and_eval (&s);
      val_bytes = TYPE_LENGTH (value_type (v));

      /* Keep it simple and assume size == 'g' when watching for when we
         need to grow the pattern buf.  */
      if ((pattern_buf_end - pattern_buf + max (val_bytes, sizeof (int64_t)))
          > pattern_buf_size)
        {
          size_t current_offset = pattern_buf_end - pattern_buf;

          pattern_buf_size *= 2;
          pattern_buf = xrealloc (pattern_buf, pattern_buf_size);
          pattern_buf_end = pattern_buf + current_offset;
        }

      if (size != '\0')
        {
          x = value_as_long (v);
          switch (size)
            {
            case 'b':
              *pattern_buf_end++ = x;
              break;
            case 'h':
              put_bits (x, pattern_buf_end, 16, big_p);
              pattern_buf_end += sizeof (int16_t);
              break;
            case 'w':
              put_bits (x, pattern_buf_end, 32, big_p);
              pattern_buf_end += sizeof (int32_t);
              break;
            case 'g':
              put_bits (x, pattern_buf_end, 64, big_p);
              pattern_buf_end += sizeof (int64_t);
              break;
            }
        }
      else
        {
          memcpy (pattern_buf_end, value_contents_raw (v), val_bytes);
          pattern_buf_end += val_bytes;
        }

      if (*s == ',')
        ++s;
      while (isspace (*s))
        ++s;
    }

  if (pattern_buf_end == pattern_buf)
    error (_("Missing search pattern."));

  pattern_len = pattern_buf_end - pattern_buf;

  if (search_space_len < pattern_len)
    error (_("Search space too small to contain pattern."));

  *max_countp = max_count;
  *pattern_bufp = pattern_buf;
  *pattern_lenp = pattern_len;
  *start_addrp = start_addr;
  *search_space_lenp = search_space_len;

  /* We successfully parsed the arguments, leave the freeing of
     PATTERN_BUF to the caller now.  */
  discard_cleanups (old_cleanups);
}

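/* Illustrative sketch (not GDB code): the buffer-growth strategy used in
   parse_find_args above.  Before appending a value, make sure there is
   room for the worst case (8 bytes, the 'g' granularity), doubling the
   allocation as needed so appends stay amortized linear.
   ensure_room_sketch and its parameters are invented for this example.  */

#include <stdlib.h>

static char *
ensure_room_sketch (char *buf, size_t *size, size_t used, size_t need)
{
  /* Grow by doubling until NEED more bytes fit after USED.  */
  while (used + need > *size)
    *size *= 2;
  buf = realloc (buf, *size);
  if (buf == NULL)
    abort ();                   /* GDB's xrealloc errors out instead.  */
  return buf;
}
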
struct value *
evaluate_subexp_c (struct type *expect_type, struct expression *exp,
                   int *pos, enum noside noside)
{
  enum exp_opcode op = exp->elts[*pos].opcode;

  switch (op)
    {
    case OP_STRING:
      {
        int oplen, limit;
        struct type *type;
        struct obstack output;
        struct cleanup *cleanup;
        struct value *result;
        enum c_string_type dest_type;
        const char *dest_charset;
        int satisfy_expected = 0;

        obstack_init (&output);
        cleanup = make_cleanup_obstack_free (&output);

        ++*pos;
        oplen = longest_to_int (exp->elts[*pos].longconst);

        ++*pos;
        limit = *pos + BYTES_TO_EXP_ELEM (oplen + 1);
        dest_type
          = (enum c_string_type) longest_to_int (exp->elts[*pos].longconst);
        switch (dest_type & ~C_CHAR)
          {
          case C_STRING:
            type = language_string_char_type (exp->language_defn,
                                              exp->gdbarch);
            break;
          case C_WIDE_STRING:
            type = lookup_typename (exp->language_defn, exp->gdbarch,
                                    "wchar_t", NULL, 0);
            break;
          case C_STRING_16:
            type = lookup_typename (exp->language_defn, exp->gdbarch,
                                    "char16_t", NULL, 0);
            break;
          case C_STRING_32:
            type = lookup_typename (exp->language_defn, exp->gdbarch,
                                    "char32_t", NULL, 0);
            break;
          default:
            internal_error (__FILE__, __LINE__, _("unhandled c_string_type"));
          }

        /* Ensure TYPE_LENGTH is valid for TYPE.  */
        check_typedef (type);

        /* If the caller expects an array of some integral type,
           satisfy them.  If something odder is expected, rely on the
           caller to cast.  */
        if (expect_type && TYPE_CODE (expect_type) == TYPE_CODE_ARRAY)
          {
            struct type *element_type
              = check_typedef (TYPE_TARGET_TYPE (expect_type));

            if (TYPE_CODE (element_type) == TYPE_CODE_INT
                || TYPE_CODE (element_type) == TYPE_CODE_CHAR)
              {
                type = element_type;
                satisfy_expected = 1;
              }
          }

        dest_charset = charset_for_string_type (dest_type, exp->gdbarch);

        ++*pos;
        while (*pos < limit)
          {
            int len;

            len = longest_to_int (exp->elts[*pos].longconst);

            ++*pos;
            if (noside != EVAL_SKIP)
              parse_one_string (&output, &exp->elts[*pos].string, len,
                                dest_charset, type);
            *pos += BYTES_TO_EXP_ELEM (len);
          }

        /* Skip the trailing length and opcode.  */
        *pos += 2;

        if (noside == EVAL_SKIP)
          {
            /* Return a dummy value of the appropriate type.  */
            if (expect_type != NULL)
              result = allocate_value (expect_type);
            else if ((dest_type & C_CHAR) != 0)
              result = allocate_value (type);
            else
              result = value_cstring ("", 0, type);
            do_cleanups (cleanup);
            return result;
          }

        if ((dest_type & C_CHAR) != 0)
          {
            LONGEST value;

            if (obstack_object_size (&output) != TYPE_LENGTH (type))
              error (_("Could not convert character "
                       "constant to target character set"));
            value = unpack_long (type, (gdb_byte *) obstack_base (&output));
            result = value_from_longest (type, value);
          }
        else
          {
            int i;

            /* Write the terminating character.  */
            for (i = 0; i < TYPE_LENGTH (type); ++i)
              obstack_1grow (&output, 0);

            if (satisfy_expected)
              {
                LONGEST low_bound, high_bound;
                int element_size = TYPE_LENGTH (type);

                if (get_discrete_bounds (TYPE_INDEX_TYPE (expect_type),
                                         &low_bound, &high_bound) < 0)
                  {
                    low_bound = 0;
                    high_bound = (TYPE_LENGTH (expect_type) / element_size) - 1;
                  }
                if (obstack_object_size (&output) / element_size
                    > (high_bound - low_bound + 1))
                  error (_("Too many array elements"));

                result = allocate_value (expect_type);
                memcpy (value_contents_raw (result), obstack_base (&output),
                        obstack_object_size (&output));
              }
            else
              result = value_cstring (obstack_base (&output),
                                      obstack_object_size (&output),
                                      type);
          }
        do_cleanups (cleanup);
        return result;
      }
      break;

    default:
      break;
    }
  return evaluate_subexp_standard (expect_type, exp, pos, noside);
}

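/* Illustrative sketch (not GDB code): the dest_type handling above treats
   C_CHAR as a flag bit OR'ed onto the string kind, so `dest_type & ~C_CHAR`
   recovers the kind and `dest_type & C_CHAR` distinguishes a character
   constant from a string literal.  The enumerator values below are
   assumptions made up for the example, not GDB's definitions.  */

#include <stdio.h>

enum string_kind_sketch
{
  KIND_NARROW = 0,
  KIND_WIDE = 1,
  KIND_UTF16 = 2,
  KIND_UTF32 = 3,
  FLAG_CHAR = 4                 /* Set for 'x', clear for "x".  */
};

int
main (void)
{
  int dest_type = KIND_WIDE | FLAG_CHAR;        /* e.g. L'x'  */

  printf ("kind=%d is_char=%d\n",
          dest_type & ~FLAG_CHAR, (dest_type & FLAG_CHAR) != 0);
  return 0;
}
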
static struct value *
scalar_binop (struct value *arg1, struct value *arg2, enum exp_opcode op)
{
  struct value *val;
  struct type *type1, *type2, *result_type;

  arg1 = coerce_ref (arg1);
  arg2 = coerce_ref (arg2);

  type1 = check_typedef (value_type (arg1));
  type2 = check_typedef (value_type (arg2));

  if ((TYPE_CODE (type1) != TYPE_CODE_FLT
       && TYPE_CODE (type1) != TYPE_CODE_DECFLOAT
       && !is_integral_type (type1))
      || (TYPE_CODE (type2) != TYPE_CODE_FLT
          && TYPE_CODE (type2) != TYPE_CODE_DECFLOAT
          && !is_integral_type (type2)))
    error (_("Argument to arithmetic operation not a number or boolean."));

  if (TYPE_CODE (type1) == TYPE_CODE_DECFLOAT
      || TYPE_CODE (type2) == TYPE_CODE_DECFLOAT)
    {
      int len_v1, len_v2, len_v;
      enum bfd_endian byte_order_v1, byte_order_v2, byte_order_v;
      gdb_byte v1[16], v2[16];
      gdb_byte v[16];

      /* If only one type is decimal float, use its type.
         Otherwise use the bigger type.  */
      if (TYPE_CODE (type1) != TYPE_CODE_DECFLOAT)
        result_type = type2;
      else if (TYPE_CODE (type2) != TYPE_CODE_DECFLOAT)
        result_type = type1;
      else if (TYPE_LENGTH (type2) > TYPE_LENGTH (type1))
        result_type = type2;
      else
        result_type = type1;

      len_v = TYPE_LENGTH (result_type);
      byte_order_v = gdbarch_byte_order (get_type_arch (result_type));

      value_args_as_decimal (arg1, arg2, v1, &len_v1, &byte_order_v1,
                             v2, &len_v2, &byte_order_v2);

      switch (op)
        {
        case BINOP_ADD:
        case BINOP_SUB:
        case BINOP_MUL:
        case BINOP_DIV:
        case BINOP_EXP:
          decimal_binop (op, v1, len_v1, byte_order_v1,
                         v2, len_v2, byte_order_v2,
                         v, len_v, byte_order_v);
          break;
        default:
          error (_("Operation not valid for decimal floating point number."));
        }

      val = value_from_decfloat (result_type, v);
    }
  else if (TYPE_CODE (type1) == TYPE_CODE_FLT
           || TYPE_CODE (type2) == TYPE_CODE_FLT)
    {
      /* FIXME-if-picky-about-floating-accuracy: Should be doing this
         in target format.  real.c in GCC probably has the necessary
         code.  */
      DOUBLEST v1, v2, v = 0;

      v1 = value_as_double (arg1);
      v2 = value_as_double (arg2);

      switch (op)
        {
        case BINOP_ADD:
          v = v1 + v2;
          break;
        case BINOP_SUB:
          v = v1 - v2;
          break;
        case BINOP_MUL:
          v = v1 * v2;
          break;
        case BINOP_DIV:
          v = v1 / v2;
          break;
        case BINOP_EXP:
          errno = 0;
          v = pow (v1, v2);
          if (errno)
            error (_("Cannot perform exponentiation: %s"),
                   safe_strerror (errno));
          break;
        case BINOP_MIN:
          v = v1 < v2 ? v1 : v2;
          break;
        case BINOP_MAX:
          v = v1 > v2 ? v1 : v2;
          break;
        default:
          error (_("Integer-only operation on floating point number."));
        }

      /* If only one type is float, use its type.
         Otherwise use the bigger type.  */
      if (TYPE_CODE (type1) != TYPE_CODE_FLT)
        result_type = type2;
      else if (TYPE_CODE (type2) != TYPE_CODE_FLT)
        result_type = type1;
      else if (TYPE_LENGTH (type2) > TYPE_LENGTH (type1))
        result_type = type2;
      else
        result_type = type1;

      val = allocate_value (result_type);
      store_typed_floating (value_contents_raw (val), value_type (val), v);
    }
  else if (TYPE_CODE (type1) == TYPE_CODE_BOOL
           || TYPE_CODE (type2) == TYPE_CODE_BOOL)
    {
      LONGEST v1, v2, v = 0;

      v1 = value_as_long (arg1);
      v2 = value_as_long (arg2);

      switch (op)
        {
        case BINOP_BITWISE_AND:
          v = v1 & v2;
          break;
        case BINOP_BITWISE_IOR:
          v = v1 | v2;
          break;
        case BINOP_BITWISE_XOR:
          v = v1 ^ v2;
          break;
        case BINOP_EQUAL:
          v = v1 == v2;
          break;
        case BINOP_NOTEQUAL:
          v = v1 != v2;
          break;
        default:
          error (_("Invalid operation on booleans."));
        }

      result_type = type1;

      val = allocate_value (result_type);
      store_signed_integer (value_contents_raw (val),
                            TYPE_LENGTH (result_type),
                            gdbarch_byte_order (get_type_arch (result_type)),
                            v);
    }
  else
    /* Integral operations here.  */
    {
      /* Determine type length of the result, and if the operation should
         be done unsigned.  For exponentiation and shift operators, use
         the length and type of the left operand.  Otherwise, use the
         signedness of the operand with the greater length.  If both
         operands are of equal length, use unsigned operation if one of
         the operands is unsigned.  */
      if (op == BINOP_RSH || op == BINOP_LSH || op == BINOP_EXP)
        result_type = type1;
      else if (TYPE_LENGTH (type1) > TYPE_LENGTH (type2))
        result_type = type1;
      else if (TYPE_LENGTH (type2) > TYPE_LENGTH (type1))
        result_type = type2;
      else if (TYPE_UNSIGNED (type1))
        result_type = type1;
      else if (TYPE_UNSIGNED (type2))
        result_type = type2;
      else
        result_type = type1;

      if (TYPE_UNSIGNED (result_type))
        {
          LONGEST v2_signed = value_as_long (arg2);
          ULONGEST v1, v2, v = 0;

          v1 = (ULONGEST) value_as_long (arg1);
          v2 = (ULONGEST) v2_signed;

          switch (op)
            {
            case BINOP_ADD:
              v = v1 + v2;
              break;
            case BINOP_SUB:
              v = v1 - v2;
              break;
            case BINOP_MUL:
              v = v1 * v2;
              break;
            case BINOP_DIV:
            case BINOP_INTDIV:
              if (v2 != 0)
                v = v1 / v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_EXP:
              v = uinteger_pow (v1, v2_signed);
              break;
            case BINOP_REM:
              if (v2 != 0)
                v = v1 % v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_MOD:
              /* Knuth 1.2.4, integer only.  Note that unlike the C '%' op,
                 v1 mod 0 has a defined value, v1.  */
              if (v2 == 0)
                {
                  v = v1;
                }
              else
                {
                  v = v1 / v2;
                  /* Note floor(v1/v2) == v1/v2 for unsigned.  */
                  v = v1 - (v2 * v);
                }
              break;
            case BINOP_LSH:
              v = v1 << v2;
              break;
            case BINOP_RSH:
              v = v1 >> v2;
              break;
            case BINOP_BITWISE_AND:
              v = v1 & v2;
              break;
            case BINOP_BITWISE_IOR:
              v = v1 | v2;
              break;
            case BINOP_BITWISE_XOR:
              v = v1 ^ v2;
              break;
            case BINOP_LOGICAL_AND:
              v = v1 && v2;
              break;
            case BINOP_LOGICAL_OR:
              v = v1 || v2;
              break;
            case BINOP_MIN:
              v = v1 < v2 ? v1 : v2;
              break;
            case BINOP_MAX:
              v = v1 > v2 ? v1 : v2;
              break;
            case BINOP_EQUAL:
              v = v1 == v2;
              break;
            case BINOP_NOTEQUAL:
              v = v1 != v2;
              break;
            case BINOP_LESS:
              v = v1 < v2;
              break;
            case BINOP_GTR:
              v = v1 > v2;
              break;
            case BINOP_LEQ:
              v = v1 <= v2;
              break;
            case BINOP_GEQ:
              v = v1 >= v2;
              break;
            default:
              error (_("Invalid binary operation on numbers."));
            }

          val = allocate_value (result_type);
          store_unsigned_integer (value_contents_raw (val),
                                  TYPE_LENGTH (value_type (val)),
                                  gdbarch_byte_order
                                    (get_type_arch (result_type)),
                                  v);
        }
      else
        {
          LONGEST v1, v2, v = 0;

          v1 = value_as_long (arg1);
          v2 = value_as_long (arg2);

          switch (op)
            {
            case BINOP_ADD:
              v = v1 + v2;
              break;
            case BINOP_SUB:
              v = v1 - v2;
              break;
            case BINOP_MUL:
              v = v1 * v2;
              break;
            case BINOP_DIV:
            case BINOP_INTDIV:
              if (v2 != 0)
                v = v1 / v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_EXP:
              v = integer_pow (v1, v2);
              break;
            case BINOP_REM:
              if (v2 != 0)
                v = v1 % v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_MOD:
              /* Knuth 1.2.4, integer only.  Note that unlike the C '%' op,
                 X mod 0 has a defined value, X.  */
              if (v2 == 0)
                {
                  v = v1;
                }
              else
                {
                  v = v1 / v2;
                  /* Compute floor.  */
                  if (TRUNCATION_TOWARDS_ZERO && (v < 0) && ((v1 % v2) != 0))
                    {
                      v--;
                    }
                  v = v1 - (v2 * v);
                }
              break;
            case BINOP_LSH:
              v = v1 << v2;
              break;
            case BINOP_RSH:
              v = v1 >> v2;
              break;
            case BINOP_BITWISE_AND:
              v = v1 & v2;
              break;
            case BINOP_BITWISE_IOR:
              v = v1 | v2;
              break;
            case BINOP_BITWISE_XOR:
              v = v1 ^ v2;
              break;
            case BINOP_LOGICAL_AND:
              v = v1 && v2;
              break;
            case BINOP_LOGICAL_OR:
              v = v1 || v2;
              break;
            case BINOP_MIN:
              v = v1 < v2 ? v1 : v2;
              break;
            case BINOP_MAX:
              v = v1 > v2 ? v1 : v2;
              break;
            case BINOP_EQUAL:
              v = v1 == v2;
              break;
            case BINOP_NOTEQUAL:
              v = v1 != v2;
              break;
            case BINOP_LESS:
              v = v1 < v2;
              break;
            case BINOP_GTR:
              v = v1 > v2;
              break;
            case BINOP_LEQ:
              v = v1 <= v2;
              break;
            case BINOP_GEQ:
              v = v1 >= v2;
              break;
            default:
              error (_("Invalid binary operation on numbers."));
            }

          val = allocate_value (result_type);
          store_signed_integer (value_contents_raw (val),
                                TYPE_LENGTH (value_type (val)),
                                gdbarch_byte_order (get_type_arch (result_type)),
                                v);
        }
    }

  return val;
}

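/* Illustrative sketch (not GDB code): the distinction the BINOP_MOD
   comment above ("Knuth 1.2.4") is drawing.  C's '%' truncates toward
   zero, so -7 % 3 == -1, while a floor-based mod yields 2; and mod by
   zero is defined to be the left operand itself.  floor_mod_sketch is a
   general floor-based mod written for this example, not a copy of the
   BINOP_MOD case.  */

#include <stdio.h>

static long
floor_mod_sketch (long v1, long v2)
{
  if (v2 == 0)
    return v1;                  /* Knuth: v1 mod 0 == v1.  */

  long q = v1 / v2;             /* C division truncates toward zero.  */
  if ((v1 % v2) != 0 && ((v1 < 0) != (v2 < 0)))
    q--;                        /* Step down to floor (v1 / v2).  */
  return v1 - v2 * q;
}

int
main (void)
{
  printf ("%ld %ld\n", -7 % 3L, floor_mod_sketch (-7, 3));      /* -1 2 */
  return 0;
}
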
static struct value *
scalar_binop (struct value *arg1, struct value *arg2, enum exp_opcode op)
{
  struct value *val;
  struct type *type1, *type2, *result_type;

  arg1 = coerce_ref (arg1);
  arg2 = coerce_ref (arg2);

  type1 = check_typedef (value_type (arg1));
  type2 = check_typedef (value_type (arg2));

  if ((!is_floating_value (arg1) && !is_integral_type (type1))
      || (!is_floating_value (arg2) && !is_integral_type (type2)))
    error (_("Argument to arithmetic operation not a number or boolean."));

  if (is_floating_type (type1) || is_floating_type (type2))
    {
      /* If only one type is floating-point, use its type.
         Otherwise use the bigger type.  */
      if (!is_floating_type (type1))
        result_type = type2;
      else if (!is_floating_type (type2))
        result_type = type1;
      else if (TYPE_LENGTH (type2) > TYPE_LENGTH (type1))
        result_type = type2;
      else
        result_type = type1;

      val = allocate_value (result_type);

      struct type *eff_type_v1, *eff_type_v2;
      gdb::byte_vector v1, v2;
      v1.resize (TYPE_LENGTH (result_type));
      v2.resize (TYPE_LENGTH (result_type));

      value_args_as_target_float (arg1, arg2,
                                  v1.data (), &eff_type_v1,
                                  v2.data (), &eff_type_v2);
      target_float_binop (op, v1.data (), eff_type_v1,
                          v2.data (), eff_type_v2,
                          value_contents_raw (val), result_type);
    }
  else if (TYPE_CODE (type1) == TYPE_CODE_BOOL
           || TYPE_CODE (type2) == TYPE_CODE_BOOL)
    {
      LONGEST v1, v2, v = 0;

      v1 = value_as_long (arg1);
      v2 = value_as_long (arg2);

      switch (op)
        {
        case BINOP_BITWISE_AND:
          v = v1 & v2;
          break;
        case BINOP_BITWISE_IOR:
          v = v1 | v2;
          break;
        case BINOP_BITWISE_XOR:
          v = v1 ^ v2;
          break;
        case BINOP_EQUAL:
          v = v1 == v2;
          break;
        case BINOP_NOTEQUAL:
          v = v1 != v2;
          break;
        default:
          error (_("Invalid operation on booleans."));
        }

      result_type = type1;

      val = allocate_value (result_type);
      store_signed_integer (value_contents_raw (val),
                            TYPE_LENGTH (result_type),
                            gdbarch_byte_order (get_type_arch (result_type)),
                            v);
    }
  else
    /* Integral operations here.  */
    {
      /* Determine type length of the result, and if the operation should
         be done unsigned.  For exponentiation and shift operators, use
         the length and type of the left operand.  Otherwise, use the
         signedness of the operand with the greater length.  If both
         operands are of equal length, use unsigned operation if one of
         the operands is unsigned.  */
      if (op == BINOP_RSH || op == BINOP_LSH || op == BINOP_EXP)
        result_type = type1;
      else if (TYPE_LENGTH (type1) > TYPE_LENGTH (type2))
        result_type = type1;
      else if (TYPE_LENGTH (type2) > TYPE_LENGTH (type1))
        result_type = type2;
      else if (TYPE_UNSIGNED (type1))
        result_type = type1;
      else if (TYPE_UNSIGNED (type2))
        result_type = type2;
      else
        result_type = type1;

      if (TYPE_UNSIGNED (result_type))
        {
          LONGEST v2_signed = value_as_long (arg2);
          ULONGEST v1, v2, v = 0;

          v1 = (ULONGEST) value_as_long (arg1);
          v2 = (ULONGEST) v2_signed;

          switch (op)
            {
            case BINOP_ADD:
              v = v1 + v2;
              break;
            case BINOP_SUB:
              v = v1 - v2;
              break;
            case BINOP_MUL:
              v = v1 * v2;
              break;
            case BINOP_DIV:
            case BINOP_INTDIV:
              if (v2 != 0)
                v = v1 / v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_EXP:
              v = uinteger_pow (v1, v2_signed);
              break;
            case BINOP_REM:
              if (v2 != 0)
                v = v1 % v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_MOD:
              /* Knuth 1.2.4, integer only.  Note that unlike the C '%' op,
                 v1 mod 0 has a defined value, v1.  */
              if (v2 == 0)
                {
                  v = v1;
                }
              else
                {
                  v = v1 / v2;
                  /* Note floor(v1/v2) == v1/v2 for unsigned.  */
                  v = v1 - (v2 * v);
                }
              break;
            case BINOP_LSH:
              v = v1 << v2;
              break;
            case BINOP_RSH:
              v = v1 >> v2;
              break;
            case BINOP_BITWISE_AND:
              v = v1 & v2;
              break;
            case BINOP_BITWISE_IOR:
              v = v1 | v2;
              break;
            case BINOP_BITWISE_XOR:
              v = v1 ^ v2;
              break;
            case BINOP_LOGICAL_AND:
              v = v1 && v2;
              break;
            case BINOP_LOGICAL_OR:
              v = v1 || v2;
              break;
            case BINOP_MIN:
              v = v1 < v2 ? v1 : v2;
              break;
            case BINOP_MAX:
              v = v1 > v2 ? v1 : v2;
              break;
            case BINOP_EQUAL:
              v = v1 == v2;
              break;
            case BINOP_NOTEQUAL:
              v = v1 != v2;
              break;
            case BINOP_LESS:
              v = v1 < v2;
              break;
            case BINOP_GTR:
              v = v1 > v2;
              break;
            case BINOP_LEQ:
              v = v1 <= v2;
              break;
            case BINOP_GEQ:
              v = v1 >= v2;
              break;
            default:
              error (_("Invalid binary operation on numbers."));
            }

          val = allocate_value (result_type);
          store_unsigned_integer (value_contents_raw (val),
                                  TYPE_LENGTH (value_type (val)),
                                  gdbarch_byte_order
                                    (get_type_arch (result_type)),
                                  v);
        }
      else
        {
          LONGEST v1, v2, v = 0;

          v1 = value_as_long (arg1);
          v2 = value_as_long (arg2);

          switch (op)
            {
            case BINOP_ADD:
              v = v1 + v2;
              break;
            case BINOP_SUB:
              v = v1 - v2;
              break;
            case BINOP_MUL:
              v = v1 * v2;
              break;
            case BINOP_DIV:
            case BINOP_INTDIV:
              if (v2 != 0)
                v = v1 / v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_EXP:
              v = integer_pow (v1, v2);
              break;
            case BINOP_REM:
              if (v2 != 0)
                v = v1 % v2;
              else
                error (_("Division by zero"));
              break;
            case BINOP_MOD:
              /* Knuth 1.2.4, integer only.  Note that unlike the C '%' op,
                 X mod 0 has a defined value, X.  */
              if (v2 == 0)
                {
                  v = v1;
                }
              else
                {
                  v = v1 / v2;
                  /* Compute floor.  */
                  if (TRUNCATION_TOWARDS_ZERO && (v < 0) && ((v1 % v2) != 0))
                    {
                      v--;
                    }
                  v = v1 - (v2 * v);
                }
              break;
            case BINOP_LSH:
              v = v1 << v2;
              break;
            case BINOP_RSH:
              v = v1 >> v2;
              break;
            case BINOP_BITWISE_AND:
              v = v1 & v2;
              break;
            case BINOP_BITWISE_IOR:
              v = v1 | v2;
              break;
            case BINOP_BITWISE_XOR:
              v = v1 ^ v2;
              break;
            case BINOP_LOGICAL_AND:
              v = v1 && v2;
              break;
            case BINOP_LOGICAL_OR:
              v = v1 || v2;
              break;
            case BINOP_MIN:
              v = v1 < v2 ? v1 : v2;
              break;
            case BINOP_MAX:
              v = v1 > v2 ? v1 : v2;
              break;
            case BINOP_EQUAL:
              v = v1 == v2;
              break;
            case BINOP_NOTEQUAL:
              v = v1 != v2;
              break;
            case BINOP_LESS:
              v = v1 < v2;
              break;
            case BINOP_GTR:
              v = v1 > v2;
              break;
            case BINOP_LEQ:
              v = v1 <= v2;
              break;
            case BINOP_GEQ:
              v = v1 >= v2;
              break;
            default:
              error (_("Invalid binary operation on numbers."));
            }

          val = allocate_value (result_type);
          store_signed_integer (value_contents_raw (val),
                                TYPE_LENGTH (value_type (val)),
                                gdbarch_byte_order (get_type_arch (result_type)),
                                v);
        }
    }

  return val;
}

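/* Illustrative sketch (not GDB code): the result-type selection rule
   spelled out in the comment above, reduced to lengths and signedness.
   For shifts and exponentiation the left operand wins; otherwise the
   longer operand wins, and on a length tie an unsigned operand wins.
   The struct and function names are invented for this example.  */

struct operand_sketch
{
  unsigned length;              /* Size of the type in bytes.  */
  int is_unsigned;              /* Nonzero for an unsigned type.  */
};

/* Return 1 if the left operand's type should be used for the result,
   0 if the right operand's type should be.  */

static int
pick_left_operand_sketch (struct operand_sketch l, struct operand_sketch r,
                          int is_shift_or_exp)
{
  if (is_shift_or_exp)
    return 1;
  if (l.length != r.length)
    return l.length > r.length;
  if (l.is_unsigned)
    return 1;
  if (r.is_unsigned)
    return 0;
  return 1;
}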