double          QxrdAcquisitionExtraInputsChannel::evaluateChannel()
{
  evaluateTrigger();

  int mode = get_Mode();

  switch (mode) {
  case ModeSummed:
  default:
    return sumChannel();

  case ModeAveraged:
    return averageChannel();

  case ModeMaximum:
    return maximumChannel();

  case ModeMinimum:
    return minimumChannel();
  }
}
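For context, evaluateChannel() dispatches on the channel's Mode property to one of four reduction routines (sum, average, maximum, minimum). Below is a minimal, self-contained sketch of the same mode-dispatch pattern over an array of samples; the names (evaluate_channel, MODE_*) are hypothetical stand-ins for illustration, not the Qxrd API, and at least one sample is assumed.

#include <stdio.h>

enum mode { MODE_SUMMED, MODE_AVERAGED, MODE_MAXIMUM, MODE_MINIMUM };

/* Hypothetical stand-in for evaluateChannel(): reduce n samples
 * according to the selected mode, defaulting to the summed value. */
static double evaluate_channel(const double *samples, int n, enum mode m)
{
	double sum = 0.0, max = samples[0], min = samples[0];
	int i;

	for (i = 0; i < n; i++) {
		sum += samples[i];
		if (samples[i] > max)
			max = samples[i];
		if (samples[i] < min)
			min = samples[i];
	}

	switch (m) {
	case MODE_AVERAGED:
		return sum / n;
	case MODE_MAXIMUM:
		return max;
	case MODE_MINIMUM:
		return min;
	case MODE_SUMMED:
	default:
		return sum;
	}
}

int main(void)
{
	double v[] = { 1.0, 4.0, 2.5 };

	printf("%f\n", evaluate_channel(v, 3, MODE_AVERAGED)); /* prints 2.500000 */
	return 0;
}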
Example #2
static
void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
		    int align_ctl)
{
	struct thread_info *info = current_thread_info();
	struct unaligned_jit_fragment frag;
	struct unaligned_jit_fragment *jit_code_area;
	tilegx_bundle_bits bundle_2 = 0;
	/* If bundle_2_enable is false, bundle_2 is an fnop/nop operation. */
	bool     bundle_2_enable = true;
	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
	/*
	 * Indicates whether the registers used by the unaligned access
	 * instruction collide with other registers in the same bundle.
	 */
	bool     alias = false;
	bool     load_n_store = true;
	bool     load_store_signed = false;
	unsigned int  load_store_size = 8;
	bool     y1_br = false;  /* True for a branch at Y1 in the same bundle. */
	int      y1_br_reg = 0;
	/* True for a link operation, i.e. jalr or lnk, at Y1. */
	bool     y1_lr = false;
	int      y1_lr_reg = 0;
	bool     x1_add = false; /* True for a load/store ADD instruction at X1. */
	int      x1_add_imm8 = 0;
	bool     unexpected = false;
	int      n = 0, k;

	jit_code_area =
		(struct unaligned_jit_fragment *)(info->unalign_jit_base);

	memset((void *)&frag, 0, sizeof(frag));

	/* 0: X mode, Otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		unsigned int mod, opcode;

		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {

			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			/*
			 * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
			 * pipeline.
			 */
			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
				y1_lr = true;
				y1_lr_reg = 55; /* Link register. */
				/* FALLTHROUGH */
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				y1_br = true;
				y1_br_reg = get_SrcA_Y1(bundle);
				break;
			case LNK_UNARY_OPCODE_Y1:
				/* "lnk" at Y1 pipeline. */
				y1_lr = true;
				y1_lr_reg = get_Dest_Y1(bundle);
				break;
			}
		}

		opcode = get_Opcode_Y2(bundle);
		mod = get_Mode(bundle);

		/*
		 * bundle_2 is the bundle after turning Y2 into a dummy operation:
		 * ld zero, sp
		 */
		bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();

		/* Make Y1 an fnop if Y1 is a branch or lnk operation. */
		if (y1_br || y1_lr) {
			bundle_2 &= ~(GX_INSN_Y1_MASK);
			bundle_2 |= jit_y1_fnop();
		}

		if (is_y0_y1_nop(bundle_2))
			bundle_2_enable = false;

		if (mod == MODE_OPCODE_YC2) {
			/* Store. */
			load_n_store = false;
			load_store_size = 1 << opcode;
			load_store_signed = false;
			find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
			if (load_store_size > 8)
				unexpected = true;
		} else {
			/* Load. */
			load_n_store = true;
			if (mod == MODE_OPCODE_YB2) {
				switch (opcode) {
				case LD_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_OPCODE_Y2:
					load_store_signed = true;
					load_store_size = 4;
					break;
				case LD4U_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 4;
					break;
				default:
					unexpected = true;
				}
			} else if (mod == MODE_OPCODE_YA2) {
				if (opcode == LD2S_OPCODE_Y2) {
					load_store_signed = true;
					load_store_size = 2;
				} else if (opcode == LD2U_OPCODE_Y2) {
					load_store_signed = false;
					load_store_size = 2;
				} else
					unexpected = true;
			} else
				unexpected = true;
			find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
		}
	} else {
		unsigned int opcode;

		/* bundle_2 is the bundle after turning X1 into an fnop. */
		bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();

		if (is_x0_x1_nop(bundle_2))
			bundle_2_enable = false;

		if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			if (get_RRROpcodeExtension_X1(bundle) ==
			    UNARY_RRR_0_OPCODE_X1) {
				load_n_store = true;
				find_regs(bundle, &rd, &ra, &rb, &clob1,
					  &clob2, &clob3, &alias);

				switch (opcode) {
				case LD_UNARY_OPCODE_X1:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD4U_UNARY_OPCODE_X1:
					load_store_size = 4;
					break;

				case LD2S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD2U_UNARY_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			} else {
				load_n_store = false;
				load_store_signed = false;
				find_regs(bundle, 0, &ra, &rb,
					  &clob1, &clob2, &clob3,
					  &alias);

				opcode = get_RRROpcodeExtension_X1(bundle);
				switch (opcode)	{
				case ST_RRR_0_OPCODE_X1:
					load_store_size = 8;
					break;
				case ST4_RRR_0_OPCODE_X1:
					load_store_size = 4;
					break;
				case ST2_RRR_0_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			}
		} else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
			load_n_store = true;
			opcode = get_Imm8OpcodeExtension_X1(bundle);
			switch (opcode)	{
			case LD_ADD_IMM8_OPCODE_X1:
				load_store_size = 8;
				break;

			case LD4S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD4U_ADD_IMM8_OPCODE_X1:
				load_store_size = 4;
				break;

			case LD2S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD2U_ADD_IMM8_OPCODE_X1:
				load_store_size = 2;
				break;

			case ST_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 8;
				break;
			case ST4_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 4;
				break;
			case ST2_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 2;
				break;
			default:
				unexpected = true;
			}

			if (!unexpected) {
				x1_add = true;
				if (load_n_store)
					x1_add_imm8 = get_Imm8_X1(bundle);
				else
					x1_add_imm8 = get_Dest_Imm8_X1(bundle);
			}

			find_regs(bundle, load_n_store ? (&rd) : NULL,
				  &ra, &rb, &clob1, &clob2, &clob3, &alias);
		} else
			unexpected = true;
	}

	/*
	 * Sanity-check the register numbers extracted from the fault bundle.
	 */
	if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true)
		unexpected = true;

	/* Warn if the address in register ra is in fact aligned for this access size. */
	if (!unexpected)
		WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));


	/*
	 * The fault came from kernel space, so here we only need to take care
	 * of the unaligned "get_user/put_user" macros defined in "uaccess.h".
	 * Basically, we handle bundles of this form:
	 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
	 * (refer to "arch/tile/include/asm/uaccess.h" for details).
	 * For either a load or a store, the access is performed byte by byte
	 * with get_user() or put_user(). If the macro returns a non-zero
	 * value, that value is put in rx, otherwise rx is set to zero.
	 * Finally, make pc point to the next bundle and return.
	 */

	if (EX1_PL(regs->ex1) != USER_PL) {

		unsigned long rx = 0;
		unsigned long x = 0, ret = 0;

		if (y1_br || y1_lr || x1_add ||
		    (load_store_signed !=
		     (load_n_store && load_store_size == 4))) {
			/*
			 * A branch, link, load/store-add, or mismatched
			 * sign extension is not expected here.
			 */
			unexpected = true;
		} else if (!unexpected) {
			if (bundle & TILEGX_BUNDLE_MODE_MASK) {
				/*
				 * The fault bundle is in Y mode.
				 * Check whether Y1 and Y0 have the form
				 * { movei rx, 0; nop/fnop }; if so,
				 * find rx.
				 */

				if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
				    && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
				    (get_Imm8_Y1(bundle) == 0) &&
				    is_bundle_y0_nop(bundle)) {
					rx = get_Dest_Y1(bundle);
				} else if ((get_Opcode_Y0(bundle) ==
					    ADDI_OPCODE_Y0) &&
					   (get_SrcA_Y0(bundle) == TREG_ZERO) &&
					   (get_Imm8_Y0(bundle) == 0) &&
					   is_bundle_y1_nop(bundle)) {
					rx = get_Dest_Y0(bundle);
				} else {
					unexpected = true;
				}
			} else {
				/*
				 * The fault bundle is in X mode.
				 * Check whether X0 is 'movei rx, 0';
				 * if so, find rx.
				 */

				if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
				    && (get_Imm8OpcodeExtension_X0(bundle) ==
					ADDI_IMM8_OPCODE_X0) &&
				    (get_SrcA_X0(bundle) == TREG_ZERO) &&
				    (get_Imm8_X0(bundle) == 0)) {
					rx = get_Dest_X0(bundle);
				} else {
					unexpected = true;
				}
			}

			/* rx should be less than 56. */
			if (!unexpected && (rx >= 56))
				unexpected = true;
		}

		if (!search_exception_tables(regs->pc)) {
			/* No fixup in the exception tables for the pc. */
			unexpected = true;
		}

		if (unexpected) {
			/* Unexpected unalign kernel fault. */
			struct task_struct *tsk = validate_current();

			bust_spinlocks(1);

			show_regs(regs);

			if (unlikely(tsk->pid < 2)) {
				panic("Kernel unalign fault running %s!",
				      tsk->pid ? "init" : "the idle task");
			}
#ifdef SUPPORT_DIE
			die("Oops", regs);
#endif
			bust_spinlocks(1);

			do_group_exit(SIGKILL);

		} else {
			unsigned long i, b = 0;
			unsigned char *ptr =
				(unsigned char *)regs->regs[ra];
			if (load_n_store) {
				/* Handle get_user(x, ptr). */
				for (i = 0; i < load_store_size; i++) {
					ret = get_user(b, ptr++);
					if (!ret) {
						/* Success! update x. */
#ifdef __LITTLE_ENDIAN
						x |= (b << (8 * i));
#else
						x <<= 8;
						x |= b;
#endif /* __LITTLE_ENDIAN */
					} else {
						x = 0;
						break;
					}
				}

				/* Sign-extend 4-byte loads. */
				if (load_store_size == 4)
					x = (long)(int)x;

				/* Set register rd. */
				regs->regs[rd] = x;

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;

			} else {
				/* Handle put_user(x, ptr) */
				x = regs->regs[rb];
#ifdef __LITTLE_ENDIAN
				b = x;
#else
				/*
				 * Byte-swap x so that it is stored from low
				 * to high memory, just as in the
				 * little-endian case.
				 */
				switch (load_store_size) {
				case 8:
					b = swab64(x);
					break;
				case 4:
					b = swab32(x);
					break;
				case 2:
					b = swab16(x);
					break;
				}
#endif /* __LITTLE_ENDIAN */
				for (i = 0; i < load_store_size; i++) {
					ret = put_user(b, ptr++);
					if (ret)
						break;
					/* Success! shift 1 byte. */
					b >>= 8;
				}
				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;
			}
		}

		unaligned_fixup_count++;

		if (unaligned_printk) {
			pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
				current->comm, current->pid, regs->regs[ra]);
		}

		/* Done! Return to the exception handler. */
		return;
	}
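The kernel-space fixup path above emulates the faulting access with a byte-by-byte get_user()/put_user() loop, accumulating the loaded value least-significant byte first on a little-endian kernel and sign-extending 4-byte loads. The standalone sketch below reproduces just that accumulation logic in plain C; unaligned_load() is a hypothetical helper written for illustration, not part of the kernel.

#include <stdint.h>
#include <stdio.h>

/* Assemble a 'size'-byte value from a (possibly unaligned) buffer one
 * byte at a time, least-significant byte first, the same way the
 * get_user() loop above does on a little-endian kernel. */
static uint64_t unaligned_load(const unsigned char *ptr, unsigned int size,
			       int sign_extend)
{
	uint64_t x = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		x |= (uint64_t)ptr[i] << (8 * i);

	/* Mirror the handler's "x = (long)(int)x" for 4-byte loads. */
	if (sign_extend && size == 4 && (x & 0x80000000u))
		x |= 0xffffffff00000000ull;

	return x;
}

int main(void)
{
	/* Bytes of 0xdeadbeef laid out little-endian at an odd offset. */
	unsigned char buf[8] = { 0, 0xef, 0xbe, 0xad, 0xde };

	/* Prints ffffffffdeadbeef: the value is sign-extended like ld4s. */
	printf("%llx\n", (unsigned long long)unaligned_load(buf + 1, 4, 1));
	return 0;
}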