Example #1
void event_call (const u32 id, hashcat_ctx_t *hashcat_ctx, const void *buf, const size_t len)
{
  event_ctx_t *event_ctx = hashcat_ctx->event_ctx;

  bool is_log = false;

  switch (id)
  {
    case EVENT_LOG_INFO:    is_log = true; break;
    case EVENT_LOG_WARNING: is_log = true; break;
    case EVENT_LOG_ERROR:   is_log = true; break;
    case EVENT_LOG_ADVICE:  is_log = true; break;
  }

  if (is_log == false)
  {
    hc_thread_mutex_lock (event_ctx->mux_event);
  }

  hashcat_ctx->event (id, hashcat_ctx, buf, len);

  if (is_log == false)
  {
    hc_thread_mutex_unlock (event_ctx->mux_event);
  }

  // append this message to the event backlog in case the user wants to access previous events later

  if (is_log == false)
  {
    for (int i = MAX_OLD_EVENTS - 1; i >= 1; i--)
    {
      memcpy (event_ctx->old_buf[i], event_ctx->old_buf[i - 1], event_ctx->old_len[i - 1]);

      event_ctx->old_len[i] = event_ctx->old_len[i - 1];
    }

    size_t copy_len = 0;

    if (buf)
    {
      // truncate the whole buffer if needed (such that it fits into the old_buf):

      const size_t max_buf_len = sizeof (event_ctx->old_buf[0]);

      copy_len = MIN (len, max_buf_len - 1);

      memcpy (event_ctx->old_buf[0], buf, copy_len);
    }

    event_ctx->old_len[0] = copy_len;
  }
}
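Both variants of event_call above maintain the same fixed-size backlog: every non-log event shifts the stored messages back by one slot and places the newest one in slot 0. The following stand-alone sketch illustrates that rotation with simplified stand-in types (backlog_t here is not hashcat's real event_ctx_t):

#include <stdio.h>
#include <string.h>

#define MAX_OLD_EVENTS 10
#define OLD_BUF_SIZE   256

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

typedef struct
{
  char   old_buf[MAX_OLD_EVENTS][OLD_BUF_SIZE];
  size_t old_len[MAX_OLD_EVENTS];
} backlog_t;

static void backlog_push (backlog_t *ctx, const void *buf, const size_t len)
{
  // shift the existing entries back by one slot, dropping the oldest one

  for (int i = MAX_OLD_EVENTS - 1; i >= 1; i--)
  {
    memcpy (ctx->old_buf[i], ctx->old_buf[i - 1], ctx->old_len[i - 1]);

    ctx->old_len[i] = ctx->old_len[i - 1];
  }

  // store the newest message in slot 0, truncated to the slot size

  size_t copy_len = 0;

  if (buf)
  {
    copy_len = MIN (len, (size_t) OLD_BUF_SIZE - 1);

    memcpy (ctx->old_buf[0], buf, copy_len);
  }

  ctx->old_len[0] = copy_len;
}

int main (void)
{
  backlog_t ctx; memset (&ctx, 0, sizeof (ctx));

  backlog_push (&ctx, "first event",  strlen ("first event"));
  backlog_push (&ctx, "second event", strlen ("second event"));

  printf ("newest: %.*s\n", (int) ctx.old_len[0], ctx.old_buf[0]); // second event
  printf ("older:  %.*s\n", (int) ctx.old_len[1], ctx.old_buf[1]); // first event

  return 0;
}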
Example #2
void event_call (const u32 id, hashcat_ctx_t *hashcat_ctx, const void *buf, const size_t len)
{
  event_ctx_t *event_ctx = hashcat_ctx->event_ctx;

  bool is_log = false;

  switch (id)
  {
    case EVENT_LOG_INFO:    is_log = true; break;
    case EVENT_LOG_WARNING: is_log = true; break;
    case EVENT_LOG_ERROR:   is_log = true; break;
  }

  if (is_log == false)
  {
    hc_thread_mutex_lock (event_ctx->mux_event);
  }

  hashcat_ctx->event (id, hashcat_ctx, buf, len);

  if (is_log == false)
  {
    hc_thread_mutex_unlock (event_ctx->mux_event);
  }

  // append this message to the event backlog in case the user wants to access previous events later

  if (is_log == false)
  {
    for (int i = MAX_OLD_EVENTS - 1; i >= 1; i--)
    {
      memcpy (event_ctx->old_buf[i], event_ctx->old_buf[i - 1], event_ctx->old_len[i - 1]);

      event_ctx->old_len[i] = event_ctx->old_len[i - 1];
    }

    size_t copy_len = 0;

    if (buf)
    {
      // clamp the copy length so it cannot overflow the fixed-size old_buf entry

      const size_t max_buf_len = sizeof (event_ctx->old_buf[0]);

      copy_len = MIN (len, max_buf_len - 1);

      memcpy (event_ctx->old_buf[0], buf, copy_len);
    }

    event_ctx->old_len[0] = copy_len;
  }
}
Example #3
static int monitor (hashcat_ctx_t *hashcat_ctx)
{
  hashes_t       *hashes        = hashcat_ctx->hashes;
  hwmon_ctx_t    *hwmon_ctx     = hashcat_ctx->hwmon_ctx;
  opencl_ctx_t   *opencl_ctx    = hashcat_ctx->opencl_ctx;
  restore_ctx_t  *restore_ctx   = hashcat_ctx->restore_ctx;
  status_ctx_t   *status_ctx    = hashcat_ctx->status_ctx;
  user_options_t *user_options  = hashcat_ctx->user_options;

  bool runtime_check      = false;
  bool remove_check       = false;
  bool status_check       = false;
  bool restore_check      = false;
  bool hwmon_check        = false;
  bool performance_check  = false;

  const int    sleep_time      = 1;
  const int    temp_threshold  = 1;      // degrees Celsius
  const int    fan_speed_min   = 33;     // in percentage
  const int    fan_speed_max   = 100;
  const double exec_low        = 50.0;  // in ms
  const double util_low        = 90.0;  // in percent

  if (user_options->runtime)
  {
    runtime_check = true;
  }

  if (restore_ctx->enabled == true)
  {
    restore_check = true;
  }

  if ((user_options->remove == true) && (hashes->hashlist_mode == HL_MODE_FILE))
  {
    remove_check = true;
  }

  if (user_options->status == true)
  {
    status_check = true;
  }

  if (hwmon_ctx->enabled == true)
  {
    hwmon_check = true;
  }

  if (hwmon_ctx->enabled == true)
  {
    performance_check = true; // this check simply requires hwmon to work
  }

  if ((runtime_check == false) && (remove_check == false) && (status_check == false) && (restore_check == false) && (hwmon_check == false) && (performance_check == false))
  {
    return 0;
  }

  // these variables are mainly used for fan control

  int fan_speed_chgd[DEVICES_MAX];

  memset (fan_speed_chgd, 0, sizeof (fan_speed_chgd));

  // temperature controller "loopback" values

  int temp_diff_old[DEVICES_MAX];
  int temp_diff_sum[DEVICES_MAX];

  memset (temp_diff_old, 0, sizeof (temp_diff_old));
  memset (temp_diff_sum, 0, sizeof (temp_diff_sum));

  // timer

  hc_time_t last_temp_check_time;

  hc_time (&last_temp_check_time);

  u32 slowdown_warnings    = 0;
  u32 performance_warnings = 0;

  u32 restore_left  = user_options->restore_timer;
  u32 remove_left   = user_options->remove_timer;
  u32 status_left   = user_options->status_timer;

  while (status_ctx->shutdown_inner == false)
  {
    sleep (sleep_time);

    if (status_ctx->devices_status == STATUS_INIT) continue;

    if (hwmon_check == true)
    {
      hc_thread_mutex_lock (status_ctx->mux_hwmon);

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        const int rc_throttle = hm_get_throttle_with_device_id (hashcat_ctx, device_id);

        if (rc_throttle == -1) continue;

        if (rc_throttle > 0)
        {
          slowdown_warnings++;

          if (slowdown_warnings == 1) EVENT_DATA (EVENT_MONITOR_THROTTLE1, &device_id, sizeof (u32));
          if (slowdown_warnings == 2) EVENT_DATA (EVENT_MONITOR_THROTTLE2, &device_id, sizeof (u32));
          if (slowdown_warnings == 3) EVENT_DATA (EVENT_MONITOR_THROTTLE3, &device_id, sizeof (u32));
        }
        else
        {
          slowdown_warnings = 0;
        }
      }

      hc_thread_mutex_unlock (status_ctx->mux_hwmon);
    }

    if (hwmon_check == true)
    {
      hc_thread_mutex_lock (status_ctx->mux_hwmon);

      hc_time_t temp_check_time;

      hc_time (&temp_check_time);

      u32 Ta = temp_check_time - last_temp_check_time; // simply using sleep_time as Ta is not accurate enough (see --remove etc.)

      if (Ta == 0) Ta = 1;

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        if ((opencl_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;

        const int temperature = hm_get_temperature_with_device_id (hashcat_ctx, device_id);

        if (temperature > (int) user_options->gpu_temp_abort)
        {
          EVENT_DATA (EVENT_MONITOR_TEMP_ABORT, &device_id, sizeof (u32));

          myabort (hashcat_ctx);
        }

        if (hwmon_ctx->hm_device[device_id].fanspeed_get_supported == false) continue;
        if (hwmon_ctx->hm_device[device_id].fanspeed_set_supported == false) continue;

        const u32 gpu_temp_retain = user_options->gpu_temp_retain;

        if (gpu_temp_retain > 0)
        {
          int temp_cur = temperature;

          int temp_diff_new = (int) gpu_temp_retain - temp_cur;

          temp_diff_sum[device_id] = temp_diff_sum[device_id] + temp_diff_new;

          // remember the time of this check so the next iteration can compute Ta (the time difference in seconds between checks)

          last_temp_check_time = temp_check_time;

          float Kp = 1.6f;
          float Ki = 0.001f;
          float Kd = 10.0f;

          // PID controller (3-term controller: proportional - Kp, integral - Ki, derivative - Kd)

          int fan_diff_required = (int) (Kp * (float)temp_diff_new + Ki * Ta * (float)temp_diff_sum[device_id] + Kd * ((float)(temp_diff_new - temp_diff_old[device_id])) / Ta);

          if (abs (fan_diff_required) >= temp_threshold)
          {
            const int fan_speed_cur = hm_get_fanspeed_with_device_id (hashcat_ctx, device_id);

            int fan_speed_level = fan_speed_cur;

            if (fan_speed_chgd[device_id] == 0) fan_speed_level = temp_cur;

            int fan_speed_new = fan_speed_level - fan_diff_required;

            if (fan_speed_new > fan_speed_max) fan_speed_new = fan_speed_max;
            if (fan_speed_new < fan_speed_min) fan_speed_new = fan_speed_min;

            if (fan_speed_new != fan_speed_cur)
            {
              int freely_change_fan_speed = (fan_speed_chgd[device_id] == 1);
              int fan_speed_must_change = (fan_speed_new > fan_speed_cur);

              if ((freely_change_fan_speed == 1) || (fan_speed_must_change == 1))
              {
                if (device_param->device_vendor_id == VENDOR_ID_AMD)
                {
                  if (hwmon_ctx->hm_adl)
                  {
                    hm_set_fanspeed_with_device_id_adl (hashcat_ctx, device_id, fan_speed_new, 1);
                  }

                  if (hwmon_ctx->hm_sysfs)
                  {
                    hm_set_fanspeed_with_device_id_sysfs (hashcat_ctx, device_id, fan_speed_new);
                  }
                }
                else if (device_param->device_vendor_id == VENDOR_ID_NV)
                {
                  if (hwmon_ctx->hm_nvapi)
                  {
                    hm_set_fanspeed_with_device_id_nvapi (hashcat_ctx, device_id, fan_speed_new, 1);
                  }

                  if (hwmon_ctx->hm_xnvctrl)
                  {
                    hm_set_fanspeed_with_device_id_xnvctrl (hashcat_ctx, device_id, fan_speed_new);
                  }
                }

                fan_speed_chgd[device_id] = 1;
              }

              temp_diff_old[device_id] = temp_diff_new;
            }
          }
        }
      }

      hc_thread_mutex_unlock (status_ctx->mux_hwmon);
    }

    if (restore_check == true)
    {
      restore_left--;

      if (restore_left == 0)
      {
        const int rc = cycle_restore (hashcat_ctx);

        if (rc == -1) return -1;

        restore_left = user_options->restore_timer;
      }
    }

    if ((runtime_check == true) && (status_ctx->runtime_start > 0))
    {
      const int runtime_left = get_runtime_left (hashcat_ctx);

      if (runtime_left <= 0)
      {
        EVENT_DATA (EVENT_MONITOR_RUNTIME_LIMIT, NULL, 0);

        myabort_runtime (hashcat_ctx);
      }
    }

    if (remove_check == true)
    {
      remove_left--;

      if (remove_left == 0)
      {
        if (hashes->digests_saved != hashes->digests_done)
        {
          hashes->digests_saved = hashes->digests_done;

          const int rc = save_hash (hashcat_ctx);

          if (rc == -1) return -1;
        }

        remove_left = user_options->remove_timer;
      }
    }

    if (status_check == true)
    {
      status_left--;

      if (status_left == 0)
      {
        hc_thread_mutex_lock (status_ctx->mux_display);

        EVENT_DATA (EVENT_MONITOR_STATUS_REFRESH, NULL, 0);

        hc_thread_mutex_unlock (status_ctx->mux_display);

        status_left = user_options->status_timer;
      }
    }

    if (performance_check == true)
    {
      int exec_cnt = 0;
      int util_cnt = 0;

      double exec_total = 0;
      double util_total = 0;

      hc_thread_mutex_lock (status_ctx->mux_hwmon);

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        exec_cnt++;

        const double exec = status_get_exec_msec_dev (hashcat_ctx, device_id);

        exec_total += exec;

        const int util = hm_get_utilization_with_device_id (hashcat_ctx, device_id);

        if (util == -1) continue;

        util_total += (double) util;

        util_cnt++;
      }

      hc_thread_mutex_unlock (status_ctx->mux_hwmon);

      double exec_avg = 0;
      double util_avg = 0;

      if (exec_cnt > 0) exec_avg = exec_total / exec_cnt;
      if (util_cnt > 0) util_avg = util_total / util_cnt;

      if ((exec_avg > 0) && (exec_avg < exec_low))
      {
        performance_warnings++;

        if (performance_warnings == 10) EVENT_DATA (EVENT_MONITOR_PERFORMANCE_HINT, NULL, 0);
      }

      if ((util_avg > 0) && (util_avg < util_low))
      {
        performance_warnings++;

        if (performance_warnings == 10) EVENT_DATA (EVENT_MONITOR_PERFORMANCE_HINT, NULL, 0);
      }
    }
  }

  // final round of save_hash

  if (remove_check == true)
  {
    if (hashes->digests_saved != hashes->digests_done)
    {
      const int rc = save_hash (hashcat_ctx);

      if (rc == -1) return -1;
    }
  }

  // final round of cycle_restore

  if (restore_check == true)
  {
    const int rc = cycle_restore (hashcat_ctx);

    if (rc == -1) return -1;
  }

  return 0;
}
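The fan-control branch above is a classic 3-term (PID) controller: the required fan-speed change is the weighted sum of the current temperature error (proportional), the accumulated error (integral, scaled by the elapsed time Ta) and the change in error (derivative, divided by Ta). Below is a minimal stand-alone sketch of that computation using the same Kp/Ki/Kd constants as the example; the state struct and the sample values in main are illustrative only:

#include <stdio.h>

typedef struct
{
  int temp_diff_old; // previous temperature error (feeds the derivative term)
  int temp_diff_sum; // accumulated temperature error (feeds the integral term)
} pid_state_t;

static int pid_fan_diff (pid_state_t *s, const int temp_retain, const int temp_cur, const int Ta)
{
  const float Kp = 1.6f;
  const float Ki = 0.001f;
  const float Kd = 10.0f;

  const int temp_diff_new = temp_retain - temp_cur;

  s->temp_diff_sum += temp_diff_new;

  // proportional + integral (scaled by elapsed time Ta) + derivative (divided by Ta)

  const int fan_diff_required = (int) (Kp * (float) temp_diff_new
                                     + Ki * (float) Ta * (float) s->temp_diff_sum
                                     + Kd * (float) (temp_diff_new - s->temp_diff_old) / (float) Ta);

  s->temp_diff_old = temp_diff_new;

  return fan_diff_required;
}

int main (void)
{
  pid_state_t s = { 0, 0 };

  // device runs 8 degrees above the retain target; the negative result means the
  // monitor loop raises the fan (fan_speed_new = fan_speed_level - fan_diff_required)

  const int diff = pid_fan_diff (&s, 65, 73, 1);

  printf ("fan_diff_required = %d\n", diff);

  return 0;
}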
Example #4
static int selftest (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param)
{
  hashconfig_t         *hashconfig         = hashcat_ctx->hashconfig;
  hashes_t             *hashes             = hashcat_ctx->hashes;
  status_ctx_t         *status_ctx         = hashcat_ctx->status_ctx;
  user_options_extra_t *user_options_extra = hashcat_ctx->user_options_extra;

  cl_int CL_err;

  int CL_rc;

  if (hashconfig->st_hash == NULL) return 0;

  // init : replace hashes with selftest hash

  device_param->kernel_params[15] = &device_param->d_st_digests_buf;
  device_param->kernel_params[17] = &device_param->d_st_salts_buf;
  device_param->kernel_params[18] = &device_param->d_st_esalts_buf;

  device_param->kernel_params_buf32[31] = 1;
  device_param->kernel_params_buf32[32] = 0;

  // password : move the known password into a fake buffer

  u32 highest_pw_len = 0;

  if (hashconfig->attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
  {
    if (user_options_extra->attack_kern == ATTACK_KERN_STRAIGHT)
    {
      device_param->kernel_params_buf32[30] = 1;

      pw_t pw; memset (&pw, 0, sizeof (pw));

      char *pw_ptr = (char *) &pw.i;

      const size_t pw_len = strlen (hashconfig->st_pass);

      memcpy (pw_ptr, hashconfig->st_pass, pw_len);

      pw.pw_len = (u32) pw_len;

      if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
      {
        uppercase ((u8 *) pw_ptr, pw.pw_len);
      }

      CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, 1 * sizeof (pw_t), &pw, 0, NULL, NULL);

      if (CL_err != CL_SUCCESS) return -1;
    }
    else if (user_options_extra->attack_kern == ATTACK_KERN_COMBI)
    {
      device_param->kernel_params_buf32[30] = 1;
      device_param->kernel_params_buf32[33] = COMBINATOR_MODE_BASE_LEFT;

      pw_t pw; memset (&pw, 0, sizeof (pw));

      char *pw_ptr = (char *) &pw.i;

      const size_t pw_len = strlen (hashconfig->st_pass);

      memcpy (pw_ptr, hashconfig->st_pass, pw_len - 1);

      pw.pw_len = (u32) pw_len - 1;

      if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
      {
        uppercase ((u8 *) pw_ptr, pw.pw_len);
      }

      pw_t comb; memset (&comb, 0, sizeof (comb));

      char *comb_ptr = (char *) &comb.i;

      memcpy (comb_ptr, hashconfig->st_pass + pw_len - 1, 1);

      comb.pw_len = 1;

      if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
      {
        uppercase ((u8 *) comb_ptr, comb.pw_len);
      }

      if (hashconfig->opts_type & OPTS_TYPE_PT_ADD01)
      {
        comb_ptr[comb.pw_len] = 0x01;
      }

      if (hashconfig->opts_type & OPTS_TYPE_PT_ADD80)
      {
        comb_ptr[comb.pw_len] = 0x80;
      }

      CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_combs_c, CL_TRUE, 0, 1 * sizeof (pw_t), &comb, 0, NULL, NULL);

      if (CL_err != CL_SUCCESS) return -1;

      CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, 1 * sizeof (pw_t), &pw, 0, NULL, NULL);

      if (CL_err != CL_SUCCESS) return -1;
    }
    else if (user_options_extra->attack_kern == ATTACK_KERN_BF)
    {
      device_param->kernel_params_buf32[30] = 1;

      if (hashconfig->opts_type & OPTS_TYPE_PT_BITSLICE)
      {
        pw_t pw; memset (&pw, 0, sizeof (pw));

        char *pw_ptr = (char *) &pw.i;

        const size_t pw_len = strlen (hashconfig->st_pass);

        memcpy (pw_ptr, hashconfig->st_pass, pw_len);

        if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
        {
          uppercase ((u8 *) pw_ptr, pw_len);
        }

        pw.pw_len = (u32) pw_len;

        CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, 1 * sizeof (pw_t), &pw, 0, NULL, NULL);

        if (CL_err != CL_SUCCESS) return -1;
      }
      else
      {
        bf_t bf; memset (&bf, 0, sizeof (bf));

        char *bf_ptr = (char *) &bf.i;

        memcpy (bf_ptr, hashconfig->st_pass, 1);

        if (hashconfig->opts_type & OPTS_TYPE_PT_UTF16LE)
        {
          memset (bf_ptr, 0, 4);

          for (int i = 0, j = 0; i < 1; i += 1, j += 2)
          {
            bf_ptr[j + 0] = hashconfig->st_pass[i];
            bf_ptr[j + 1] = 0;
          }
        }
        else if (hashconfig->opts_type & OPTS_TYPE_PT_UTF16BE)
        {
          memset (bf_ptr, 0, 4);

          for (int i = 0, j = 0; i < 1; i += 1, j += 2)
          {
            bf_ptr[j + 0] = 0;
            bf_ptr[j + 1] = hashconfig->st_pass[i];
          }
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
        {
          uppercase ((u8 *) bf_ptr, 4);
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_GENERATE_BE)
        {
          bf.i = byte_swap_32 (bf.i);
        }

        CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_bfs_c, CL_TRUE, 0, 1 * sizeof (bf_t), &bf, 0, NULL, NULL);

        if (CL_err != CL_SUCCESS) return -1;

        pw_t pw; memset (&pw, 0, sizeof (pw));

        char *pw_ptr = (char *) &pw.i;

        const size_t pw_len = strlen (hashconfig->st_pass);

        memcpy (pw_ptr + 1, hashconfig->st_pass + 1, pw_len - 1);

        size_t new_pass_len = pw_len;

        if (hashconfig->opts_type & OPTS_TYPE_PT_UTF16LE)
        {
          memset (pw_ptr, 0, pw_len);

          for (size_t i = 1, j = 2; i < new_pass_len; i += 1, j += 2)
          {
            pw_ptr[j + 0] = hashconfig->st_pass[i];
            pw_ptr[j + 1] = 0;
          }

          new_pass_len *= 2;
        }
        else if (hashconfig->opts_type & OPTS_TYPE_PT_UTF16BE)
        {
          memset (pw_ptr, 0, pw_len);

          for (size_t i = 1, j = 2; i < new_pass_len; i += 1, j += 2)
          {
            pw_ptr[j + 0] = 0;
            pw_ptr[j + 1] = hashconfig->st_pass[i];
          }

          new_pass_len *= 2;
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_UPPER)
        {
          uppercase ((u8 *) pw_ptr, new_pass_len);
        }

        if (hashconfig->opti_type & OPTI_TYPE_SINGLE_HASH)
        {
          if (hashconfig->opti_type & OPTI_TYPE_APPENDED_SALT)
          {
            memcpy (pw_ptr + new_pass_len, (char *) hashes->st_salts_buf[0].salt_buf, 64 - new_pass_len);

            new_pass_len += hashes->st_salts_buf[0].salt_len;
          }
        }

        pw.pw_len = (u32) new_pass_len;

        if (hashconfig->opts_type & OPTS_TYPE_PT_ADD01)
        {
          pw_ptr[new_pass_len] = 0x01;
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_ADD80)
        {
          pw_ptr[new_pass_len] = 0x80;
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_ADDBITS14)
        {
          pw.i[14] = (u32) new_pass_len * 8;
          pw.i[15] = 0;
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_ADDBITS15)
        {
          pw.i[14] = 0;
          pw.i[15] = (u32) new_pass_len * 8;
        }

        if (hashconfig->opts_type & OPTS_TYPE_PT_GENERATE_BE)
        {
          for (int i = 0; i < 14; i++) pw.i[i] = byte_swap_32 (pw.i[i]);
        }

        CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, 1 * sizeof (pw_t), &pw, 0, NULL, NULL);

        if (CL_err != CL_SUCCESS) return -1;

        highest_pw_len = pw.pw_len;
      }
    }
  }
  else
  {
    pw_t pw; memset (&pw, 0, sizeof (pw));

    char *pw_ptr = (char *) &pw.i;

    const size_t pw_len = strlen (hashconfig->st_pass);

    memcpy (pw_ptr, hashconfig->st_pass, pw_len);

    pw.pw_len = (u32) pw_len;

    CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_pws_buf, CL_TRUE, 0, 1 * sizeof (pw_t), &pw, 0, NULL, NULL);

    if (CL_err != CL_SUCCESS) return -1;
  }

  // main : run the kernel

  if (hashconfig->attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
  {
    if (hashconfig->opti_type & OPTI_TYPE_OPTIMIZED_KERNEL)
    {
      if (highest_pw_len < 16)
      {
        CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_1, 1, false, 0);

        if (CL_rc == -1) return -1;
      }
      else if (highest_pw_len < 32)
      {
        CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_2, 1, false, 0);

        if (CL_rc == -1) return -1;
      }
      else
      {
        CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_3, 1, false, 0);

        if (CL_rc == -1) return -1;
      }
    }
    else
    {
      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_4, 1, false, 0);

      if (CL_rc == -1) return -1;
    }
  }
  else
  {
    // missing handling hooks

    CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_1, 1, false, 0);

    if (CL_rc == -1) return -1;

    if (hashconfig->opts_type & OPTS_TYPE_HOOK12)
    {
      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_12, 1, false, 0);

      if (CL_rc == -1) return -1;

      CL_rc = hc_clEnqueueReadBuffer (hashcat_ctx, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);

      if (CL_rc == -1) return -1;

      // do something with data

      CL_rc = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);

      if (CL_rc == -1) return -1;
    }

    const u32 salt_pos = 0;

    salt_t *salt_buf = &hashes->st_salts_buf[salt_pos];

    const u32 kernel_loops_fixed = hashconfig_get_kernel_loops (hashcat_ctx);

    const u32 loop_step = (kernel_loops_fixed) ? kernel_loops_fixed : 1;

    const u32 iter = salt_buf->salt_iter;

    for (u32 loop_pos = 0; loop_pos < iter; loop_pos += loop_step)
    {
      u32 loop_left = iter - loop_pos;

      loop_left = MIN (loop_left, loop_step);

      device_param->kernel_params_buf32[28] = loop_pos;
      device_param->kernel_params_buf32[29] = loop_left;

      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_2, 1, false, 0);

      if (CL_rc == -1) return -1;
    }

    if (hashconfig->opts_type & OPTS_TYPE_HOOK23)
    {
      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_23, 1, false, 0);

      if (CL_rc == -1) return -1;

      CL_rc = hc_clEnqueueReadBuffer (hashcat_ctx, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);

      if (CL_rc == -1) return -1;

      /*
       * The following section depends on the hash mode
       */

      switch (hashconfig->hash_mode)
      {
        // for 7-Zip (mode 11600) only device_param->hooks_buf is needed, but other hooks may use any info from device_param; all of them must update hooks_buf
        case 11600: seven_zip_hook_func (device_param, hashes->st_hook_salts_buf, 0, 1); break;
      }

      /*
       * END of hash mode specific hook operations
       */

      CL_rc = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_hooks, CL_TRUE, 0, device_param->size_hooks, device_param->hooks_buf, 0, NULL, NULL);

      if (CL_rc == -1) return -1;
    }

    if (hashconfig->opts_type & OPTS_TYPE_INIT2)
    {
      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_INIT2, 1, false, 0);

      if (CL_rc == -1) return -1;
    }

    if (hashconfig->opts_type & OPTS_TYPE_LOOP2)
    {
      const u32 iter2 = salt_buf->salt_iter2;

      for (u32 loop_pos = 0; loop_pos < iter2; loop_pos += loop_step)
      {
        u32 loop_left = iter2 - loop_pos;

        loop_left = MIN (loop_left, loop_step);

        device_param->kernel_params_buf32[28] = loop_pos;
        device_param->kernel_params_buf32[29] = loop_left;

        CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_LOOP2, 1, false, 0);

        if (CL_rc == -1) return -1;
      }
    }

    if ((hashconfig->hash_mode == 2500) || (hashconfig->hash_mode == 2501))
    {
      device_param->kernel_params_buf32[28] = 0;
      device_param->kernel_params_buf32[29] = 1;

      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_AUX1, 1, false, 0);

      if (CL_rc == -1) return -1;
    }
    else
    {
      CL_rc = run_kernel (hashcat_ctx, device_param, KERN_RUN_3, 1, false, 0);

      if (CL_rc == -1) return -1;
    }
  }

  // check : check if cracked

  u32 num_cracked;

  CL_err = hc_clEnqueueReadBuffer (hashcat_ctx, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);

  if (CL_err != CL_SUCCESS) return -1;

  // finish : cleanup and restore

  device_param->kernel_params_buf32[27] = 0;
  device_param->kernel_params_buf32[28] = 0;
  device_param->kernel_params_buf32[29] = 0;
  device_param->kernel_params_buf32[30] = 0;
  device_param->kernel_params_buf32[31] = 0;
  device_param->kernel_params_buf32[32] = 0;
  device_param->kernel_params_buf32[33] = 0;
  device_param->kernel_params_buf64[34] = 0;

  device_param->kernel_params[15] = &device_param->d_digests_buf;
  device_param->kernel_params[17] = &device_param->d_salt_bufs;
  device_param->kernel_params[18] = &device_param->d_esalt_bufs;

  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_pws_buf,       device_param->size_pws);      if (CL_rc == -1) return -1;
  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_tmps,          device_param->size_tmps);     if (CL_rc == -1) return -1;
  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_hooks,         device_param->size_hooks);    if (CL_rc == -1) return -1;
  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_plain_bufs,    device_param->size_plains);   if (CL_rc == -1) return -1;
  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_digests_shown, device_param->size_shown);    if (CL_rc == -1) return -1;
  CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_result,        device_param->size_results);  if (CL_rc == -1) return -1;

  if (user_options_extra->attack_kern == ATTACK_KERN_STRAIGHT)
  {
    CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_rules_c, device_param->size_rules_c);

    if (CL_rc == -1) return -1;
  }
  else if (user_options_extra->attack_kern == ATTACK_KERN_COMBI)
  {
    CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_combs_c, device_param->size_combs);

    if (CL_rc == -1) return -1;
  }
  else if (user_options_extra->attack_kern == ATTACK_KERN_BF)
  {
    CL_rc = run_kernel_bzero (hashcat_ctx, device_param, device_param->d_bfs_c, device_param->size_bfs);

    if (CL_rc == -1) return -1;
  }

  // check return

  if (num_cracked == 0)
  {
    hc_thread_mutex_lock (status_ctx->mux_display);

    event_log_error (hashcat_ctx, "* Device #%u: ATTENTION! OpenCL kernel self-test failed.", device_param->device_id + 1);

    event_log_warning (hashcat_ctx, "Your device driver installation is probably broken.");
    event_log_warning (hashcat_ctx, "See also: https://hashcat.net/faq/wrongdriver");
    event_log_warning (hashcat_ctx, NULL);

    hc_thread_mutex_unlock (status_ctx->mux_display);

    return -1;
  }

  return 0;
}
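The OPTS_TYPE_PT_ADD80 and OPTS_TYPE_PT_ADDBITS14/15 handling in the brute-force branch mirrors classic MD5/SHA-style message padding: a 0x80 terminator byte follows the password, and the message length in bits goes into the last words of the 64-byte block. The following is a minimal sketch of that layout, assuming an MD5-style little-endian block; fake_pw_t is a simplified stand-in for hashcat's pw_t:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
  uint32_t i[16];   // one 64-byte block, viewed as 16 u32 words
  uint32_t pw_len;
} fake_pw_t;

static void pad_md5_style (fake_pw_t *pw, const char *pass)
{
  memset (pw, 0, sizeof (*pw));

  char *ptr = (char *) pw->i;

  const size_t len = strlen (pass);

  memcpy (ptr, pass, len);

  pw->pw_len = (uint32_t) len;

  ptr[len] = (char) 0x80;           // OPTS_TYPE_PT_ADD80: append the terminator bit

  pw->i[14] = (uint32_t) (len * 8); // OPTS_TYPE_PT_ADDBITS14: message length in bits
  pw->i[15] = 0;
}

int main (void)
{
  fake_pw_t pw;

  pad_md5_style (&pw, "hashcat");

  printf ("message length word: %u bits\n", pw.i[14]); // 56

  return 0;
}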
Example #5
int check_cracked (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param, const u32 salt_pos)
{
  cpt_ctx_t      *cpt_ctx      = hashcat_ctx->cpt_ctx;
  hashconfig_t   *hashconfig   = hashcat_ctx->hashconfig;
  hashes_t       *hashes       = hashcat_ctx->hashes;
  status_ctx_t   *status_ctx   = hashcat_ctx->status_ctx;
  user_options_t *user_options = hashcat_ctx->user_options;

  salt_t *salt_buf = &hashes->salts_buf[salt_pos];

  u32 num_cracked;

  cl_int CL_err;

  CL_err = hc_clEnqueueReadBuffer (hashcat_ctx, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);

  if (CL_err != CL_SUCCESS)
  {
    event_log_error (hashcat_ctx, "clEnqueueReadBuffer(): %s", val2cstr_cl (CL_err));

    return -1;
  }

  if (user_options->speed_only == true)
  {
    // we want the hc_clEnqueueReadBuffer to run in benchmark mode because it has an influence on performance
    // however if the benchmark cracks the artificial hash used for benchmarks we don't want to see that!

    return 0;
  }

  if (num_cracked)
  {
    plain_t *cracked = (plain_t *) hccalloc (num_cracked, sizeof (plain_t));

    CL_err = hc_clEnqueueReadBuffer (hashcat_ctx, device_param->command_queue, device_param->d_plain_bufs, CL_TRUE, 0, num_cracked * sizeof (plain_t), cracked, 0, NULL, NULL);

    if (CL_err != CL_SUCCESS)
    {
      event_log_error (hashcat_ctx, "clEnqueueReadBuffer(): %s", val2cstr_cl (CL_err));

      return -1;
    }

    u32 cpt_cracked = 0;

    hc_thread_mutex_lock (status_ctx->mux_display);

    for (u32 i = 0; i < num_cracked; i++)
    {
      const u32 hash_pos = cracked[i].hash_pos;

      if (hashes->digests_shown[hash_pos] == 1) continue;

      if ((hashconfig->opts_type & OPTS_TYPE_PT_NEVERCRACK) == 0)
      {
        hashes->digests_shown[hash_pos] = 1;

        hashes->digests_done++;

        cpt_cracked++;

        salt_buf->digests_done++;

        if (salt_buf->digests_done == salt_buf->digests_cnt)
        {
          hashes->salts_shown[salt_pos] = 1;

          hashes->salts_done++;
        }
      }

      if (hashes->salts_done == hashes->salts_cnt) mycracked (hashcat_ctx);

      check_hash (hashcat_ctx, device_param, &cracked[i]);
    }

    hc_thread_mutex_unlock (status_ctx->mux_display);

    hcfree (cracked);

    if (cpt_cracked > 0)
    {
      hc_thread_mutex_lock (status_ctx->mux_display);

      cpt_ctx->cpt_buf[cpt_ctx->cpt_pos].timestamp = time (NULL);
      cpt_ctx->cpt_buf[cpt_ctx->cpt_pos].cracked   = cpt_cracked;

      cpt_ctx->cpt_pos++;

      cpt_ctx->cpt_total += cpt_cracked;

      if (cpt_ctx->cpt_pos == CPT_CACHE) cpt_ctx->cpt_pos = 0;

      hc_thread_mutex_unlock (status_ctx->mux_display);
    }

    if (hashconfig->opts_type & OPTS_TYPE_PT_NEVERCRACK)
    {
      // we need to reset the cracked state on the device,
      // otherwise the host would see the same hash reported as cracked again
      // and again and return an invalid password each time

      memset (hashes->digests_shown_tmp, 0, salt_buf->digests_cnt * sizeof (u32));

      CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_digests_shown, CL_TRUE, salt_buf->digests_offset * sizeof (u32), salt_buf->digests_cnt * sizeof (u32), &hashes->digests_shown_tmp[salt_buf->digests_offset], 0, NULL, NULL);

      if (CL_err != CL_SUCCESS)
      {
        event_log_error (hashcat_ctx, "clEnqueueWriteBuffer(): %s", val2cstr_cl (CL_err));

        return -1;
      }
    }

    num_cracked = 0;

    CL_err = hc_clEnqueueWriteBuffer (hashcat_ctx, device_param->command_queue, device_param->d_result, CL_TRUE, 0, sizeof (u32), &num_cracked, 0, NULL, NULL);

    if (CL_err != CL_SUCCESS)
    {
      event_log_error (hashcat_ctx, "clEnqueueWriteBuffer(): %s", val2cstr_cl (CL_err));

      return -1;
    }
  }

  return 0;
}
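The cpt_ctx bookkeeping at the end is a fixed-size ring buffer of (timestamp, cracked-count) pairs that later feeds the cracked-per-time statistics in the status view. Here is a stand-alone sketch of that update, with simplified types and an illustrative CPT_CACHE value (the real constant lives in hashcat's headers):

#include <stdio.h>
#include <stddef.h>
#include <time.h>

#define CPT_CACHE 1024 // illustrative size only; the real constant is defined in hashcat's headers

typedef struct
{
  time_t   timestamp;
  unsigned cracked;
} cpt_entry_t;

typedef struct
{
  cpt_entry_t cpt_buf[CPT_CACHE];
  size_t      cpt_pos;
  unsigned    cpt_total;
} cpt_demo_t;

// record a batch of newly cracked hashes; once the buffer is full the oldest slot is overwritten

static void cpt_push (cpt_demo_t *cpt, const unsigned cracked)
{
  cpt->cpt_buf[cpt->cpt_pos].timestamp = time (NULL);
  cpt->cpt_buf[cpt->cpt_pos].cracked   = cracked;

  cpt->cpt_pos++;

  cpt->cpt_total += cracked;

  if (cpt->cpt_pos == CPT_CACHE) cpt->cpt_pos = 0;
}

int main (void)
{
  cpt_demo_t cpt; memset (&cpt, 0, sizeof (cpt));

  cpt_push (&cpt, 3);
  cpt_push (&cpt, 1);

  printf ("total cracked so far: %u\n", cpt.cpt_total); // 4

  return 0;
}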
Example #6
static void keypress (hashcat_ctx_t *hashcat_ctx)
{
  status_ctx_t   *status_ctx   = hashcat_ctx->status_ctx;
  user_options_t *user_options = hashcat_ctx->user_options;

  // this wait is required because some of the variables used below are not yet initialized at this point
  while (status_ctx->devices_status == STATUS_INIT) usleep (100000);

  const bool quiet = user_options->quiet;

  tty_break ();

  while (status_ctx->shutdown_outer == false)
  {
    int ch = tty_getchar ();

    if (ch == -1) break;

    if (ch ==  0) continue;

    //https://github.com/hashcat/hashcat/issues/302
    //#if defined (_POSIX)
    //if (ch != '\n')
    //#endif

    hc_thread_mutex_lock (status_ctx->mux_display);

    event_log_info (hashcat_ctx, NULL);

    switch (ch)
    {
      case 's':
      case '\r':
      case '\n':

        event_log_info (hashcat_ctx, NULL);

        status_display (hashcat_ctx);

        event_log_info (hashcat_ctx, NULL);

        if (quiet == false) send_prompt (hashcat_ctx);

        break;

      case 'b':

        event_log_info (hashcat_ctx, NULL);

        bypass (hashcat_ctx);

        event_log_info (hashcat_ctx, "Next dictionary / mask in queue selected. Bypassing current one.");

        event_log_info (hashcat_ctx, NULL);

        if (quiet == false) send_prompt (hashcat_ctx);

        break;

      case 'p':

        if (status_ctx->devices_status != STATUS_PAUSED)
        {
          event_log_info (hashcat_ctx, NULL);

          SuspendThreads (hashcat_ctx);

          if (status_ctx->devices_status == STATUS_PAUSED)
          {
            event_log_info (hashcat_ctx, "Paused");
          }

          event_log_info (hashcat_ctx, NULL);
        }

        if (quiet == false) send_prompt (hashcat_ctx);

        break;

      case 'r':

        if (status_ctx->devices_status == STATUS_PAUSED)
        {
          event_log_info (hashcat_ctx, NULL);

          ResumeThreads (hashcat_ctx);

          if (status_ctx->devices_status != STATUS_PAUSED)
          {
            event_log_info (hashcat_ctx, "Resumed");
          }

          event_log_info (hashcat_ctx, NULL);
        }

        if (quiet == false) send_prompt (hashcat_ctx);

        break;

      case 'c':

        event_log_info (hashcat_ctx, NULL);

        stop_at_checkpoint (hashcat_ctx);

        if (status_ctx->checkpoint_shutdown == true)
        {
          event_log_info (hashcat_ctx, "Checkpoint enabled. Will quit at next restore-point update.");
        }
        else
        {
          event_log_info (hashcat_ctx, "Checkpoint disabled. Restore-point updates will no longer be monitored.");
        }

        event_log_info (hashcat_ctx, NULL);

        if (quiet == false) send_prompt (hashcat_ctx);

        break;

      case 'q':

        event_log_info (hashcat_ctx, NULL);

        myquit (hashcat_ctx);

        break;

      default:

        if (quiet == false) send_prompt (hashcat_ctx);

        break;
    }

    //https://github.com/hashcat/hashcat/issues/302
    //#if defined (_POSIX)
    //if (ch != '\n')
    //#endif

    hc_thread_mutex_unlock (status_ctx->mux_display);
  }

  tty_fix ();
}
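tty_break, tty_getchar and tty_fix are not shown in this example. On POSIX systems this kind of single-key handling is typically implemented by switching the terminal into non-canonical mode and restoring it afterwards; the following is a minimal sketch of that idea, not hashcat's actual implementation:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

static struct termios saved;

static void tty_break_demo (void)
{
  struct termios raw;

  tcgetattr (STDIN_FILENO, &saved);

  raw = saved;

  raw.c_lflag &= ~(ICANON | ECHO); // read single keys without waiting for Enter

  tcsetattr (STDIN_FILENO, TCSANOW, &raw);
}

static void tty_fix_demo (void)
{
  tcsetattr (STDIN_FILENO, TCSANOW, &saved); // restore the original terminal state
}

int main (void)
{
  tty_break_demo ();

  printf ("press a key: ");
  fflush (stdout);

  const int ch = getchar ();

  printf ("\ngot '%c'\n", ch);

  tty_fix_demo ();

  return 0;
}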
Example #7
static int monitor (hashcat_ctx_t *hashcat_ctx)
{
  hashes_t       *hashes        = hashcat_ctx->hashes;
  hwmon_ctx_t    *hwmon_ctx     = hashcat_ctx->hwmon_ctx;
  opencl_ctx_t   *opencl_ctx    = hashcat_ctx->opencl_ctx;
  restore_ctx_t  *restore_ctx   = hashcat_ctx->restore_ctx;
  status_ctx_t   *status_ctx    = hashcat_ctx->status_ctx;
  user_options_t *user_options  = hashcat_ctx->user_options;

  bool runtime_check      = false;
  bool remove_check       = false;
  bool status_check       = false;
  bool restore_check      = false;
  bool hwmon_check        = false;
  bool performance_check  = false;

  const int    sleep_time = 1;
  const double exec_low   = 50.0;  // in ms
  const double util_low   = 90.0;  // in percent

  if (user_options->runtime)
  {
    runtime_check = true;
  }

  if (restore_ctx->enabled == true)
  {
    restore_check = true;
  }

  if ((user_options->remove == true) && (hashes->hashlist_mode == HL_MODE_FILE))
  {
    remove_check = true;
  }

  if (user_options->status == true)
  {
    status_check = true;
  }

  if (hwmon_ctx->enabled == true)
  {
    hwmon_check = true;
  }

  if (hwmon_ctx->enabled == true)
  {
    performance_check = true; // this check simply requires hwmon to work
  }

  if ((runtime_check == false) && (remove_check == false) && (status_check == false) && (restore_check == false) && (hwmon_check == false) && (performance_check == false))
  {
    return 0;
  }

  // timer

  u32 slowdown_warnings    = 0;
  u32 performance_warnings = 0;

  u32 restore_left  = user_options->restore_timer;
  u32 remove_left   = user_options->remove_timer;
  u32 status_left   = user_options->status_timer;

  while (status_ctx->shutdown_inner == false)
  {
    sleep (sleep_time);

    if (status_ctx->devices_status == STATUS_INIT) continue;

    if (hwmon_ctx->enabled == true)
    {
      hc_thread_mutex_lock (status_ctx->mux_hwmon);

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        if ((opencl_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;

        const int temperature = hm_get_temperature_with_device_id (hashcat_ctx, device_id);

        if (temperature > (int) user_options->hwmon_temp_abort)
        {
          EVENT_DATA (EVENT_MONITOR_TEMP_ABORT, &device_id, sizeof (u32));

          myabort (hashcat_ctx);
        }
      }

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        const int rc_throttle = hm_get_throttle_with_device_id (hashcat_ctx, device_id);

        if (rc_throttle == -1) continue;

        if (rc_throttle > 0)
        {
          slowdown_warnings++;

          if (slowdown_warnings == 1) EVENT_DATA (EVENT_MONITOR_THROTTLE1, &device_id, sizeof (u32));
          if (slowdown_warnings == 2) EVENT_DATA (EVENT_MONITOR_THROTTLE2, &device_id, sizeof (u32));
          if (slowdown_warnings == 3) EVENT_DATA (EVENT_MONITOR_THROTTLE3, &device_id, sizeof (u32));
        }
        else
        {
          if (slowdown_warnings > 0) slowdown_warnings--;
        }
      }

      hc_thread_mutex_unlock (status_ctx->mux_hwmon);
    }

    if (restore_check == true)
    {
      restore_left--;

      if (restore_left == 0)
      {
        const int rc = cycle_restore (hashcat_ctx);

        if (rc == -1) return -1;

        restore_left = user_options->restore_timer;
      }
    }

    if ((runtime_check == true) && (status_ctx->runtime_start > 0))
    {
      const int runtime_left = get_runtime_left (hashcat_ctx);

      if (runtime_left <= 0)
      {
        EVENT_DATA (EVENT_MONITOR_RUNTIME_LIMIT, NULL, 0);

        myabort_runtime (hashcat_ctx);
      }
    }

    if (remove_check == true)
    {
      remove_left--;

      if (remove_left == 0)
      {
        if (hashes->digests_saved != hashes->digests_done)
        {
          hashes->digests_saved = hashes->digests_done;

          const int rc = save_hash (hashcat_ctx);

          if (rc == -1) return -1;
        }

        remove_left = user_options->remove_timer;
      }
    }

    if (status_check == true)
    {
      status_left--;

      if (status_left == 0)
      {
        hc_thread_mutex_lock (status_ctx->mux_display);

        EVENT_DATA (EVENT_MONITOR_STATUS_REFRESH, NULL, 0);

        hc_thread_mutex_unlock (status_ctx->mux_display);

        status_left = user_options->status_timer;
      }
    }

    if (performance_check == true)
    {
      int exec_cnt = 0;
      int util_cnt = 0;

      double exec_total = 0;
      double util_total = 0;

      hc_thread_mutex_lock (status_ctx->mux_hwmon);

      for (u32 device_id = 0; device_id < opencl_ctx->devices_cnt; device_id++)
      {
        hc_device_param_t *device_param = &opencl_ctx->devices_param[device_id];

        if (device_param->skipped == true) continue;

        exec_cnt++;

        const double exec = status_get_exec_msec_dev (hashcat_ctx, device_id);

        exec_total += exec;

        const int util = hm_get_utilization_with_device_id (hashcat_ctx, device_id);

        if (util == -1) continue;

        util_total += (double) util;

        util_cnt++;
      }

      hc_thread_mutex_unlock (status_ctx->mux_hwmon);

      double exec_avg = 0;
      double util_avg = 0;

      if (exec_cnt > 0) exec_avg = exec_total / exec_cnt;
      if (util_cnt > 0) util_avg = util_total / util_cnt;

      if ((exec_avg > 0) && (exec_avg < exec_low))
      {
        performance_warnings++;

        if (performance_warnings == 10) EVENT_DATA (EVENT_MONITOR_PERFORMANCE_HINT, NULL, 0);
      }

      if ((util_avg > 0) && (util_avg < util_low))
      {
        performance_warnings++;

        if (performance_warnings == 10) EVENT_DATA (EVENT_MONITOR_PERFORMANCE_HINT, NULL, 0);
      }
    }

    // stdin read timeout check
    // note: we skip the stdin timeout check if it was disabled with stdin_timeout_abort set to 0

    if (user_options->stdin_timeout_abort != 0)
    {
      if (status_get_progress_done (hashcat_ctx) == 0)
      {
        if (status_ctx->stdin_read_timeout_cnt > 0)
        {
          if (status_ctx->stdin_read_timeout_cnt >= user_options->stdin_timeout_abort)
          {
            EVENT_DATA (EVENT_MONITOR_NOINPUT_ABORT, NULL, 0);

            myabort (hashcat_ctx);

            status_ctx->shutdown_inner = true;

            break;
          }

          if ((status_ctx->stdin_read_timeout_cnt % STDIN_TIMEOUT_WARN) == 0)
          {
            EVENT_DATA (EVENT_MONITOR_NOINPUT_HINT, NULL, 0);
          }
        }
      }
    }
  }

  // final round of save_hash

  if (remove_check == true)
  {
    if (hashes->digests_saved != hashes->digests_done)
    {
      const int rc = save_hash (hashcat_ctx);

      if (rc == -1) return -1;
    }
  }

  // final round of cycle_restore

  if (restore_check == true)
  {
    const int rc = cycle_restore (hashcat_ctx);

    if (rc == -1) return -1;
  }

  return 0;
}
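The EVENT_DATA calls sprinkled through the monitor examples are, in hashcat, thin convenience macros around event_call from Example #1, along these lines (paraphrased; see hashcat's include/event.h for the authoritative definitions):

// paraphrased convenience macros; they forward to event_call and rely on a
// hashcat_ctx variable being in scope at the call site

#define EVENT(id)               event_call ((id), hashcat_ctx, NULL, 0)
#define EVENT_DATA(id,buf,len)  event_call ((id), hashcat_ctx, (buf), (len))

So EVENT_DATA (EVENT_MONITOR_THROTTLE1, &device_id, sizeof (u32)) expands to a plain event_call with the device id as payload.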