/* Copies the raw bytes of the field at column_idx out of the serialized row
 * into m_field_buffer and null-terminates it, so the field can be consumed
 * as a C string by later accessors. */
void HiveSerializedRowSet::extractField(size_t column_idx) {
  assert(column_idx < getColumnCount());
  assert(m_row_weak_ptr != NULL);
  /* Sanity check: the field buffer is expected to be large enough for any field. */
  assert(getFieldLen(column_idx) < sizeof(m_field_buffer));
  /* Overflow guard: clamp the copy length to one byte less than the buffer
   * size so there is always room for the terminating null character. */
  size_t copy_len = min(getFieldLen(column_idx), sizeof(m_field_buffer) - 1);
  size_t bytes_copied =
      m_row_weak_ptr->copy(m_field_buffer, copy_len, m_field_offsets[column_idx]);
  assert(bytes_copied == copy_len);
  /* Guarantee the extracted field is a valid, null-terminated C string. */
  m_field_buffer[copy_len] = '\0';
}
/* Computes the total serialized byte length of one tuple (row) of the given
 * relation by summing the field length of every entry in its field map.
 *
 * Fix: the original ranged-for iterated by value (`auto field_map`), copying
 * each map element (a key/value pair) on every iteration; iterate by const
 * reference instead, since only the mapped value is read. */
inline size_t getTupleLen(const MetaRelation& relation) {
  size_t len = 0;
  for (const auto& field_map : relation.fields) {
    len += getFieldLen(field_map.second);
  }
  return len;
}
/* Fetches the field at column_idx as a null-terminated C string into `buffer`,
 * supporting chunked retrieval: if `buffer_len` is too small for the whole
 * field, successive calls with the same column_idx return successive chunks
 * (HIVE_SUCCESS_WITH_MORE_DATA until exhausted, then HIVE_SUCCESS, then
 * HIVE_NO_MORE_DATA). Read progress is tracked in m_last_column_fetched,
 * m_bytes_read and m_is_completely_read.
 *
 * Outputs:
 *   buffer         - receives the (possibly partial) field data, always
 *                    null-terminated.
 *   data_byte_size - if non-NULL, receives the number of bytes that remained
 *                    to be returned before this call (0 for a NULL value).
 *   is_null_value  - set to 1 if the field matches the table's null format
 *                    spec, 0 otherwise.
 * Returns HIVE_ERROR on invalid arguments (message written to err_buf). */
HiveReturn HiveRowSet::getFieldAsCString(size_t column_idx, char* buffer, size_t buffer_len,
                                         size_t* data_byte_size, int* is_null_value,
                                         char* err_buf, size_t err_buf_len) {
  RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__,
                   "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__,
                   "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__,
                   "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__,
                   "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(buffer_len == 0, __FUNCTION__,
                   "Output buffer cannot have a size of zero.", err_buf, err_buf_len, HIVE_ERROR);

  /* Switching to a new column: (re)extract the field into m_field_buffer and
   * reset the chunked-read state so reading starts from the beginning. */
  if (m_last_column_fetched != column_idx) {
    extractField(column_idx);
    m_bytes_read = 0; /* Reset the read offset if different from the last column fetched */
    m_last_column_fetched = column_idx;
    m_is_completely_read = false;
  }

  if (m_is_completely_read) {
    return HIVE_NO_MORE_DATA; /* This field has already been completely fetched by a previous call*/
  }

  /* If the column data is the same as the null format spec... */
  if (strcmp(getNullFormat(), m_field_buffer) == 0) {
    /* This value must be NULL */
    *is_null_value = 1;
    if (data_byte_size != NULL) {
      *data_byte_size = 0;
    }
    /* buffer_len >= 1 was asserted above, so writing one byte is safe. */
    buffer[0] = '\0';
  } else {
    /* This value has been determined not to be NULL */
    *is_null_value = 0;
    size_t data_total_len = getFieldLen(column_idx);
    /* Cannot read more data then the total number of bytes available */
    assert(data_total_len >= m_bytes_read);
    size_t bytes_remaining = data_total_len - m_bytes_read; // Excludes null char
    if (data_byte_size != NULL) {
      /* Save the number of remaining characters to return before this fetch */
      *data_byte_size = bytes_remaining;
    }
    /* Move pointer to the read location */
    const char* src_str_ptr = m_field_buffer + m_bytes_read;
    /* The total number of bytes to read (+1 null terminator) should be no more than the
     * size of the field buffer */
    assert(m_bytes_read + bytes_remaining + 1 <= sizeof(m_field_buffer));
    /* Copy as many characters as possible from the read location */
    size_t bytes_copied = safe_strncpy(buffer, src_str_ptr,
                                       min(buffer_len, bytes_remaining + 1)); // +1 for null terminator
    /* bytes_copied does not count the null terminator */
    m_bytes_read += bytes_copied;
    if (m_bytes_read < data_total_len) {
      return HIVE_SUCCESS_WITH_MORE_DATA; /* Data truncated; more data to return */
    }
  }
  m_is_completely_read = true;
  return HIVE_SUCCESS; /* All data successfully read */
}
/* Writes the byte length of the field at column_idx into *col_len.
 * Returns HIVE_ERROR (with a message in err_buf) when the output pointer is
 * NULL, the rowset has no columns, or the index is out of range; otherwise
 * returns HIVE_SUCCESS. */
HiveReturn HiveRowSet::getFieldDataLen(size_t column_idx, size_t* col_len,
                                       char* err_buf, size_t err_buf_len) {
  /* Validate arguments before touching any state. */
  RETURN_ON_ASSERT(col_len == NULL, __FUNCTION__,
                   "Pointer to col_len (output) cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__,
                   "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR);
  RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__,
                   "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR);
  const size_t field_len = getFieldLen(column_idx);
  *col_len = field_len;
  return HIVE_SUCCESS;
}