
Commit

Merge branch 'main' into rebase-to-302
matt-fleming authored Jan 31, 2024
2 parents a92acb2 + ac5ebee commit 17ce223
Showing 2 changed files with 34 additions and 83 deletions.
1 change: 1 addition & 0 deletions src/databricks/sql/auth/thrift_http_client.py
@@ -5,6 +5,7 @@

import six
import thrift
import thrift.transport.THttpClient

logger = logging.getLogger(__name__)

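
The single addition to `thrift_http_client.py` is the explicit `import thrift.transport.THttpClient`. Importing a package in Python does not automatically bind its submodules, so `import thrift` alone is not guaranteed to make `thrift.transport.THttpClient` resolvable as an attribute. A minimal sketch of the distinction (module names as in the diff; whether the unqualified access fails in practice depends on what the package's `__init__` happens to import):

```python
import thrift

# Without an explicit submodule import, attribute access may fail:
# thrift.transport.THttpClient  # can raise AttributeError

import thrift.transport.THttpClient  # binds thrift.transport and the submodule

client_cls = thrift.transport.THttpClient.THttpClient  # now reliably resolvable
```
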
116 changes: 33 additions & 83 deletions src/databricks/sql/thrift_backend.py
@@ -1,27 +1,27 @@
from decimal import Decimal
import errno
import logging
import math
import time
import uuid
import os
import threading
import uuid
import time

from ssl import CERT_NONE, CERT_REQUIRED, create_default_context
from typing import List, Union

import databricks.sql.auth.thrift_http_client
import pyarrow
import thrift.transport.THttpClient
import thrift.protocol.TBinaryProtocol
import thrift.transport.THttpClient
import thrift.transport.TSocket
import thrift.transport.TTransport

import urllib3.exceptions

import databricks.sql.auth.thrift_http_client
from databricks.sql.auth.thrift_http_client import CommandType
from databricks.sql.auth.authenticators import AuthProvider
from databricks.sql.thrift_api.TCLIService import TCLIService, ttypes
from databricks.sql import *
from databricks.sql.exc import MaxRetryDurationError
from databricks.sql.thrift_api.TCLIService.TCLIService import (
Client as TCLIServiceClient,
)
@@ -155,14 +155,9 @@ def __init__(

self.staging_allowed_local_path = staging_allowed_local_path
self._initialize_retry_args(kwargs)
self._use_arrow_native_complex_types = kwargs.get(
"_use_arrow_native_complex_types", True
)
self._use_arrow_native_complex_types = kwargs.get("_use_arrow_native_complex_types", True)
self._use_arrow_native_decimals = kwargs.get("_use_arrow_native_decimals", True)
self._use_arrow_native_timestamps = kwargs.get(
"_use_arrow_native_timestamps", True
)

self._use_arrow_native_timestamps = kwargs.get("_use_arrow_native_timestamps", True)
# Cloud fetch
self.max_download_threads = kwargs.get("max_download_threads", 10)

@@ -231,13 +226,13 @@ def __init__(
**additional_transport_args, # type: ignore
)

timeout = THRIFT_SOCKET_TIMEOUT or kwargs.get(
"_socket_timeout", DEFAULT_SOCKET_TIMEOUT
)

# HACK!
timeout = THRIFT_SOCKET_TIMEOUT or kwargs.get("_socket_timeout", DEFAULT_SOCKET_TIMEOUT)
logger.info(f"Setting timeout HACK! to {timeout}")

# setTimeout defaults to 15 minutes and is expected in ms

self._transport.setTimeout(timeout and (float(timeout) * 1000.0))

self._transport.setCustomHeaders(dict(http_headers))
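
On the timeout lines above: the inline comment notes that `setTimeout` expects milliseconds, while the value fed in (`_socket_timeout` or the default) is in seconds. The expression `timeout and (float(timeout) * 1000.0)` does the unit conversion while letting falsy values (`None`, `0`) short-circuit through unscaled, because `x and y` returns `x` when `x` is falsy. A minimal sketch of the idiom (the helper name is hypothetical):

```python
def to_thrift_millis(timeout_seconds):
    # Mirrors the expression in the diff: falsy inputs short-circuit,
    # so None stays None and 0 stays 0 rather than being scaled.
    return timeout_seconds and (float(timeout_seconds) * 1000.0)

assert to_thrift_millis(None) is None
assert to_thrift_millis(0) == 0
assert to_thrift_millis(900) == 900_000.0  # 15 minutes, the documented default
```
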
@@ -260,15 +255,11 @@ def _initialize_retry_args(self, kwargs):
given_or_default = type_(kwargs.get(key, default))
bound = _bound(min, max, given_or_default)
setattr(self, key, bound)
logger.debug(
"retry parameter: {} given_or_default {}".format(key, given_or_default)
)
logger.debug("retry parameter: {} given_or_default {}".format(key, given_or_default))
if bound != given_or_default:
logger.warning(
"Override out of policy retry parameter: "
+ "{} given {}, restricted to {}".format(
key, given_or_default, bound
)
+ "{} given {}, restricted to {}".format(key, given_or_default, bound)
)

# Fail on retry delay min > max; consider later adding fail on min > duration?
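
In `_initialize_retry_args` above, each retry parameter is coerced to its expected type, clamped into the policy range by `_bound(min, max, given_or_default)`, and a warning is logged whenever the user's value had to be restricted. A minimal sketch of the clamping step, with illustrative bounds (the real limits come from the connector's retry policy, outside this diff):

```python
def _bound(lower, upper, value):
    # Clamp value into [lower, upper]; a None bound leaves that side open.
    if lower is not None:
        value = max(lower, value)
    if upper is not None:
        value = min(upper, value)
    return value

# Illustrative policy: a retry delay restricted to [1, 60] seconds.
assert _bound(1, 60, 0.5) == 1    # below the minimum: raised
assert _bound(1, 60, 600) == 60   # above the maximum: lowered
assert _bound(1, 60, 10) == 10    # in range: unchanged
```
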
@@ -296,9 +287,7 @@ def _extract_error_message_from_headers(headers):
if THRIFT_ERROR_MESSAGE_HEADER in headers:
err_msg = headers[THRIFT_ERROR_MESSAGE_HEADER]
if DATABRICKS_ERROR_OR_REDIRECT_HEADER in headers:
if (
err_msg
): # We don't expect both to be set, but log both here just in case
if err_msg: # We don't expect both to be set, but log both here just in case
err_msg = "Thriftserver error: {}, Databricks error: {}".format(
err_msg, headers[DATABRICKS_ERROR_OR_REDIRECT_HEADER]
)
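
For context on the condensed condition above: `_extract_error_message_from_headers` prefers the Thrift server's error header, and when the Databricks error-or-redirect header is also present it concatenates the two rather than discarding one, since both being set is unexpected but worth surfacing. A minimal sketch of that precedence, with placeholder header names (the real constants are defined elsewhere in the connector):

```python
def extract_error_message(headers):
    # Placeholder header names; the connector defines its own constants.
    thrift_header = "x-thriftserver-error-message"
    databricks_header = "x-databricks-error-or-redirect-message"

    err_msg = headers.get(thrift_header)
    if databricks_header in headers:
        if err_msg:  # both set: report both rather than dropping one
            err_msg = "Thriftserver error: {}, Databricks error: {}".format(
                err_msg, headers[databricks_header]
            )
        else:
            err_msg = headers[databricks_header]
    return err_msg
```
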
@@ -529,10 +518,7 @@ def _check_initial_namespace(self, catalog, schema, response):
if not (catalog or schema):
return

if (
response.serverProtocolVersion
< ttypes.TProtocolVersion.SPARK_CLI_SERVICE_PROTOCOL_V4
):
if response.serverProtocolVersion < ttypes.TProtocolVersion.SPARK_CLI_SERVICE_PROTOCOL_V4:
raise InvalidServerResponseError(
"Setting initial namespace not supported by the DBR version, "
"Please use a Databricks SQL endpoint or a cluster with DBR >= 9.0."
@@ -547,10 +533,7 @@ def _check_initial_namespace(self, catalog, schema, response):

def _check_session_configuration(self, session_configuration):
# This client expects timetampsAsString to be false, so we do not allow users to modify that
if (
session_configuration.get(TIMESTAMP_AS_STRING_CONFIG, "false").lower()
!= "false"
):
if session_configuration.get(TIMESTAMP_AS_STRING_CONFIG, "false").lower() != "false":
raise Error(
"Invalid session configuration: {} cannot be changed "
"while using the Databricks SQL connector, it must be false not {}".format(
@@ -562,18 +545,14 @@ def _check_session_configuration(self, session_configuration):
def open_session(self, session_configuration, catalog, schema):
try:
self._transport.open()
session_configuration = {
k: str(v) for (k, v) in (session_configuration or {}).items()
}
session_configuration = {k: str(v) for (k, v) in (session_configuration or {}).items()}
self._check_session_configuration(session_configuration)
# We want to receive proper Timestamp arrow types.
# We set it also in confOverlay in TExecuteStatementReq on a per query basic,
# but it doesn't hurt to also set for the whole session.
session_configuration[TIMESTAMP_AS_STRING_CONFIG] = "false"
if catalog or schema:
initial_namespace = ttypes.TNamespace(
catalogName=catalog, schemaName=schema
)
initial_namespace = ttypes.TNamespace(catalogName=catalog, schemaName=schema)
else:
initial_namespace = None
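
A note on the comprehension near the top of this hunk: `(session_configuration or {})` tolerates a `None` configuration, and every value is passed through `str` because the Thrift session-configuration overlay is a string-to-string map. A minimal sketch of the normalization (the helper name is hypothetical):

```python
def normalize(session_configuration):
    # None becomes an empty overlay; values are coerced to strings because
    # the Thrift configuration map is string-to-string.
    return {k: str(v) for (k, v) in (session_configuration or {}).items()}

assert normalize(None) == {}
assert normalize({"spark.sql.shuffle.partitions": 8}) == {
    "spark.sql.shuffle.partitions": "8"
}
```
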

@@ -599,9 +578,7 @@ def close_session(self, session_handle) -> None:
finally:
self._transport.close()

def _check_command_not_in_error_or_closed_state(
self, op_handle, get_operations_resp
):
def _check_command_not_in_error_or_closed_state(self, op_handle, get_operations_resp):
if get_operations_resp.operationState == ttypes.TOperationState.ERROR_STATE:
if get_operations_resp.displayMessage:
raise ServerOperationError(
@@ -691,9 +668,7 @@ def map_type(t_type_entry):
else:
# Current thriftserver implementation should always return a primitiveEntry,
# even for complex types
raise OperationalError(
"Thrift protocol error: t_type_entry not a primitiveEntry"
)
raise OperationalError("Thrift protocol error: t_type_entry not a primitiveEntry")

def convert_col(t_column_desc):
return pyarrow.field(
@@ -711,9 +686,7 @@ def _col_to_description(col):
# Drop _TYPE suffix
cleaned_type = (name[:-5] if name.endswith("_TYPE") else name).lower()
else:
raise OperationalError(
"Thrift protocol error: t_type_entry not a primitiveEntry"
)
raise OperationalError("Thrift protocol error: t_type_entry not a primitiveEntry")

if type_entry.primitiveEntry.type == ttypes.TTypeId.DECIMAL_TYPE:
qualifiers = type_entry.primitiveEntry.typeQualifiers.qualifiers
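
On the suffix handling above: Thrift type names arrive as enum labels like `INT_TYPE` or `DECIMAL_TYPE`; dropping the trailing `_TYPE` (five characters) and lower-casing yields the type name used in the cursor description. A minimal sketch with a hypothetical helper mirroring that expression:

```python
def clean_type_name(name):
    # "INT_TYPE" -> "int"; names without the suffix are just lower-cased.
    return (name[:-5] if name.endswith("_TYPE") else name).lower()

assert clean_type_name("INT_TYPE") == "int"
assert clean_type_name("DECIMAL_TYPE") == "decimal"
assert clean_type_name("STRING_TYPE") == "string"
```
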
@@ -734,9 +707,7 @@ def _col_to_description(col):

@staticmethod
def _hive_schema_to_description(t_table_schema):
return [
ThriftBackend._col_to_description(col) for col in t_table_schema.columns
]
return [ThriftBackend._col_to_description(col) for col in t_table_schema.columns]

def _results_message_to_execute_response(self, resp, operation_state):
if resp.directResults and resp.directResults.resultSetMetadata:
@@ -764,9 +735,7 @@ def _results_message_to_execute_response(self, resp, operation_state):
or (not direct_results.resultSet)
or direct_results.resultSet.hasMoreRows
)
description = self._hive_schema_to_description(
t_result_set_metadata_resp.schema
)
description = self._hive_schema_to_description(t_result_set_metadata_resp.schema)
schema_bytes = (
t_result_set_metadata_resp.arrowSchema
or self._hive_schema_to_arrow_schema(t_result_set_metadata_resp.schema)
@@ -807,8 +776,7 @@ def _wait_until_command_done(self, op_handle, initial_operation_status_resp):
op_handle, initial_operation_status_resp
)
operation_state = (
initial_operation_status_resp
and initial_operation_status_resp.operationState
initial_operation_status_resp and initial_operation_status_resp.operationState
)
while not operation_state or operation_state in [
ttypes.TOperationState.RUNNING_STATE,
@@ -823,21 +791,13 @@ def _wait_until_command_done(self, op_handle, initial_operation_status_resp):
def _check_direct_results_for_error(t_spark_direct_results):
if t_spark_direct_results:
if t_spark_direct_results.operationStatus:
ThriftBackend._check_response_for_error(
t_spark_direct_results.operationStatus
)
ThriftBackend._check_response_for_error(t_spark_direct_results.operationStatus)
if t_spark_direct_results.resultSetMetadata:
ThriftBackend._check_response_for_error(
t_spark_direct_results.resultSetMetadata
)
ThriftBackend._check_response_for_error(t_spark_direct_results.resultSetMetadata)
if t_spark_direct_results.resultSet:
ThriftBackend._check_response_for_error(
t_spark_direct_results.resultSet
)
ThriftBackend._check_response_for_error(t_spark_direct_results.resultSet)
if t_spark_direct_results.closeOperation:
ThriftBackend._check_response_for_error(
t_spark_direct_results.closeOperation
)
ThriftBackend._check_response_for_error(t_spark_direct_results.closeOperation)
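
The four checks above cover every sub-response that can ride along in a direct-results payload: the operation status, the result-set metadata, the result set itself, and the close-operation response. An equivalent, more compact sketch of the same walk (a refactoring suggestion, not the connector's code):

```python
@staticmethod
def _check_direct_results_for_error(t_spark_direct_results):
    if not t_spark_direct_results:
        return
    # Each sub-response is optional; validate whichever ones are present.
    for sub_response in (
        t_spark_direct_results.operationStatus,
        t_spark_direct_results.resultSetMetadata,
        t_spark_direct_results.resultSet,
        t_spark_direct_results.closeOperation,
    ):
        if sub_response:
            ThriftBackend._check_response_for_error(sub_response)
```
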

def execute_command(
self,
@@ -864,9 +824,7 @@ def execute_command(
sessionHandle=session_handle,
statement=operation,
runAsync=True,
getDirectResults=ttypes.TSparkGetDirectResults(
maxRows=max_rows, maxBytes=max_bytes
),
getDirectResults=ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes),
canReadArrowResult=True,
canDecompressLZ4Result=lz4_compression,
canDownloadResult=use_cloud_fetch,
@@ -885,9 +843,7 @@ def get_catalogs(self, session_handle, max_rows, max_bytes, cursor):

req = ttypes.TGetCatalogsReq(
sessionHandle=session_handle,
getDirectResults=ttypes.TSparkGetDirectResults(
maxRows=max_rows, maxBytes=max_bytes
),
getDirectResults=ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes),
)
resp = self.make_request(self._client.GetCatalogs, req)
return self._handle_execute_response(resp, cursor)
@@ -905,9 +861,7 @@ def get_schemas(

req = ttypes.TGetSchemasReq(
sessionHandle=session_handle,
getDirectResults=ttypes.TSparkGetDirectResults(
maxRows=max_rows, maxBytes=max_bytes
),
getDirectResults=ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes),
catalogName=catalog_name,
schemaName=schema_name,
)
@@ -929,9 +883,7 @@ def get_tables(

req = ttypes.TGetTablesReq(
sessionHandle=session_handle,
getDirectResults=ttypes.TSparkGetDirectResults(
maxRows=max_rows, maxBytes=max_bytes
),
getDirectResults=ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes),
catalogName=catalog_name,
schemaName=schema_name,
tableName=table_name,
@@ -955,9 +907,7 @@ def get_columns(

req = ttypes.TGetColumnsReq(
sessionHandle=session_handle,
getDirectResults=ttypes.TSparkGetDirectResults(
maxRows=max_rows, maxBytes=max_bytes
),
getDirectResults=ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes),
catalogName=catalog_name,
schemaName=schema_name,
tableName=table_name,
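
A pattern worth noting across the `execute_command`, `get_catalogs`, `get_schemas`, `get_tables`, and `get_columns` hunks above: each request now builds `ttypes.TSparkGetDirectResults(maxRows=max_rows, maxBytes=max_bytes)` on one line. Direct results ask the server to include the first batch of results, capped by those limits, in the same response as the request itself, saving a round trip. A minimal sketch of the shared construction (the session handle, limits, and name arguments are assumed to come from the surrounding connection):

```python
# Sketch only: mirrors the request shape in the diff above.
req = ttypes.TGetTablesReq(
    sessionHandle=session_handle,  # assumed: handle from an open_session() call
    getDirectResults=ttypes.TSparkGetDirectResults(
        maxRows=max_rows,    # cap on rows returned inline with the response
        maxBytes=max_bytes,  # cap on bytes returned inline with the response
    ),
    catalogName=catalog_name,
    schemaName=schema_name,
    tableName=table_name,
)
resp = self.make_request(self._client.GetTables, req)
```
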
