Package org.apache.spark.sql.jdbc
Class TeradataDialect

java.lang.Object
  org.apache.spark.sql.jdbc.TeradataDialect

Constructor Summary
Constructors
TeradataDialect()

Method Summary
Modifier and Type  Method  Description
static String[]  alterTable(String tableName, scala.collection.Seq<TableChange> changes, int dbMajorVersion)
static void  beforeFetch(Connection connection, scala.collection.immutable.Map<String, String> properties)
abstract static boolean  canEqual(Object that)
static boolean  canHandle(String url)
static AnalysisException  classifyException(String message, Throwable e)
static scala.Option<String>  compileAggregate(AggregateFunc aggFunction)
static scala.Option<String>  compileExpression(Expression expr)
static Object  compileValue(Object value)
static Timestamp  convertJavaTimestampToTimestamp(Timestamp t)
static LocalDateTime  convertJavaTimestampToTimestampNTZ(Timestamp t)
static Timestamp  convertTimestampNTZToJavaTimestamp(LocalDateTime ldt)
static scala.Function1<Object,Connection>  createConnectionFactory(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
static String  createIndex(String indexName, Identifier tableIdent, NamedReference[] columns, Map<NamedReference, Map<String, String>> columnsProperties, Map<String, String> properties)
static void  createSchema(Statement statement, String schema, String comment)
static void  createTable(Statement statement, String tableName, String strSchema, org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite options)
static String  dropIndex(String indexName, Identifier tableIdent)
static String  dropSchema(String schema, boolean cascade)
abstract static boolean  equals(Object that)
static scala.collection.Seq<scala.Tuple2<String,UnboundFunction>>  functions()
static String  getAddColumnQuery(String tableName, String columnName, String dataType)
static scala.Option<DataType>  getCatalystType(int sqlType, String typeName, int size, MetadataBuilder md)
static String  getDeleteColumnQuery(String tableName, String columnName)
static String  getFullyQualifiedQuotedTableName(Identifier ident)
static JdbcSQLQueryBuilder  getJdbcSQLQueryBuilder(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
static scala.Option<JdbcType>  getJDBCType(DataType dt)
static String  getLimitClause(Integer limit)
static String  getOffsetClause(Integer offset)
static String  getRenameColumnQuery(String tableName, String columnName, String newName, int dbMajorVersion)
static String  getSchemaCommentQuery(String schema, String comment)
static String  getSchemaQuery(String table)
static String  getTableCommentQuery(String table, String comment)
static String  getTableExistsQuery(String table)
static String  getTableSample(org.apache.spark.sql.execution.datasources.v2.TableSampleInfo sample)
static String  getTruncateQuery(String table, scala.Option<Object> cascade)  The SQL query used to truncate a table.
static scala.Option<Object>  getTruncateQuery$default$2()
static String  getUpdateColumnNullabilityQuery(String tableName, String columnName, boolean isNullable)
static String  getUpdateColumnTypeQuery(String tableName, String columnName, String newDataType)
static boolean  indexExists(Connection conn, String indexName, Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
static scala.Option<Object>  isCascadingTruncateTable()
static boolean  isSupportedFunction(String funcName)
static TableIndex[]  listIndexes(Connection conn, Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
static String[][]  listSchemas(Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
static org.slf4j.Logger  org$apache$spark$internal$Logging$$log_()
static void  org$apache$spark$internal$Logging$$log__$eq(org.slf4j.Logger x$1)
abstract static int  productArity()
abstract static Object  productElement(int n)
static scala.collection.Iterator<Object>  productIterator()
static String  productPrefix()
static String  quoteIdentifier(String colName)
static String  removeSchemaCommentQuery(String schema)
static String  renameTable(Identifier oldTable, Identifier newTable)
static boolean  schemasExists(Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options, String schema)
static boolean  supportsLimit()
static boolean  supportsOffset()
static boolean  supportsTableSample()
Constructor Details

TeradataDialect
public TeradataDialect()

Method Details
canHandle
public static boolean canHandle(String url)
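canHandle decides whether this dialect applies to a given JDBC connection string; Spark's JdbcDialects registry uses it to pick the dialect for a URL. A minimal sketch (the host and database names are placeholders):

```scala
import org.apache.spark.sql.jdbc.JdbcDialects

// JdbcDialects.get(url) returns the registered dialect whose canHandle(url)
// accepts the connection string; Teradata URLs begin with "jdbc:teradata".
val dialect = JdbcDialects.get("jdbc:teradata://example-host/my_db")
```

The `dialect` value is reused by the sketches below.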
isSupportedFunction
public static boolean isSupportedFunction(String funcName)

getJDBCType
public static scala.Option<JdbcType> getJDBCType(DataType dt)
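getJDBCType maps a Catalyst DataType to the database column type used when Spark writes or creates tables; None means Spark falls back to its default JDBC mapping. A sketch, reusing the dialect obtained above:

```scala
import org.apache.spark.sql.types.{BooleanType, StringType}

// Some(JdbcType(...)) carries the database type name and its java.sql.Types
// code; None means the default mapping applies for that Catalyst type.
dialect.getJDBCType(StringType).foreach(t => println(t.databaseTypeDefinition))
dialect.getJDBCType(BooleanType).foreach(t => println(t.databaseTypeDefinition))
```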
isCascadingTruncateTable
public static scala.Option<Object> isCascadingTruncateTable()

getTruncateQuery
public static String getTruncateQuery(String table, scala.Option<Object> cascade)
The SQL query used to truncate a table. Teradata does not support the 'TRUNCATE' syntax that other dialects use; instead, a 'DELETE FROM' statement is used.
Parameters:
table - The table to truncate.
cascade - Whether or not to cascade the truncation. The default value is the value of isCascadingTruncateTable(). Teradata does not support cascading a 'DELETE FROM' statement (and, as mentioned, does not support the 'TRUNCATE' syntax).
Returns:
The SQL query to use for truncating a table
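Because truncation is rewritten this way, asking the dialect for a truncate statement yields a DELETE FROM query rather than TRUNCATE. A sketch (the table name is a placeholder):

```scala
// Teradata has no TRUNCATE, so the returned statement is a DELETE FROM query.
val truncateSql = dialect.getTruncateQuery("my_schema.my_table")
```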
renameTable
public static String renameTable(Identifier oldTable, Identifier newTable)
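renameTable builds the dialect-specific rename statement from catalog identifiers. A sketch using the public Identifier factory (schema and table names are placeholders):

```scala
import org.apache.spark.sql.connector.catalog.Identifier

// Identifier.of(namespace, name) is the V2 catalog identifier factory.
val renameSql = dialect.renameTable(
  Identifier.of(Array("my_schema"), "old_table"),
  Identifier.of(Array("my_schema"), "new_table"))
```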
getLimitClause
public static String getLimitClause(Integer limit)

getCatalystType
public static scala.Option<DataType> getCatalystType(int sqlType, String typeName, int size, MetadataBuilder md)
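getCatalystType runs on the read path: given JDBC metadata for a column, it may override the default JDBC-to-Catalyst mapping, with None meaning "use the default". A sketch with made-up column metadata:

```scala
import java.sql.Types
import org.apache.spark.sql.types.MetadataBuilder

// The arguments mirror the column's result-set metadata: the java.sql.Types
// code, the database-specific type name, the column size, and a builder
// for any extra metadata the dialect wants to attach.
val maybeType = dialect.getCatalystType(Types.NUMERIC, "NUMBER", 18, new MetadataBuilder())
```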
convertJavaTimestampToTimestamp
public static Timestamp convertJavaTimestampToTimestamp(Timestamp t)

convertJavaTimestampToTimestampNTZ
public static LocalDateTime convertJavaTimestampToTimestampNTZ(Timestamp t)

convertTimestampNTZToJavaTimestamp
public static Timestamp convertTimestampNTZToJavaTimestamp(LocalDateTime ldt)

createConnectionFactory
public static scala.Function1<Object,Connection> createConnectionFactory(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
quoteIdentifier
public static String quoteIdentifier(String colName)
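quoteIdentifier wraps a column name so that reserved words and case-sensitive names survive in generated SQL; the base JdbcDialect quotes with double quotes, and this dialect may simply inherit that behavior. Sketch:

```scala
// Quoting protects reserved words such as "select" when the name is
// spliced into generated SQL.
val quoted = dialect.quoteIdentifier("select")
```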
createTable
public static void createTable(Statement statement, String tableName, String strSchema, org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite options)
getTableExistsQuery
public static String getTableExistsQuery(String table)
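getTableExistsQuery returns a cheap probe query: Spark executes it and treats success as "the table exists". Sketch (the table name is a placeholder):

```scala
// The probe is designed to fail fast when the table is missing while
// fetching no rows when it exists.
val existsSql = dialect.getTableExistsQuery("my_schema.my_table")
```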
getSchemaQuery
public static String getSchemaQuery(String table)

getTruncateQuery$default$2
public static scala.Option<Object> getTruncateQuery$default$2()

beforeFetch
public static void beforeFetch(Connection connection, scala.collection.immutable.Map<String, String> properties)
compileValue
public static Object compileValue(Object value)
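compileValue renders a JVM literal as a SQL fragment for pushed-down predicates. A sketch; per the base dialect's behavior, strings come back single-quoted with embedded quotes escaped:

```scala
// A value like O'Brien is escaped so it can be spliced safely into a
// pushed-down WHERE clause.
val literal = dialect.compileValue("O'Brien")
```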
compileExpression
public static scala.Option<String> compileExpression(Expression expr)

compileAggregate
public static scala.Option<String> compileAggregate(AggregateFunc aggFunction)

functions
public static scala.collection.Seq<scala.Tuple2<String,UnboundFunction>> functions()

createSchema
public static void createSchema(Statement statement, String schema, String comment)

schemasExists
public static boolean schemasExists(Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options, String schema)

listSchemas
public static String[][] listSchemas(Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
alterTable
public static String[] alterTable(String tableName, scala.collection.Seq<TableChange> changes, int dbMajorVersion)
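alterTable translates V2 TableChange descriptions into one ALTER statement per change. A sketch using the public TableChange factory; the table name and the dbMajorVersion value are placeholders:

```scala
import org.apache.spark.sql.connector.catalog.TableChange
import org.apache.spark.sql.types.IntegerType

// One SQL string is produced per TableChange; dbMajorVersion lets a dialect
// vary syntax across server versions (17 here is just an example value).
val statements = dialect.alterTable(
  "my_schema.my_table",
  Seq[TableChange](TableChange.addColumn(Array("new_col"), IntegerType)),
  17)
```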
getAddColumnQuery
public static String getAddColumnQuery(String tableName, String columnName, String dataType)

getRenameColumnQuery
public static String getRenameColumnQuery(String tableName, String columnName, String newName, int dbMajorVersion)

getDeleteColumnQuery
public static String getDeleteColumnQuery(String tableName, String columnName)

getUpdateColumnTypeQuery
public static String getUpdateColumnTypeQuery(String tableName, String columnName, String newDataType)

getUpdateColumnNullabilityQuery
public static String getUpdateColumnNullabilityQuery(String tableName, String columnName, boolean isNullable)

getTableCommentQuery
public static String getTableCommentQuery(String table, String comment)

getSchemaCommentQuery
public static String getSchemaCommentQuery(String schema, String comment)

removeSchemaCommentQuery
public static String removeSchemaCommentQuery(String schema)

dropSchema
public static String dropSchema(String schema, boolean cascade)

createIndex
public static String createIndex(String indexName, Identifier tableIdent, NamedReference[] columns, Map<NamedReference, Map<String, String>> columnsProperties, Map<String, String> properties)

indexExists
public static boolean indexExists(Connection conn, String indexName, Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)

dropIndex
public static String dropIndex(String indexName, Identifier tableIdent)

listIndexes
public static TableIndex[] listIndexes(Connection conn, Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)

classifyException
public static AnalysisException classifyException(String message, Throwable e)

getOffsetClause
public static String getOffsetClause(Integer offset)

getJdbcSQLQueryBuilder
public static JdbcSQLQueryBuilder getJdbcSQLQueryBuilder(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
supportsLimit
public static boolean supportsLimit()

supportsOffset
public static boolean supportsOffset()

supportsTableSample
public static boolean supportsTableSample()
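These capability flags tell Spark's pushdown planner which SQL features it may delegate to the database before asking the dialect's query builder for LIMIT, OFFSET, or TABLESAMPLE fragments. Sketch:

```scala
// A pushdown is only attempted when the corresponding flag is true.
if (dialect.supportsLimit) {
  // Spark may push LIMIT down into the generated Teradata query.
}
println(s"offset: ${dialect.supportsOffset}, sample: ${dialect.supportsTableSample}")
```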
getTableSample
public static String getTableSample(org.apache.spark.sql.execution.datasources.v2.TableSampleInfo sample)

getFullyQualifiedQuotedTableName
public static String getFullyQualifiedQuotedTableName(Identifier ident)

org$apache$spark$internal$Logging$$log_
public static org.slf4j.Logger org$apache$spark$internal$Logging$$log_()

org$apache$spark$internal$Logging$$log__$eq
public static void org$apache$spark$internal$Logging$$log__$eq(org.slf4j.Logger x$1)
canEqual
public abstract static boolean canEqual(Object that)

equals
public abstract static boolean equals(Object that)

productElement
public abstract static Object productElement(int n)

productArity
public abstract static int productArity()

productIterator
public static scala.collection.Iterator<Object> productIterator()

productPrefix
public static String productPrefix()