public class TeradataDialect extends Object
| Constructor and Description |
|---|
| TeradataDialect() |
| Modifier and Type | Method and Description |
|---|---|
| static String[] | alterTable(String tableName, scala.collection.Seq&lt;org.apache.spark.sql.connector.catalog.TableChange&gt; changes, int dbMajorVersion) |
| static void | beforeFetch(java.sql.Connection connection, scala.collection.immutable.Map&lt;String,String&gt; properties) |
| abstract static boolean | canEqual(Object that) |
| static boolean | canHandle(String url) |
| static AnalysisException | classifyException(String message, Throwable e) |
| static scala.Option&lt;String&gt; | compileAggregate(org.apache.spark.sql.connector.expressions.aggregate.AggregateFunc aggFunction) |
| static scala.Option&lt;String&gt; | compileExpression(org.apache.spark.sql.connector.expressions.Expression expr) |
| static Object | compileValue(Object value) |
| static java.sql.Timestamp | convertJavaTimestampToTimestamp(java.sql.Timestamp t) |
| static java.time.LocalDateTime | convertJavaTimestampToTimestampNTZ(java.sql.Timestamp t) |
| static java.sql.Timestamp | convertTimestampNTZToJavaTimestamp(java.time.LocalDateTime ldt) |
| static scala.Function1&lt;Object,java.sql.Connection&gt; | createConnectionFactory(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options) |
| static String | createIndex(String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.connector.expressions.NamedReference[] columns, java.util.Map&lt;org.apache.spark.sql.connector.expressions.NamedReference,java.util.Map&lt;String,String&gt;&gt; columnsProperties, java.util.Map&lt;String,String&gt; properties) |
| static void | createSchema(java.sql.Statement statement, String schema, String comment) |
| static void | createTable(java.sql.Statement statement, String tableName, String strSchema, org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite options) |
| static String | dropIndex(String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent) |
| static String | dropSchema(String schema, boolean cascade) |
| abstract static boolean | equals(Object that) |
| static scala.collection.Seq&lt;scala.Tuple2&lt;String,org.apache.spark.sql.connector.catalog.functions.UnboundFunction&gt;&gt; | functions() |
| static String | getAddColumnQuery(String tableName, String columnName, String dataType) |
| static scala.Option&lt;DataType&gt; | getCatalystType(int sqlType, String typeName, int size, MetadataBuilder md) |
| static String | getDeleteColumnQuery(String tableName, String columnName) |
| static String | getFullyQualifiedQuotedTableName(org.apache.spark.sql.connector.catalog.Identifier ident) |
| static JdbcSQLQueryBuilder | getJdbcSQLQueryBuilder(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options) |
| static scala.Option&lt;JdbcType&gt; | getJDBCType(DataType dt) |
| static String | getLimitClause(Integer limit) |
| static String | getOffsetClause(Integer offset) |
| static String | getRenameColumnQuery(String tableName, String columnName, String newName, int dbMajorVersion) |
| static String | getSchemaCommentQuery(String schema, String comment) |
| static String | getSchemaQuery(String table) |
| static String | getTableCommentQuery(String table, String comment) |
| static String | getTableExistsQuery(String table) |
| static String | getTableSample(org.apache.spark.sql.execution.datasources.v2.TableSampleInfo sample) |
| static String | getTruncateQuery(String table, scala.Option&lt;Object&gt; cascade) The SQL query used to truncate a table. |
| static scala.Option&lt;Object&gt; | getTruncateQuery$default$2() |
| static String | getUpdateColumnNullabilityQuery(String tableName, String columnName, boolean isNullable) |
| static String | getUpdateColumnTypeQuery(String tableName, String columnName, String newDataType) |
| static boolean | indexExists(java.sql.Connection conn, String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options) |
| static scala.Option&lt;Object&gt; | isCascadingTruncateTable() |
| static boolean | isSupportedFunction(String funcName) |
| static org.apache.spark.sql.connector.catalog.index.TableIndex[] | listIndexes(java.sql.Connection conn, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options) |
| static String[][] | listSchemas(java.sql.Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options) |
| static void | org$apache$spark$internal$Logging$$log__$eq(org.slf4j.Logger x$1) |
| static org.slf4j.Logger | org$apache$spark$internal$Logging$$log_() |
| abstract static int | productArity() |
| abstract static Object | productElement(int n) |
| static scala.collection.Iterator&lt;Object&gt; | productIterator() |
| static String | productPrefix() |
| static String | quoteIdentifier(String colName) |
| static String | removeSchemaCommentQuery(String schema) |
| static String | renameTable(org.apache.spark.sql.connector.catalog.Identifier oldTable, org.apache.spark.sql.connector.catalog.Identifier newTable) |
| static boolean | schemasExists(java.sql.Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options, String schema) |
| static boolean | supportsLimit() |
| static boolean | supportsOffset() |
| static boolean | supportsTableSample() |
public static boolean canHandle(String url)
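canHandle is how Spark selects this dialect for a connection string. A minimal sketch of probing it from Java, assuming the object's static forwarders are accessible as documented here; the "jdbc:teradata" prefix rule is an assumption of this sketch, not stated in this page:

```java
import org.apache.spark.sql.jdbc.TeradataDialect;

public class DialectProbe {
    public static void main(String[] args) {
        // The dialect claims URLs whose scheme identifies a Teradata endpoint
        // (assumption: matching is by the "jdbc:teradata" prefix).
        System.out.println(TeradataDialect.canHandle("jdbc:teradata://dbc.example.com/DATABASE=sales")); // expected: true
        System.out.println(TeradataDialect.canHandle("jdbc:postgresql://localhost/sales"));              // expected: false
    }
}
```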
public static boolean isSupportedFunction(String funcName)
public static scala.Option<Object> isCascadingTruncateTable()
public static String getTruncateQuery(String table, scala.Option<Object> cascade)
The SQL query used to truncate a table.
Parameters:
table - The table to truncate.
cascade - Whether or not to cascade the truncation. The default value is the value of isCascadingTruncateTable(). Teradata does not support cascading a 'DELETE FROM' statement (and, as mentioned, does not support the 'TRUNCATE' syntax).
public static String renameTable(org.apache.spark.sql.connector.catalog.Identifier oldTable, org.apache.spark.sql.connector.catalog.Identifier newTable)
public static String getLimitClause(Integer limit)
public static scala.Option<DataType> getCatalystType(int sqlType, String typeName, int size, MetadataBuilder md)
public static java.sql.Timestamp convertJavaTimestampToTimestamp(java.sql.Timestamp t)
public static java.time.LocalDateTime convertJavaTimestampToTimestampNTZ(java.sql.Timestamp t)
public static java.sql.Timestamp convertTimestampNTZToJavaTimestamp(java.time.LocalDateTime ldt)
public static scala.Function1<Object,java.sql.Connection> createConnectionFactory(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
public static String quoteIdentifier(String colName)
public static void createTable(java.sql.Statement statement, String tableName, String strSchema, org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite options)
public static String getTableExistsQuery(String table)
public static String getSchemaQuery(String table)
public static scala.Option<Object> getTruncateQuery$default$2()
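getTruncateQuery$default$2() is the Scala-generated accessor for the default value of getTruncateQuery's cascade parameter, which the documentation above says is isCascadingTruncateTable(). A minimal sketch of calling the two together; the exact SQL text is an assumption based on the description above (Teradata supports neither TRUNCATE nor a cascading DELETE, so a plain DELETE FROM statement is expected):

```java
import org.apache.spark.sql.jdbc.TeradataDialect;

public class TruncateQuerySketch {
    public static void main(String[] args) {
        // The synthetic $default$2 accessor supplies the Scala default argument
        // for `cascade`, documented above as isCascadingTruncateTable().
        scala.Option<Object> cascade = TeradataDialect.getTruncateQuery$default$2();
        String sql = TeradataDialect.getTruncateQuery("sales.orders", cascade);
        // Expected shape (assumption): a DELETE FROM statement rather than TRUNCATE,
        // e.g. "DELETE FROM sales.orders ALL" -- verify against your Spark version.
        System.out.println(sql);
    }
}
```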
public static void beforeFetch(java.sql.Connection connection, scala.collection.immutable.Map<String,String> properties)
public static Object compileValue(Object value)
public static scala.Option<String> compileExpression(org.apache.spark.sql.connector.expressions.Expression expr)
public static scala.Option<String> compileAggregate(org.apache.spark.sql.connector.expressions.aggregate.AggregateFunc aggFunction)
public static scala.collection.Seq<scala.Tuple2<String,org.apache.spark.sql.connector.catalog.functions.UnboundFunction>> functions()
public static void createSchema(java.sql.Statement statement, String schema, String comment)
public static boolean schemasExists(java.sql.Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options, String schema)
public static String[][] listSchemas(java.sql.Connection conn, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
public static String[] alterTable(String tableName, scala.collection.Seq<org.apache.spark.sql.connector.catalog.TableChange> changes, int dbMajorVersion)
public static String getAddColumnQuery(String tableName, String columnName, String dataType)
public static String getRenameColumnQuery(String tableName, String columnName, String newName, int dbMajorVersion)
public static String getDeleteColumnQuery(String tableName, String columnName)
public static String getUpdateColumnTypeQuery(String tableName, String columnName, String newDataType)
public static String getUpdateColumnNullabilityQuery(String tableName, String columnName, boolean isNullable)
public static String getTableCommentQuery(String table, String comment)
public static String getSchemaCommentQuery(String schema, String comment)
public static String removeSchemaCommentQuery(String schema)
public static String dropSchema(String schema, boolean cascade)
public static String createIndex(String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.connector.expressions.NamedReference[] columns, java.util.Map<org.apache.spark.sql.connector.expressions.NamedReference,java.util.Map<String,String>> columnsProperties, java.util.Map<String,String> properties)
public static boolean indexExists(java.sql.Connection conn, String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
public static String dropIndex(String indexName, org.apache.spark.sql.connector.catalog.Identifier tableIdent)
public static org.apache.spark.sql.connector.catalog.index.TableIndex[] listIndexes(java.sql.Connection conn, org.apache.spark.sql.connector.catalog.Identifier tableIdent, org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
public static AnalysisException classifyException(String message, Throwable e)
public static String getOffsetClause(Integer offset)
public static JdbcSQLQueryBuilder getJdbcSQLQueryBuilder(org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions options)
public static boolean supportsLimit()
public static boolean supportsOffset()
public static boolean supportsTableSample()
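The three supports* flags advertise which operator pushdowns Spark may attempt through this dialect, while getLimitClause and getOffsetClause (above) supply the corresponding SQL fragments. A minimal probe, printing whatever the running Spark version reports; no particular output values are guaranteed by this page:

```java
import org.apache.spark.sql.jdbc.TeradataDialect;

public class CapabilityProbe {
    public static void main(String[] args) {
        // Capability flags consulted before Spark pushes LIMIT/OFFSET/TABLESAMPLE
        // down into the generated Teradata query.
        System.out.println("supportsLimit:       " + TeradataDialect.supportsLimit());
        System.out.println("supportsOffset:      " + TeradataDialect.supportsOffset());
        System.out.println("supportsTableSample: " + TeradataDialect.supportsTableSample());
        // Clause fragments; a fragment may be empty when the dialect expresses the
        // operator elsewhere in the query (e.g. via its JdbcSQLQueryBuilder).
        System.out.println(TeradataDialect.getLimitClause(10));
        System.out.println(TeradataDialect.getOffsetClause(5));
    }
}
```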
public static String getTableSample(org.apache.spark.sql.execution.datasources.v2.TableSampleInfo sample)
public static String getFullyQualifiedQuotedTableName(org.apache.spark.sql.connector.catalog.Identifier ident)
public static org.slf4j.Logger org$apache$spark$internal$Logging$$log_()
public static void org$apache$spark$internal$Logging$$log__$eq(org.slf4j.Logger x$1)
public abstract static boolean canEqual(Object that)
public abstract static boolean equals(Object that)
public abstract static Object productElement(int n)
public abstract static int productArity()
public static scala.collection.Iterator<Object> productIterator()
public static String productPrefix()