@@ -11,6 +11,7 @@ import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
 import org.elasticsearch.xpack.sql.expression.Attribute;
 import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern;
 import org.elasticsearch.xpack.sql.plan.logical.command.Command;
+import org.elasticsearch.xpack.sql.proto.Mode;
 import org.elasticsearch.xpack.sql.session.Rows;
 import org.elasticsearch.xpack.sql.session.SchemaRowSet;
 import org.elasticsearch.xpack.sql.session.SqlSession;
@@ -58,21 +59,29 @@ public class SysColumns extends Command {
 
     @Override
     public List<Attribute> output() {
+        return output(false);
+    }
+
+    private List<Attribute> output(boolean odbcCompatible) {
+        // https://github.com/elastic/elasticsearch/issues/35376
+        // ODBC expects some fields as SHORT while JDBC as Integer
+        // which causes conversion issues and CCE
+        DataType clientBasedType = odbcCompatible ? SHORT : INTEGER;
         return asList(keyword("TABLE_CAT"),
                       keyword("TABLE_SCHEM"),
                       keyword("TABLE_NAME"),
                       keyword("COLUMN_NAME"),
-                      field("DATA_TYPE", INTEGER),
+                      field("DATA_TYPE", clientBasedType),
                       keyword("TYPE_NAME"),
                       field("COLUMN_SIZE", INTEGER),
                       field("BUFFER_LENGTH", INTEGER),
-                      field("DECIMAL_DIGITS", INTEGER),
-                      field("NUM_PREC_RADIX", INTEGER),
-                      field("NULLABLE", INTEGER),
+                      field("DECIMAL_DIGITS", clientBasedType),
+                      field("NUM_PREC_RADIX", clientBasedType),
+                      field("NULLABLE", clientBasedType),
                       keyword("REMARKS"),
                       keyword("COLUMN_DEF"),
-                      field("SQL_DATA_TYPE", INTEGER),
-                      field("SQL_DATETIME_SUB", INTEGER),
+                      field("SQL_DATA_TYPE", clientBasedType),
+                      field("SQL_DATETIME_SUB", clientBasedType),
                       field("CHAR_OCTET_LENGTH", INTEGER),
                       field("ORDINAL_POSITION", INTEGER),
                       keyword("IS_NULLABLE"),
@@ -88,11 +97,13 @@ public class SysColumns extends Command {
 
     @Override
     public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
+        boolean isOdbcClient = session.settings().mode() == Mode.ODBC;
+        List<Attribute> output = output(isOdbcClient);
         String cluster = session.indexResolver().clusterName();
 
         // bail-out early if the catalog is present but differs
         if (Strings.hasText(catalog) && !cluster.equals(catalog)) {
-            listener.onResponse(Rows.empty(output()));
+            listener.onResponse(Rows.empty(output));
             return;
         }
 
@@ -104,15 +115,15 @@ public class SysColumns extends Command {
         session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> {
             List<List<?>> rows = new ArrayList<>();
             for (EsIndex esIndex : esIndices) {
-                fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher);
+                fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, isOdbcClient);
             }
 
-            listener.onResponse(Rows.of(output(), rows));
+            listener.onResponse(Rows.of(output, rows));
         }, listener::onFailure));
     }
 
     static void fillInRows(String clusterName, String indexName, Map<String, EsField> mapping, String prefix, List<List<?>> rows,
-            Pattern columnMatcher) {
+            Pattern columnMatcher, boolean isOdbcClient) {
         int pos = 0;
         for (Map.Entry<String, EsField> entry : mapping.entrySet()) {
             pos++; // JDBC is 1-based so we start with 1 here
@@ -128,24 +139,24 @@ public class SysColumns extends Command {
                         null,
                         indexName,
                         name,
-                        type.sqlType.getVendorTypeNumber(),
+                        odbcCompatible(type.sqlType.getVendorTypeNumber(), isOdbcClient),
                         type.esType.toUpperCase(Locale.ROOT),
                         type.displaySize,
                         // TODO: is the buffer_length correct?
                         type.size,
                         // no DECIMAL support
                         null,
-                        DataTypes.metaSqlRadix(type),
+                        odbcCompatible(DataTypes.metaSqlRadix(type), isOdbcClient),
                         // everything is nullable
-                        DatabaseMetaData.columnNullable,
+                        odbcCompatible(DatabaseMetaData.columnNullable, isOdbcClient),
                         // no remarks
                         null,
                         // no column def
                         null,
                         // SQL_DATA_TYPE apparently needs to be same as DATA_TYPE except for datetime and interval data types
-                        DataTypes.metaSqlDataType(type),
+                        odbcCompatible(DataTypes.metaSqlDataType(type), isOdbcClient),
                         // SQL_DATETIME_SUB ?
-                        DataTypes.metaSqlDateTimeSub(type),
+                        odbcCompatible(DataTypes.metaSqlDateTimeSub(type), isOdbcClient),
                         // char octet length
                         type.isString() || type == DataType.BINARY ? type.size : null,
                         // position
@@ -160,11 +171,18 @@ public class SysColumns extends Command {
                 ));
             }
             if (field.getProperties() != null) {
-                fillInRows(clusterName, indexName, field.getProperties(), name, rows, columnMatcher, isOdbcClient);
+                fillInRows(clusterName, indexName, field.getProperties(), name, rows, columnMatcher, isOdbcClient);
             }
         }
     }
 
+    private static Object odbcCompatible(Integer value, boolean isOdbcClient) {
+        if (isOdbcClient && value != null) {
+            return Short.valueOf(value.shortValue());
+        }
+        return value;
+    }
+
     @Override
     public int hashCode() {
         return Objects.hash(catalog, index, pattern, columnPattern);
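
Note (not part of the patch): a minimal standalone sketch of the narrowing that the new odbcCompatible(...) helper performs; the wrapper class and main method below are illustrative only. ODBC clients receive metadata columns such as DATA_TYPE and NULLABLE as Short, JDBC clients keep Integer, and null passes through unchanged.

import java.sql.Types;

public class OdbcCompatibleSketch {

    // Same logic as the helper added to SysColumns: narrow Integer metadata
    // values to Short for ODBC clients, leave them as Integer otherwise.
    static Object odbcCompatible(Integer value, boolean isOdbcClient) {
        if (isOdbcClient && value != null) {
            return Short.valueOf(value.shortValue());
        }
        return value;
    }

    public static void main(String[] args) {
        Object jdbc = odbcCompatible(Types.BIGINT, false);
        Object odbc = odbcCompatible(Types.BIGINT, true);
        System.out.println(jdbc.getClass().getSimpleName()); // Integer
        System.out.println(odbc.getClass().getSimpleName()); // Short
        System.out.println(odbcCompatible(null, true));      // null
    }
}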