8 changes: 4 additions & 4 deletions conf/cassandra.yaml
@@ -196,7 +196,7 @@ batchlog_replay_throttle: 1024KiB
# Please increase system_auth keyspace replication factor if you use this authenticator.
# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
authenticator:
-class_name: AllowAllAuthenticator
+class_name: PasswordAuthenticator
Reviewer comment (Contributor): Please revert the changes in this file.

# MutualTlsAuthenticator can be configured using the following configuration. One can add their own validator
# which implements MutualTlsCertificateValidator class and provide logic for extracting identity out of certificates
# and validating certificates.
@@ -216,7 +216,7 @@ authenticator:
# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please
# increase system_auth keyspace replication factor if you use this authorizer.
authorizer:
-class_name: AllowAllAuthorizer
+class_name: CassandraAuthorizer

# Part of the Authentication & Authorization backend, implementing IRoleManager; used
# to maintain grants and memberships between roles.
@@ -421,7 +421,7 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
# for write path allocation rejection (standard: never reject. cdc: reject Mutation
# containing a CDC-enabled table if at space limit in cdc_raw_directory).
-cdc_enabled: false
+cdc_enabled: true

# Specify whether writes to the CDC-enabled tables should be blocked when CDC data on disk has reached to the limit.
# When setting to false, the writes will not be blocked and the oldest CDC data on disk will be deleted to
@@ -1939,7 +1939,7 @@ trace_type_repair_ttl: 7d
# INFO level
# UDFs (user defined functions) are disabled by default.
# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-user_defined_functions_enabled: false
+user_defined_functions_enabled: true

# Triggers are enabled by default.
# `enabled` executes queries and their triggers.
61 changes: 61 additions & 0 deletions pylib/cqlshlib/cql3handling.py
@@ -268,6 +268,10 @@ def dequote_value(cqlword):
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
| <createDataSourceStatement>
| <createDataSinkStatement>
| <dropDataSourceStatement>
| <dropDataSinkStatement>
;

<dataChangeStatement> ::= <insertStatement>
@@ -1816,6 +1820,63 @@ def drop_trigger_completer(ctxt, cass):
;
'''

syntax_rules += r'''
<createDataSourceStatement> ::= "CREATE" "DATA_SOURCE" ( "IF" "NOT" "EXISTS" )?
srcname=<cident>
"ON" "TABLE" table=<columnFamilyName>
"WITH" sinkname=<cident>
;

<dropDataSourceStatement> ::= "DROP" "DATA_SOURCE" ("IF" "EXISTS")?
srcname=<cident>
"ON" "TABLE" table=<columnFamilyName>
;
'''
syntax_rules += r'''
<createDataSinkStatement> ::= "CREATE" "DATA_SINK" ("IF" "NOT" "EXISTS")?
sinkname=<cident>
"WITH" uri=<stringLiteral>
;

<dropDataSinkStatement> ::= "DROP" "DATA_SINK" ("IF" "EXISTS")? sinkname=<cident>
;
'''

def get_data_source_names(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
return cass.get_data_source_names(ks)

def get_data_sink_names(ctxt, cass):
return cass.get_data_sink_names()


# Autocompletion hints for CREATE DATA_SOURCE
explain_completion('createDataSourceStatement', 'srcname', '<srcname>')
explain_completion('createDataSourceStatement', 'sinkname', '<sinkname>')

# Completer for CREATE DATA_SOURCE sinkname - suggest existing sinks
@completer_for('createDataSourceStatement', 'sinkname')
def create_data_source_completer(ctxt, cass):
names = get_data_sink_names(ctxt, cass)
return list(map(maybe_escape_name, names))

# Completer for DROP DATA_SOURCE - autocomplete existing source names
@completer_for('dropDataSourceStatement', 'srcname')
def drop_data_source_completer(ctxt, cass):
names = get_data_source_names(ctxt, cass)
return list(map(maybe_escape_name, names))

# Autocompletion hints for CREATE DATA_SINK
explain_completion('createDataSinkStatement', 'sinkname', '<sinkname>')

# Completer for DROP DATA_SINK - autocomplete existing sink names
@completer_for('dropDataSinkStatement', 'sinkname')
def drop_data_sink_completer(ctxt, cass):
names = get_data_sink_names(ctxt, cass)
return list(map(maybe_escape_name, names))

# END SYNTAX/COMPLETION RULE DEFINITIONS

CqlRuleSet.append_rules(syntax_rules)
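
Note: the completion rules above target the new DATA_SOURCE / DATA_SINK statements introduced by this patch. Based purely on the grammar in this diff, the statement shapes look roughly like the following (the keyspace, table, source/sink names, and the URI string are illustrative placeholders, not part of this change):

    CREATE DATA_SINK IF NOT EXISTS my_sink WITH 'sink-uri';   -- 'sink-uri' is a placeholder string literal
    CREATE DATA_SOURCE IF NOT EXISTS my_source ON TABLE ks.events WITH my_sink;
    DROP DATA_SOURCE IF EXISTS my_source ON TABLE ks.events;
    DROP DATA_SINK IF EXISTS my_sink;

With the completers above, cqlsh should suggest existing sink names after WITH in CREATE DATA_SOURCE, existing source names in DROP DATA_SOURCE, and existing sink names in DROP DATA_SINK.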
2 changes: 2 additions & 0 deletions pylib/cqlshlib/cqlshhandling.py
@@ -96,6 +96,8 @@ def registrator(f):
| "CLUSTER"
| "TYPES"
| "TYPE" ut=<userTypeName>
| "DATA_SOURCES"
| "DATA_SINKS"
| (ksname=<keyspaceName> | cf=<columnFamilyName> | idx=<indexName> | mv=<materializedViewName>)
) ("WITH" "INTERNALS")?
)
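
Note: with the two alternatives added above, cqlsh's DESCRIBE grammar should also accept the following (a sketch derived from this rule; output formatting goes through the describe_list path in cqlshmain.py below):

    DESCRIBE DATA_SOURCES;
    DESCRIBE DATA_SINKS;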
2 changes: 1 addition & 1 deletion pylib/cqlshlib/cqlshmain.py
@@ -1207,7 +1207,7 @@ def do_describe(self, parsed):

what = parsed.matched[1][1].lower()

-if what in ('columnfamilies', 'tables', 'types', 'functions', 'aggregates'):
+if what in ('columnfamilies', 'tables', 'types', 'functions', 'aggregates', 'data_sources', 'data_sinks'):
self.describe_list(result)
elif what == 'keyspaces':
self.describe_keyspaces(result)
4 changes: 4 additions & 0 deletions src/antlr/Lexer.g
@@ -167,6 +167,10 @@ K_NOLOGIN: N O L O G I N;
K_OPTIONS: O P T I O N S;
K_ACCESS: A C C E S S;
K_DATACENTERS: D A T A C E N T E R S;
K_DATA_SOURCE: D A T A '_' S O U R C E;
K_DATA_SOURCES: D A T A '_' S O U R C E S;
K_DATA_SINK: D A T A '_' S I N K;
K_DATA_SINKS: D A T A '_' S I N K S;
K_CIDRS: C I D R S;
K_IDENTITY: I D E N T I T Y;

58 changes: 58 additions & 0 deletions src/antlr/Parser.g
@@ -294,6 +294,10 @@ cqlStatement returns [CQLStatement.Raw stmt]
| st55=securityLabelOnUserTypeStatement { $stmt = st55; }
| st56=commentOnUserTypeFieldStatement { $stmt = st56; }
| st57=securityLabelOnUserTypeFieldStatement { $stmt = st57; }
| st58=createDataSourceStatement { $stmt = st58; }
| st59=createDataSinkStatement { $stmt = st59; }
| st60=dropDataSourceStatement { $stmt = st60; }
| st61=dropDataSinkStatement { $stmt = st61; }
;

/*
@@ -1819,6 +1823,8 @@ describeStatement returns [DescribeStatement stmt]
| K_FUNCTION fn=functionName { $stmt = DescribeStatement.function(fn.keyspace, fn.name); }
| (K_AGGREGATES) => K_AGGREGATES { $stmt = DescribeStatement.aggregates(); }
| K_AGGREGATE ag=functionName { $stmt = DescribeStatement.aggregate(ag.keyspace, ag.name); }
| (K_DATA_SOURCES) => K_DATA_SOURCES { $stmt = DescribeStatement.dataSources(); }
| (K_DATA_SINKS) => K_DATA_SINKS { $stmt = DescribeStatement.dataSinks(); }
| ( ( ksT=IDENT { gen.setKeyspace($ksT.text, false);}
| ksT=QUOTED_NAME { gen.setKeyspace($ksT.text, true);}
| ksK=unreserved_keyword { gen.setKeyspace(ksK, false);} ) '.' )?
@@ -1830,6 +1836,58 @@ describeStatement returns [DescribeStatement stmt]
( K_WITH K_INTERNALS { $stmt.withInternalDetails(); } )?
;

/**
* CREATE DATA_SOURCE [IF NOT EXISTS] <srcname> ON TABLE <table> WITH <sinkname>
*/
createDataSourceStatement returns [CreateDataSourceStatement.Raw stmt]
@init {
boolean ifNotExists = false;
}
: K_CREATE K_DATA_SOURCE (K_IF K_NOT K_EXISTS { ifNotExists = true; } )? srcname=noncol_ident
K_ON (K_COLUMNFAMILY)? name=columnFamilyName
K_WITH sinkname=noncol_ident {
$stmt = new CreateDataSourceStatement.Raw(srcname, name, sinkname, ifNotExists);
}
;

/**
* DROP DATA_SOURCE [IF EXISTS] <srcname> ON TABLE <table_name>
*/
dropDataSourceStatement returns [DropDataSourceStatement.Raw stmt]
@init {
boolean ifExists = false;
}
: K_DROP K_DATA_SOURCE (K_IF K_EXISTS { ifExists = true; } )? (srcname=noncol_ident)
K_ON (K_COLUMNFAMILY)? table=columnFamilyName {
$stmt = new DropDataSourceStatement.Raw(srcname, table, ifExists);
}
;

/**
* CREATE DATA_SINK [IF NOT EXISTS] <sinkname> WITH <uri>
*/
createDataSinkStatement returns [CreateDataSinkStatement.Raw stmt]
@init {
boolean ifNotExists = false;
}
: K_CREATE K_DATA_SINK (K_IF K_NOT K_EXISTS { ifNotExists = true; } )? sinkname=noncol_ident
K_WITH uri=STRING_LITERAL {
$stmt = new CreateDataSinkStatement.Raw(sinkname, $uri.text, ifNotExists);
}
;

/**
* DROP DATA_SINK [IF EXISTS] <sinkname>
*/
dropDataSinkStatement returns [DropDataSinkStatement.Raw stmt]
@init {
boolean ifExists = false;
}
: K_DROP K_DATA_SINK (K_IF K_EXISTS { ifExists = true; } )? sinkname=noncol_ident {
$stmt = new DropDataSinkStatement.Raw(sinkname, ifExists);
}
;

/** DEFINITIONS **/

// Like ident, but for case where we take a column name that can be the legacy super column empty name. Importantly,
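
Note on the parser rules above: K_COLUMNFAMILY (the lexer token behind the TABLE keyword) is optional after K_ON, so the table qualifier keyword can be omitted. Variants such as the following should all parse (identifiers are illustrative only):

    CREATE DATA_SOURCE src1 ON TABLE ks.t1 WITH sink1;
    CREATE DATA_SOURCE src1 ON ks.t1 WITH sink1;
    DROP DATA_SOURCE IF EXISTS src1 ON ks.t1;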