Configuration of the sample

Data fragmentation

The following examples configure ShardingSphere through the Java API. They rely on a user-defined DataSourceUtil helper class for creating data sources and on ModuloShardingTableAlgorithm, a user-defined sharding-algorithm implementation class; see the detailed examples of ModuloShardingTableAlgorithm for its implementation.

     DataSource getShardingDataSource(a) throws SQLException {
         ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
         shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
         shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
         shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
         shardingRuleConfig.getBroadcastTables().add("t_config");
         shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new InlineShardingStrategyConfiguration("user_id"."ds${user_id % 2}"));
         shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id".new ModuloShardingTableAlgorithm()));
         return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
     }
     
     private static KeyGeneratorConfiguration getKeyGeneratorConfiguration(a) {
         KeyGeneratorConfiguration result = new KeyGeneratorConfiguration("SNOWFLAKE"."order_id");
         return result;
     }
     
     TableRuleConfiguration getOrderTableRuleConfiguration(a) {
         TableRuleConfiguration result = new TableRuleConfiguration("t_order"."ds${0.. 1}.t_order${0.. 1}");
         result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
         return result;
     }
     
     TableRuleConfiguration getOrderItemTableRuleConfiguration(a) {
         TableRuleConfiguration result = new TableRuleConfiguration("t_order_item"."ds${0.. 1}.t_order_item${0.. 1}");
         return result;
     }
     
     Map<String, DataSource> createDataSourceMap(a) {
         Map<String, DataSource> result = new HashMap<>();
         result.put("ds0", DataSourceUtil.createDataSource("ds0"));
         result.put("ds1", DataSourceUtil.createDataSource("ds1"));
         return result;
     }
Copy the code

Reading and writing separation

     DataSource getMasterSlaveDataSource(a) throws SQLException {
         MasterSlaveRuleConfiguration masterSlaveRuleConfig = new MasterSlaveRuleConfiguration("ds_master_slave"."ds_master", Arrays.asList("ds_slave0"."ds_slave1"));
         return MasterSlaveDataSourceFactory.createDataSource(createDataSourceMap(), masterSlaveRuleConfig, new Properties());
     }
     
     Map<String, DataSource> createDataSourceMap(a) {
         Map<String, DataSource> result = new HashMap<>();
         result.put("ds_master", DataSourceUtil.createDataSource("ds_master"));
         result.put("ds_slave0", DataSourceUtil.createDataSource("ds_slave0"));
         result.put("ds_slave1", DataSourceUtil.createDataSource("ds_slave1"));
         return result;
     }
Copy the code

Data desensitization

    DataSource getEncryptDataSource(a) throws SQLException {
        return EncryptDataSourceFactory.createDataSource(DataSourceUtil.createDataSource("demo_ds"), getEncryptRuleConfiguration(), new Properties());
    }

    private static EncryptRuleConfiguration getEncryptRuleConfiguration(a) {
        Properties props = new Properties();
        props.setProperty("aes.key.value"."123456");
        EncryptorRuleConfiguration encryptorConfig = new EncryptorRuleConfiguration("AES", props);
        EncryptColumnRuleConfiguration columnConfig = new EncryptColumnRuleConfiguration("plain_pwd"."cipher_pwd".""."aes");
        EncryptTableRuleConfiguration tableConfig = new EncryptTableRuleConfiguration(Collections.singletonMap("pwd", columnConfig));
        EncryptRuleConfiguration encryptRuleConfig = new EncryptRuleConfiguration();
        encryptRuleConfig.getEncryptors().put("aes", encryptorConfig);
        encryptRuleConfig.getTables().put("t_encrypt", tableConfig);
        return encryptRuleConfig;
    }
Copy the code

Data fragmentation + Read/write separation

    DataSource getDataSource(a) throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
        shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
        shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
        shardingRuleConfig.getBroadcastTables().add("t_config");
        shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("user_id".new PreciseModuloShardingDatabaseAlgorithm()));
        shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id".new PreciseModuloShardingTableAlgorithm()));
        shardingRuleConfig.setMasterSlaveRuleConfigs(getMasterSlaveRuleConfigurations());
        return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
    }
    
    private static KeyGeneratorConfiguration getKeyGeneratorConfiguration(a) {
        KeyGeneratorConfiguration result = new KeyGeneratorConfiguration("SNOWFLAKE"."order_id");
        return result;
    }
    
    TableRuleConfiguration getOrderTableRuleConfiguration(a) {
        TableRuleConfiguration result = new TableRuleConfiguration("t_order"."ds_${0.. 1}.t_order_${[0, 1]}");
        result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
        return result;
    }
    
    TableRuleConfiguration getOrderItemTableRuleConfiguration(a) {
        TableRuleConfiguration result = new TableRuleConfiguration("t_order_item"."ds_${0.. 1}.t_order_item_${[0, 1]}");
        return result;
    }
    
    List<MasterSlaveRuleConfiguration> getMasterSlaveRuleConfigurations(a) {
        MasterSlaveRuleConfiguration masterSlaveRuleConfig1 = new MasterSlaveRuleConfiguration("ds_0"."demo_ds_master_0", Arrays.asList("demo_ds_master_0_slave_0"."demo_ds_master_0_slave_1"));
        MasterSlaveRuleConfiguration masterSlaveRuleConfig2 = new MasterSlaveRuleConfiguration("ds_1"."demo_ds_master_1", Arrays.asList("demo_ds_master_1_slave_0"."demo_ds_master_1_slave_1"));
        return Lists.newArrayList(masterSlaveRuleConfig1, masterSlaveRuleConfig2);
    }
    
    Map<String, DataSource> createDataSourceMap(a) {
        final Map<String, DataSource> result = new HashMap<>();
        result.put("demo_ds_master_0", DataSourceUtil.createDataSource("demo_ds_master_0"));
        result.put("demo_ds_master_0_slave_0", DataSourceUtil.createDataSource("demo_ds_master_0_slave_0"));
        result.put("demo_ds_master_0_slave_1", DataSourceUtil.createDataSource("demo_ds_master_0_slave_1"));
        result.put("demo_ds_master_1", DataSourceUtil.createDataSource("demo_ds_master_1"));
        result.put("demo_ds_master_1_slave_0", DataSourceUtil.createDataSource("demo_ds_master_1_slave_0"));
        result.put("demo_ds_master_1_slave_1", DataSourceUtil.createDataSource("demo_ds_master_1_slave_1"));
        return result;
    }
Copy the code

Data fragmentation + data desensitization

    public DataSource getDataSource(a) throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
        shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
        shardingRuleConfig.getTableRuleConfigs().add(getOrderEncryptTableRuleConfiguration());
        shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
        shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new InlineShardingStrategyConfiguration("user_id"."demo_ds_${user_id % 2}"));
        shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id".new PreciseModuloShardingTableAlgorithm()));
        shardingRuleConfig.setEncryptRuleConfig(getEncryptRuleConfiguration());
        return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
    }
    
    private static TableRuleConfiguration getOrderTableRuleConfiguration(a) {
        TableRuleConfiguration result = new TableRuleConfiguration("t_order"."demo_ds_${0.. 1}.t_order_${[0, 1]}");
        result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
        return result;
    }
    
    private static TableRuleConfiguration getOrderItemTableRuleConfiguration(a) {
        TableRuleConfiguration result = new TableRuleConfiguration("t_order_item"."demo_ds_${0.. 1}.t_order_item_${[0, 1]}");
        result.setEncryptorConfig(new EncryptorConfiguration("MD5"."status".new Properties()));
        return result;
    }
    
    private static EncryptRuleConfiguration getEncryptRuleConfiguration(a) {
        Properties props = new Properties();
        props.setProperty("aes.key.value"."123456");
        EncryptorRuleConfiguration encryptorConfig = new EncryptorRuleConfiguration("AES", props);
        EncryptColumnRuleConfiguration columnConfig = new EncryptColumnRuleConfiguration("plain_order"."cipher_order".""."aes");
        EncryptTableRuleConfiguration tableConfig = new EncryptTableRuleConfiguration(Collections.singletonMap("order_id", columnConfig));
        EncryptRuleConfiguration encryptRuleConfig = new EncryptRuleConfiguration();
        encryptRuleConfig.getEncryptors().put("aes", encryptorConfig);
        encryptRuleConfig.getTables().put("t_order", tableConfig);
		return encryptRuleConfig;
    }
    
    private static Map<String, DataSource> createDataSourceMap(a) {
        Map<String, DataSource> result = new HashMap<>();
        result.put("demo_ds_0", DataSourceUtil.createDataSource("demo_ds_0"));
        result.put("demo_ds_1", DataSourceUtil.createDataSource("demo_ds_1"));
        return result;
    }
    
    private static KeyGeneratorConfiguration getKeyGeneratorConfiguration(a) {
        return new KeyGeneratorConfiguration("SNOWFLAKE"."order_id".new Properties());
    }
Copy the code

governance

    DataSource getDataSource(a) throws SQLException {
        / / OrchestrationShardingDataSourceFactory can replace OrchestrationMasterSlaveDataSourceFactory or OrchestrationEncryptDataSourceFactory
        return OrchestrationShardingDataSourceFactory.createDataSource(
                createDataSourceMap(), createShardingRuleConfig(), new HashMap<String, Object>(), new Properties(), 
                new OrchestrationConfiguration(createCenterConfigurationMap()));
    }
    private Map<String, CenterConfiguration> createCenterConfigurationMap(a) {
        Map<String, CenterConfiguration> instanceConfigurationMap = new HashMap<String, CenterConfiguration>();
        CenterConfiguration config = createCenterConfiguration();
        instanceConfigurationMap.put("orchestration-sharding-data-source", config);
        return instanceConfigurationMap;
    }
    private CenterConfiguration createCenterConfiguration(a) {
        Properties properties = new Properties();
        properties.setProperty("overwrite", overwrite);
        CenterConfiguration result = new CenterConfiguration("zookeeper", properties);
        result.setServerLists("localhost:2181");
        result.setNamespace("sharding-sphere-orchestration");
        result.setOrchestrationType("registry_center,config_center");
        return result;
    }
Copy the code

Configuration Item Description

Data fragmentation

ShardingDataSourceFactory

Data source creation factory for data sharding.

The name of the The data type instructions
dataSourceMap Map<String, DataSource> Data Source Configuration
shardingRuleConfig ShardingRuleConfiguration Rules for configuring data fragments
props (?) Properties The configuration properties

ShardingRuleConfiguration

Fragment rule configuration object.

The name of the The data type instructions
tableRuleConfigs Collection<TableRuleConfiguration> Sharding rule list
bindingTableGroups (?) Collection<String> List of binding table rules
broadcastTables (?) Collection<String> List of broadcast table rules
defaultDataSourceName (?) String Tables without sharding rules are located through the default data source
defaultDatabaseShardingStrategyConfig (?) ShardingStrategyConfiguration Default repository splitting policy
defaultTableShardingStrategyConfig (?) ShardingStrategyConfiguration Default split table policy
defaultKeyGeneratorConfig (?) KeyGeneratorConfiguration Default key generator configuration for generated key columns; defaults to org.apache.shardingsphere.core.keygen.generator.impl.SnowflakeKeyGenerator
masterSlaveRuleConfigs (?) Collection<MasterSlaveRuleConfiguration> Read/write separation rule. By default, read/write separation is not used

TableRuleConfiguration

Table sharding rule configuration object.

The name of the The data type instructions
logicTable String Logical table name
actualDataNodes (?) String Consists of data source name + table name, separated by decimal points. Multiple tables are comma-separated and support inline expressions. By default, data nodes are generated using known data sources and logical table names for broadcast tables (that is, each library needs the same table for associated query, mostly dictionary tables) or only separate databases without separate tables and all libraries have identical table structures
databaseShardingStrategyConfig (?) ShardingStrategyConfiguration Branch library policy. By default, the default branch library policy is used
tableShardingStrategyConfig (?) ShardingStrategyConfiguration Sub-table policy. By default, the default sub-table policy is used
keyGeneratorConfig (?) KeyGeneratorConfiguration Auto-increment column value generator configuration, which defaults to using the default auto-increment primary key generator
encryptorConfiguration (?) EncryptorConfiguration Encryption and decryption generator configuration

StandardShardingStrategyConfiguration

ShardingStrategyConfiguration implementation class, used for standard sharding scenarios with a single sharding key.

The name of the The data type instructions
shardingColumn String Shard column name
preciseShardingAlgorithm PreciseShardingAlgorithm Exact sharding algorithm for = and IN
rangeShardingAlgorithm (?) RangeShardingAlgorithm Range sharding algorithm for BETWEEN

ComplexShardingStrategyConfiguration

ShardingStrategyConfiguration implementation class, used for composite sharding scenarios with multiple sharding keys.

The name of the The data type instructions
shardingColumns String Shard column name. Multiple columns are separated by commas
shardingAlgorithm ComplexKeysShardingAlgorithm Compound sharding algorithm

InlineShardingStrategyConfiguration

ShardingStrategyConfiguration implementation class, used to configure the inline-expression sharding strategy.

The name of the The data type instructions
shardingColumn String Shard column name
algorithmExpression String The inline expression of the sharding algorithm; must conform to Groovy syntax. For details, see Inline expression

HintShardingStrategyConfiguration

ShardingStrategyConfiguration implementation class, used to configure the Hint sharding strategy.

The name of the The data type instructions
shardingAlgorithm HintShardingAlgorithm Hint sharding algorithm

NoneShardingStrategyConfiguration

ShardingStrategyConfiguration implementation class, used to configure a none (no-op) sharding strategy.

KeyGeneratorConfiguration

The name of the The data type instructions
column String Increment column name
type String Increment column value generator type, you can customize or choose the built-in type: SNOWFLAKE/UUID
props Properties Configuration of properties associated with the increment column value generator

Properties

Property configuration item that can be a property of the following auto-increment column value generator.

SNOWFLAKE
The name of the The data type instructions
worker.id (?) long Unique ID of the working machine. Default is 0
max.tolerate.time.difference.milliseconds (?) long Maximum time allowed for a clock rollback, in milliseconds. The default is 10 milliseconds
max.vibration.offset (?) int The value ranges from 0 to 4096. The default value is 1. If the value generated by this algorithm is used as the fragment value, you are advised to configure this attribute. The result is always 0 or 1 after the key generated by this algorithm is modular 2^n (2^n is generally the number of branches or tables) in different milliseconds

EncryptRuleConfiguration

The name of the The data type instructions
encryptors Map<String, EncryptorRuleConfiguration> Configuration list of encryption and decryption device. You can customize or select the built-in type: MD5/AES
tables Map<String, EncryptTableRuleConfiguration> Encryption table configuration list

EncryptorRuleConfiguration

The name of the The data type instructions
type String The encryption and decryption type can be customized or the built-in type is MD5 or AES
properties Properties Note: To use AES encryption, configure the KEY attribute aes.key.value for the AES encryption

EncryptTableRuleConfiguration

The name of the The data type instructions
tables Map<String, EncryptColumnRuleConfiguration> Encrypted column configuration list

EncryptColumnRuleConfiguration

The name of the The data type instructions
plainColumn String Store plaintext fields
cipherColumn String Field that stores ciphertext
assistedQueryColumn String Assisted-query column, used for assisted queries by encryptors of the ShardingQueryAssistedEncryptor type
encryptor String The name of the decryptor

Properties

Properties Configuration item, which can be the following properties.

The name of the The data type instructions
sql.show (?) boolean Whether to enable SQL display. Default value: false
executor.size (?) int Number of worker threads. Default value: number of CPU cores
max.connections.size.per.query (?) int The maximum number of connections per physical database allocated for each query. Default value: 1
check.table.metadata.enabled (?) boolean Whether to check sub-table metadata consistency at startup. Default value: false
query.with.cipher.column (?) boolean Specifies whether to use ciphertext when plaintext columns exist. Default value: true
allow.range.query.with.inline.sharding (?) boolean Whether to allow a range query when using the inline table policy. Default: false

Reading and writing separation

MasterSlaveDataSourceFactory

Create factories for read-write data sources.

The name of the The data type instructions
dataSourceMap Map<String, DataSource> Mapping of data sources to their names
masterSlaveRuleConfig MasterSlaveRuleConfiguration Read/write separation rule
props (?) Properties The configuration properties

MasterSlaveRuleConfiguration

Read/write separation rule configuration object.

The name of the The data type instructions
name String Read/write split data source name
masterDataSourceName String Primary library data source name
slaveDataSourceNames Collection<String> List of slave data source names
loadBalanceAlgorithm (?) MasterSlaveLoadBalanceAlgorithm Load balancing algorithm from library

Properties

Properties Configuration item, which can be the following properties.

The name of the The data type instructions
sql.show (?) boolean Whether to print SQL parsing and rewriting logs. Default value: false
executor.size (?) int The number of worker threads used for SQL execution. Zero indicates unlimited. Default value: 0
max.connections.size.per.query (?) int The maximum number of connections per physical database allocated for each query. Default value: 1
check.table.metadata.enabled (?) boolean Whether to check sub-table metadata consistency at startup. Default value: false

Data desensitization

EncryptDataSourceFactory

The name of the The data type instructions
dataSource DataSource Data source, any connection pool
encryptRuleConfig EncryptRuleConfiguration Data desensitization rules
props (?) Properties The configuration properties

EncryptRuleConfiguration

The name of the The data type instructions
encryptors Map<String, EncryptorRuleConfiguration> Configuration list of encryption and decryption device. You can customize or select the built-in type: MD5/AES
tables Map<String, EncryptTableRuleConfiguration> Encryption table configuration list

Properties

Properties Configuration item, which can be the following properties.

The name of the The data type instructions
sql.show (?) boolean Whether to enable SQL display. Default value: false
query.with.cipher.column (?) boolean Specifies whether to use ciphertext when plaintext columns exist. Default value: true

governance

OrchestrationShardingDataSourceFactory

Data sharding + data source factory for governance.

The name of the The data type instructions
dataSourceMap Map<String, DataSource> With ShardingDataSourceFactory
shardingRuleConfig ShardingRuleConfiguration With ShardingDataSourceFactory
props (?) Properties With ShardingDataSourceFactory
orchestrationConfig OrchestrationConfiguration Governance Rule Configuration

OrchestrationMasterSlaveDataSourceFactory

Read/write separation + governed data source factory.

The name of the The data type instructions
dataSourceMap Map<String, DataSource> With MasterSlaveDataSourceFactory
masterSlaveRuleConfig MasterSlaveRuleConfiguration With MasterSlaveDataSourceFactory
props (?) Properties With ShardingDataSourceFactory
orchestrationConfig OrchestrationConfiguration Governance Rule Configuration

OrchestrationEncryptDataSourceFactory

Data desensitization + data source factory governance.

The name of the The data type instructions
dataSource DataSource With EncryptDataSourceFactory
encryptRuleConfig EncryptRuleConfiguration With EncryptDataSourceFactory
props (?) Properties With ShardingDataSourceFactory
orchestrationConfig OrchestrationConfiguration Governance Rule Configuration

OrchestrationConfiguration

Governance rule configuration objects.

The name of the The data type instructions
instanceConfigurationMap Map<String, CenterConfiguration> Configure the configuration map of the configuration center and registry, where key is the name and value is the configuration or registry

CenterConfiguration

Used to configure the configuration center or registry.

The name of the The data type instructions
type String Configure the instance type of the center or registry, such as ZooKeeper or ETCD, Apollo, NACOS
properties String Configure other parameters required by this example, such as connection parameters of ZooKeeper. For details, see Properties configuration
orchestrationType String The type of the center: config_center or registry_center. If the same center serves as both, use setOrchestrationType("registry_center,config_center")
serverLists String A list of connections to a configuration center or registry server, including IP addresses and port numbers, separated by commas. Such as: host1:2181, host2:2181
namespace (?) String Configure the namespace of the center or registry

The general configuration of properties is as follows:

The name of the The data type instructions
overwrite boolean Whether the local configuration overrides the registry configuration, and if so, the local configuration prevails at each startup

If ZooKeeper is used as the configuration center or/and registry, properties can also be configured:

The name of the The data type instructions
digest (?) String Access token to connect to the registry. By default, no permission authentication is required
operationTimeoutMilliseconds (?) int The number of milliseconds that the operation timed out. Default: 500 milliseconds
maxRetries (?) int Maximum number of retries after a connection failure. The default value is 3
retryIntervalMilliseconds (?) int Number of milliseconds between retries. Default: 500 milliseconds
timeToLiveSeconds (?) int The number of seconds that temporary nodes live. The default is 60 seconds

If etcd is used as the configuration center or/and registry, properties can also be configured:

The name of the The data type instructions
timeToLiveSeconds (?) long TTL Indicates the TTL, in seconds. The default value is 30 seconds

If Apollo is used as the configuration center, properties can also be configured with:

The name of the The data type instructions
appId (?) String Apollo appId, default is “APOLLO_SHARDINGSPHERE”
env (?) String Apollo env, default is “DEV”
clusterName (?) String Apollo clusterName, default is “default”
administrator (?) String Apollo Administrator, default is “”
token (?) String Apollo Token, default value “”
portalUrl (?) String Apollo portalUrl, default is “”
connectTimeout (?) int Apollo connectTimeout. The default value is 1000 milliseconds
readTimeout (?) int Apollo readTimeout, default is 5000 ms

If Nacos is used as the configuration center, properties can also be configured with:

The name of the The data type instructions
group (?) String Nacos group configuration, default is “SHARDING_SPHERE_DEFAULT_GROUP”
timeout (?) long Nacos Specifies the timeout period for obtaining data, in milliseconds. The default value is 3000 milliseconds