shardingsphere實現分庫分表
阿新 • 發佈:2022-03-02
1.POM配置
<!-- Spring JDBC support (DataSource / transaction infrastructure used by sharding-jdbc) -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-jdbc</artifactId>
</dependency>
<!-- MyBatis ORM integration -->
<dependency>
    <groupId>org.mybatis.spring.boot</groupId>
    <artifactId>mybatis-spring-boot-starter</artifactId>
</dependency>
<!-- MySQL JDBC driver -->
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
</dependency>
<!-- Druid connection pool (configured via datasource.cloudtravel-consumer* properties) -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
</dependency>
<!-- ShardingSphere 4.1.1: rule-configuration API + sharding JDBC data source factory -->
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>sharding-core-api</artifactId>
    <version>4.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>sharding-jdbc-core</artifactId>
    <version>4.1.1</version>
</dependency>
2.變數配置。這裡的變數名可根據自己的需求進行命名
# ************************** sharding sphere config start ****************************
# Tables that take part in DATABASE sharding (comma separated).
# Tables not listed here are routed straight to the default database.
SHARDING-DATABASE-TABLE-NAMES=B_USER,T_SP
# Tables that take part in TABLE sharding (comma separated).
SHARDING-TABLE-NAMES=T_SP
# Common database-sharding column (tenant id parity decides the target database).
database.sharding.column=TENANT_ID
# Actual data nodes of table b_user: database-sharded only (cloudtravel_consumer1..2).
table.user.actual-data-nodes=cloudtravel_consumer$->{1..2}.b_user
# Actual data nodes of table t_sp: database-sharded AND table-sharded (t_sp_0, t_sp_1).
table.sp.actual-data-nodes=cloudtravel_consumer$->{1..2}.t_sp_$->{0..1}
# Table-sharding column of t_sp.
table.sp.sharding.column=BIZ_ID
# Configure the 1st data source
sharding.datasource1.name=cloudtravel_consumer1
datasource.cloudtravel-consumer1.driver-class-name=com.mysql.cj.jdbc.Driver
datasource.cloudtravel-consumer1.url=jdbc:mysql://localhost:3306/cloudtravel_consumer1?useAffectedRows=true&serverTimezone=UTC&characterEncoding=utf-8
datasource.cloudtravel-consumer1.username=root
datasource.cloudtravel-consumer1.password=root
# Configure the 2nd data source
# NOTE(review): this key uses upper-case "SHARDING." while datasource1 uses lower-case
# "sharding." — DataSourceConfigBase reads both keys verbatim, so if you normalize the
# casing here you must change the @Value expression there in the same commit.
SHARDING.datasource2.name=cloudtravel_consumer2
datasource.cloudtravel-consumer2.driver-class-name=com.mysql.cj.jdbc.Driver
datasource.cloudtravel-consumer2.url=jdbc:mysql://localhost:3306/cloudtravel_consumer2?useAffectedRows=true&serverTimezone=UTC&characterEncoding=utf-8
datasource.cloudtravel-consumer2.username=root
datasource.cloudtravel-consumer2.password=root
# MyBatis mapper XML locations.
# (The original comment here said "whether to print sql statements", which did not
# match this property — no SQL-logging switch is actually configured in this file.)
mybatis.mapper-locations=classpath:mapper/*.xml
# ************************** sharding sphere config end ****************************
3.基礎配置類
package com.cloudtravel.shardingsphere.db;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.annotation.Order;

/**
 * Read-only holder for all sharding-related configuration values.
 *
 * <p>Binds the keys defined in the "sharding sphere config" section of the
 * properties file and exposes them through getters, so that
 * {@code DataSourceConfig} and the sharding algorithms do not repeat
 * {@code @Value} expressions. Ordered before {@code DataSourceConfig}
 * (order 10 vs 11) so the values are bound first.
 */
@Configuration
@Order(10)
public class DataSourceConfigBase {

    /** Logical name of the 1st data source (e.g. cloudtravel_consumer1). */
    @Value("${sharding.datasource1.name}")
    private String datasourceName1;

    // NOTE(review): key prefix is upper-case "SHARDING." here while datasource1 uses
    // lower-case "sharding." — it must match the properties file exactly; keep in sync.
    @Value("${SHARDING.datasource2.name}")
    private String datasourceName2;

    /** Classpath pattern of the MyBatis mapper XML files. */
    @Value("${mybatis.mapper-locations}")
    private String mapperLocations;

    /** Default database-sharding column (TENANT_ID). */
    @Value("${database.sharding.column}")
    private String databaseShardingColumnDefault;

    /** Comma-separated logical tables that take part in database sharding. */
    @Value("${SHARDING-DATABASE-TABLE-NAMES}")
    private String shardingDatabaseTableNames;

    /** Comma-separated logical tables that take part in table sharding. */
    @Value("${SHARDING-TABLE-NAMES}")
    private String shardingTableNames;

    /** Actual data nodes expression for table b_user. */
    @Value("${table.user.actual-data-nodes}")
    private String userActualDataNodes;

    /** Actual data nodes expression for table t_sp. */
    @Value("${table.sp.actual-data-nodes}")
    private String spActualDataNodes;

    /** Table-sharding column for t_sp (BIZ_ID). */
    @Value("${table.sp.sharding.column}")
    private String tableSpShardingColumn;

    public String getDatasourceName1() {
        return datasourceName1;
    }

    public String getDatasourceName2() {
        return datasourceName2;
    }

    public String getMapperLocations() {
        return mapperLocations;
    }

    public String getDatabaseShardingColumnDefault() {
        return databaseShardingColumnDefault;
    }

    public String getShardingDatabaseTableNames() {
        return shardingDatabaseTableNames;
    }

    public String getShardingTableNames() {
        return shardingTableNames;
    }

    public String getUserActualDataNodes() {
        return userActualDataNodes;
    }

    public String getSpActualDataNodes() {
        return spActualDataNodes;
    }

    public String getTableSpShardingColumn() {
        return tableSpShardingColumn;
    }
}
4.分庫邏輯實現
/**
 * Database-sharding algorithm: routes a logical table to one of the two
 * physical databases by the parity of the sharding value (TENANT_ID).
 *
 * <p>Even (or missing) tenant ids go to datasource 1, odd ids to datasource 2.
 */
@Service
public class DatabaseShardingAlgorithm implements PreciseShardingAlgorithm {

    @Autowired
    private DataSourceConfigBase dataSourceConfigBase;

    /**
     * @param collection           all available target data source names (unused: parity decides)
     * @param preciseShardingValue logical table + sharding column value for this statement
     * @return the name of the target data source
     */
    @Override
    public String doSharding(Collection collection, PreciseShardingValue preciseShardingValue) {
        String tableName = preciseShardingValue.getLogicTableName();
        // Fix: the original called getValue().toString() before any null check and
        // would NPE on a null sharding value; a null/blank value now falls back to 0
        // (datasource 1), matching the original's empty-string branch.
        Object rawValue = preciseShardingValue.getValue();
        int value = 0;
        if (rawValue != null) {
            String text = rawValue.toString().trim();
            if (!text.isEmpty()) {
                // Tenant id is expected to be numeric; a non-numeric value still
                // throws NumberFormatException, as it did before.
                value = Integer.parseInt(text);
            }
        }
        // value % 2 == 0 also holds for negative even ids, so parity routing is safe.
        String dataSource = (value % 2 == 0)
                ? dataSourceConfigBase.getDatasourceName1()
                : dataSourceConfigBase.getDatasourceName2();
        System.out.println(tableName + "走分庫,tenantId = " + value + "進入" + dataSource);
        return dataSource;
    }
}
5.分表邏輯實現
/**
 * Table-sharding algorithm for t_sp: routes to physical table
 * {@code <logicTable>_0} or {@code <logicTable>_1} by the parity of BIZ_ID.
 */
@Service
public class TableShardingAlgorithm implements ComplexKeysShardingAlgorithm {

    // NOTE(review): injected but never read in this class; kept so the bean
    // definition stays backward compatible. Consider removing it together with
    // any external references in a follow-up.
    @Value("${SHARDING-TABLE-NAMES}")
    private String SHARDING_TABLE_NAMES;

    /**
     * @param collection               all available target table names (unused: parity decides)
     * @param complexKeysShardingValue logical table + sharding column values for this statement
     * @return singleton collection holding the physical table name
     */
    @Override
    public Collection<String> doSharding(Collection collection, ComplexKeysShardingValue complexKeysShardingValue) {
        String tableName = complexKeysShardingValue.getLogicTableName();
        // NOTE(review): the key is hard-coded lower-case "biz_id" while the rule is
        // configured with column BIZ_ID — confirm the casing ShardingSphere delivers here.
        List values = (List) complexKeysShardingValue.getColumnNameAndShardingValuesMap().get("biz_id");
        if (values == null || values.isEmpty()) {
            // Fix: fail with a diagnosable message instead of a bare NPE/IndexOutOfBounds.
            throw new IllegalStateException("No sharding value for column biz_id on table " + tableName);
        }
        // Fix: the original hard (Long) cast broke when the driver delivered an
        // Integer; any numeric type is accepted now.
        long bizId = ((Number) values.get(0)).longValue();
        // Fix: bizId % 2 yields -1 for negative ids and would route to a
        // nonexistent table; floorMod keeps the suffix in {0, 1}.
        String physicsTable = tableName + "_" + Math.floorMod(bizId, 2L);
        System.out.println(tableName + "走分表,bizId = " + bizId + "路由到表" + physicsTable);
        List<String> tables = new ArrayList<>();
        tables.add(physicsTable);
        return tables;
    }
}
6.dataSource配置
package com.cloudtravel.shardingsphere.db;

import com.alibaba.druid.pool.DruidDataSource;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.ComplexShardingStrategyConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.StandardShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.annotation.Order;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Assembles the ShardingSphere data source: two Druid-backed physical
 * databases, the sharding rules for b_user (database-sharded) and t_sp
 * (database- and table-sharded), and the MyBatis / transaction wiring on top.
 */
@Configuration
@MapperScan(basePackages = "com.cloudtravel.shardingsphere", sqlSessionTemplateRef = "testSqlSessionTemplate")
@Order(11)
public class DataSourceConfig {

    @Autowired
    private TableShardingAlgorithm tableShardingAlgorithmCom;

    @Autowired
    private DatabaseShardingAlgorithm databaseShardingAlgorithm;

    @Autowired
    DataSourceConfigBase dataSourceConfigBase;

    /**
     * Builds the sharding-aware {@link DataSource} that all persistence beans use.
     *
     * @return the ShardingSphere data source wrapping both physical databases
     * @throws SQLException if the sharding data source cannot be created
     */
    @Primary
    @Bean(name = "shardingDataSource")
    DataSource getShardingDataSource() throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        // Default database-sharding strategy: route by TENANT_ID parity.
        shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(
                new StandardShardingStrategyConfiguration(
                        dataSourceConfigBase.getDatabaseShardingColumnDefault(),
                        databaseShardingAlgorithm));
        // Per-table rules for b_user and t_sp.
        shardingRuleConfig.getTableRuleConfigs().addAll(Arrays.asList(
                getBUserRuleConfiguration(),
                getTSpRuleConfiguration()));
        // Tables without a rule fall through to this default database.
        shardingRuleConfig.setDefaultDataSourceName(dataSourceConfigBase.getDatasourceName1());
        return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
    }

    /**
     * SqlSessionFactory bound to the sharding data source and the configured
     * mapper XML locations.
     *
     * @param shardingDataSource injected by type (the @Primary sharding data source)
     */
    @Bean
    @Primary
    public SqlSessionFactory sqlSessionFactory(DataSource shardingDataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(shardingDataSource);
        bean.setMapperLocations(
                new PathMatchingResourcePatternResolver().getResources(dataSourceConfigBase.getMapperLocations()));
        return bean.getObject();
    }

    /** SqlSessionTemplate referenced by the @MapperScan above. */
    @Bean
    @Primary
    public SqlSessionTemplate testSqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }

    /**
     * Transaction manager for the sharding data source. Must be declared
     * explicitly because the data source is hand-assembled.
     *
     * <p>Fix: the method (and thus the bean name) was misspelled
     * "transactitonManager"; renamed so that by-name lookup of the
     * conventional "transactionManager" bean works.
     */
    @Bean
    public DataSourceTransactionManager transactionManager(DataSource shardingDataSource) {
        return new DataSourceTransactionManager(shardingDataSource);
    }

    /**
     * Rule for b_user: database sharding only (no table sharding), routed by
     * the default sharding column.
     */
    private TableRuleConfiguration getBUserRuleConfiguration() {
        TableRuleConfiguration ruleConfig =
                new TableRuleConfiguration("b_user", dataSourceConfigBase.getUserActualDataNodes());
        ruleConfig.setDatabaseShardingStrategyConfig(
                new StandardShardingStrategyConfiguration(
                        dataSourceConfigBase.getDatabaseShardingColumnDefault(),
                        databaseShardingAlgorithm));
        return ruleConfig;
    }

    /**
     * Rule for t_sp: database sharding by TENANT_ID plus table sharding by
     * BIZ_ID into t_sp_0 / t_sp_1.
     */
    private TableRuleConfiguration getTSpRuleConfiguration() {
        TableRuleConfiguration ruleConfig =
                new TableRuleConfiguration("t_sp", dataSourceConfigBase.getSpActualDataNodes());
        ruleConfig.setDatabaseShardingStrategyConfig(
                new StandardShardingStrategyConfiguration(
                        dataSourceConfigBase.getDatabaseShardingColumnDefault(),
                        databaseShardingAlgorithm));
        ruleConfig.setTableShardingStrategyConfig(
                new ComplexShardingStrategyConfiguration(
                        dataSourceConfigBase.getTableSpShardingColumn(),
                        tableShardingAlgorithmCom));
        return ruleConfig;
    }

    /** Druid pool for the 1st physical database, bound from datasource.cloudtravel-consumer1.*. */
    @Bean("dataSource1")
    @ConfigurationProperties(prefix = "datasource.cloudtravel-consumer1")
    public DataSource druidDataSource1() {
        return new DruidDataSource();
    }

    /** Druid pool for the 2nd physical database, bound from datasource.cloudtravel-consumer2.*. */
    @Bean("dataSource2")
    @ConfigurationProperties(prefix = "datasource.cloudtravel-consumer2")
    public DataSource druidDataSource2() {
        return new DruidDataSource();
    }

    /**
     * Maps logical data source names to the two Druid pools. The @Bean method
     * calls are intercepted by the @Configuration CGLIB proxy, so the same
     * singleton pools are returned here.
     */
    private Map<String, DataSource> createDataSourceMap() {
        Map<String, DataSource> result = new HashMap<>(4);
        result.put(dataSourceConfigBase.getDatasourceName1(), druidDataSource1());
        result.put(dataSourceConfigBase.getDatasourceName2(), druidDataSource2());
        return result;
    }
}