shareJDBC+springboot 實現分庫分表
阿新 • • 發佈:2021-09-14
1.分別建立兩個資料庫:db0 和 db1
2.每個庫分別建立表:t_user0/t_user1
建表語句如下:兩個表格式一致
-- Shard tables for db0 and db1 — both schemas must stay identical.
-- Fix: removed `ON UPDATE CURRENT_TIMESTAMP` from create_time; a creation
-- timestamp must not be rewritten every time the row is updated.
CREATE TABLE `t_user0` (
  `id` bigint(20) NOT NULL,
  `name` varchar(64) DEFAULT NULL COMMENT '名稱',
  `sex` tinyint(1) DEFAULT NULL COMMENT '性別',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '建立時間',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;

CREATE TABLE `t_user1` (
  `id` bigint(20) NOT NULL,
  `name` varchar(64) DEFAULT NULL COMMENT '名稱',
  `sex` tinyint(1) DEFAULT NULL COMMENT '性別',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '建立時間',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;
3.建立springboot專案如下:
4.pom依賴如下:
<properties>
    <java.version>1.8</java.version>
    <!-- 3.0.0版本分庫失效 -->
    <sharding.jdbc.version>3.1.0</sharding.jdbc.version>
    <mybatis.version>1.3.0</mybatis.version>
    <druid.version>1.1.10</druid.version>
    <mysql.version>8.0.19</mysql.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>${mysql.version}</version>
    </dependency>
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>${mybatis.version}</version>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid</artifactId>
        <version>${druid.version}</version>
    </dependency>
    <dependency>
        <groupId>io.shardingsphere</groupId>
        <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
        <!-- version was missing: the declared sharding.jdbc.version property was never used -->
        <version>${sharding.jdbc.version}</version>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
</dependencies>
5.yml 設定
mybatis:
  mapper-locations: classpath:mapper/*.xml
# Sharding-JDBC docs: https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/usage/sharding/yaml/
sharding:
  jdbc:
    datasource:
      # Logical data sources ds0, ds1
      names: ds0,ds1
      ds0:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        # Fix: ds0 now points at db0 (the original mapped ds0 -> db1 and
        # ds1 -> db0, so rows landed in the database opposite to its name).
        url: jdbc:mysql://127.0.0.1:3306/db0?useSSL=false&serverTimezone=UTC
        username: root
        password: root
      ds1:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3306/db1?useSSL=false&serverTimezone=UTC
        username: root
        password: root
    config:
      sharding:
        props:
          # Log the actual routed SQL — handy for verifying the sharding result
          sql.show: true
        tables:
          t_user: # logical table t_user
            key-generator-column-name: id # primary key column
            actual-data-nodes: ds${0..1}.t_user${0..1} # data nodes, evenly spread
            # Database sharding strategy: route by id % 2
            databaseStrategy:
              inline:
                sharding-column: id
                algorithm-expression: ds${id % 2}
            # Table sharding strategy: route by sex % 2
            table-strategy:
              inline:
                sharding-column: sex
                algorithm-expression: t_user${sex % 2}
6.雪花演算法工具類
/**
 * Twitter-Snowflake style 64-bit unique ID generator.
 *
 * <p>ID layout (high to low): sign bit (always 0) | 41-bit timestamp delta
 * from a custom epoch | 5-bit datacenter id | 5-bit worker id | 12-bit
 * per-millisecond sequence. Thread-safe: {@link #nextId()} is synchronized.
 */
public class SnowflakeIdWorker {

    /** Custom epoch the timestamp field counts from (2015-01-01, ms). */
    private final long twepoch = 1420041600000L;

    /** Bit widths of the worker id, datacenter id and sequence fields. */
    private final long workerIdBits = 5L;
    private final long datacenterIdBits = 5L;
    private final long sequenceBits = 12L;

    /** Maxima derived from the bit widths: 31, 31 and 4095 respectively. */
    private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
    private final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits);
    private final long sequenceMask = -1L ^ (-1L << sequenceBits);

    /** Left-shift offset of each field inside the composed 64-bit ID. */
    private final long workerIdShift = sequenceBits;
    private final long datacenterIdShift = sequenceBits + workerIdBits;
    private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;

    /** Machine id (0~31). */
    private long workerId;
    /** Datacenter id (0~31). */
    private long datacenterId;
    /** Sequence within the current millisecond (0~4095). */
    private long sequence = 0L;
    /** Timestamp of the most recently issued ID; -1 means none yet. */
    private long lastTimestamp = -1L;

    /**
     * Creates a generator bound to one machine/datacenter pair.
     *
     * @param workerId     machine id, 0..31
     * @param datacenterId datacenter id, 0..31
     * @throws IllegalArgumentException when either id is out of range
     */
    public SnowflakeIdWorker(long workerId, long datacenterId) {
        if (workerId > maxWorkerId || workerId < 0) {
            throw new IllegalArgumentException(
                    String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
        }
        if (datacenterId > maxDatacenterId || datacenterId < 0) {
            throw new IllegalArgumentException(
                    String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
        }
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }

    /**
     * Returns the next unique ID (thread-safe).
     *
     * @return a 64-bit Snowflake ID
     * @throws RuntimeException if the system clock moved backwards
     */
    public synchronized long nextId() {
        long timestamp = timeGen();
        // A clock rollback would let us re-issue old timestamps, so refuse.
        if (timestamp < lastTimestamp) {
            throw new RuntimeException(String.format(
                    "Clock moved backwards. Refusing to generate id for %d milliseconds",
                    lastTimestamp - timestamp));
        }
        if (lastTimestamp == timestamp) {
            // Same millisecond: bump the sequence; on overflow spin into the next ms.
            sequence = (sequence + 1) & sequenceMask;
            if (sequence == 0) {
                timestamp = tilNextMillis(lastTimestamp);
            }
        } else {
            // New millisecond: restart the sequence.
            sequence = 0L;
        }
        lastTimestamp = timestamp;
        // Assemble the four fields into a single 64-bit value.
        return ((timestamp - twepoch) << timestampLeftShift)
                | (datacenterId << datacenterIdShift)
                | (workerId << workerIdShift)
                | sequence;
    }

    /**
     * Busy-waits until the wall clock passes the given timestamp.
     *
     * @param lastTimestamp timestamp of the previously issued ID
     * @return the first millisecond strictly after {@code lastTimestamp}
     */
    protected long tilNextMillis(long lastTimestamp) {
        long now = timeGen();
        while (now <= lastTimestamp) {
            now = timeGen();
        }
        return now;
    }

    /** Current wall-clock time in milliseconds; overridable for tests. */
    protected long timeGen() {
        return System.currentTimeMillis();
    }
}
7.實體類
/**
 * User entity mapped onto the sharded t_user0/t_user1 tables.
 * Lombok generates getters/setters (@Data) plus all-args and no-args
 * constructors — field order here determines the all-args constructor order.
 */
@Data @AllArgsConstructor @NoArgsConstructor public class User {
    // Primary key, assigned by SnowflakeIdWorker; database sharding column (ds${id % 2}).
    private Long id;
    // Display name; column is varchar(64).
    private String name;
    // Row creation time. NOTE(review): java.util.Date is legacy — consider java.time types.
    private Date createTime;
    // 1 = male, 2 = female (per the controller); table sharding column (t_user${sex % 2}).
    private Integer sex;
}
8.dao層
/**
 * MyBatis mapper for the logical t_user table; the SQL lives in
 * mapper/*.xml and Sharding-JDBC routes it to the physical shards.
 */
@Mapper public interface UserMapper {
    /**
     * Inserts one user row (original doc: 儲存 = save).
     *
     * @param user row to insert; the caller assigns the id beforehand
     */
    void save(User user);
    /**
     * Loads one user by primary key (original doc: 查詢 = query).
     *
     * @param id primary key — also the database sharding column
     * @return the matching user, or null when absent (MyBatis default — confirm in mapper XML)
     */
    User get(Long id);
}
9.service層
/**
 * Service layer wrapping {@link UserMapper}; Sharding-JDBC routes each
 * call to the proper database/table underneath.
 */
@Service
public class UserService {

    private final UserMapper userMapper;

    /**
     * Constructor injection instead of field {@code @Autowired}: the
     * dependency becomes final and mandatory, and the class is unit-testable
     * without a Spring context.
     *
     * @param userMapper MyBatis mapper for t_user
     */
    @Autowired
    public UserService(UserMapper userMapper) {
        this.userMapper = userMapper;
    }

    /**
     * Persists one user.
     *
     * @param user row to insert
     */
    public void save(User user) {
        this.userMapper.save(user);
    }

    /**
     * Fetches one user by primary key.
     *
     * @param id primary key
     * @return matching user, or null when not found
     */
    public User get(Long id) {
        return this.userMapper.get(id);
    }
}
10.controller層
@RestController @RequestMapping("/user") public class UserController { @Autowired private UserService userService; @RequestMapping("/save") public String save() { SnowflakeIdWorker idWorker = new SnowflakeIdWorker(1, 3); for (int i = 1; i < 50; i++) { User user = new User(); long id = idWorker.nextId(); long a= Math.random() > 0.5 ? 1l : 2l; user.setId ( id+a); //此處只為方便顯示不同庫中不同表,不保證id不重複;僅僅為了更好的測試出效果 user.setName("test" + i); // 1 男 2 女 user.setSex(Math.random() > 0.5 ? 1 : 2); userService.save(user); } return "success"; } @RequestMapping("/get") public User get(Long id) { User user = this.userService.get(id); return user; } }
11.測試效果:
db0 資料庫:
t_user0表:
t_user1表:
db1 資料庫:
t_user0表:
t_user1表:
49條資料已經按照需求加入了兩個庫的四張表中