操作系統:CentOS 6 x86_64
MongoDB版本:3.4.3
集群主機拓撲:
| 主機 | mongo shardsvr & ReplSetName | mongo configsvr & ReplSetName | mongos |
| --- | --- | --- | --- |
| test1.lan | shard-a shard-b | | |
| test2.lan | shard-a shard-b | | |
| test3.lan | shard-a shard-b | | |
| test4.lan | | cfgReplSet | |
| test5.lan | | cfgReplSet | |
| test6.lan | | cfgReplSet | |
| test7.lan | | | yes |
test1-3 分別在一臺主機上啟動兩個不同副本集名稱的mongod實例。
test4-6 三臺主機作為 config server 單獨運行。
test7 主機作為 mongos 路由主機。

安裝 MongoDB
配置 repo 源
[mongodb-org-3.4] name=MongoDB Repository #baseurl=https://repo.mongodb.org/yum/redhat//mongodb-org/3.4/x86_64/ baseurl=https://mirrors.aliyun.com/mongodb/yum/redhat/$releasever/mongodb-org/3.4/x86_64/ gpgcheck=0 enabled=1 gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
選擇國內 阿里云 鏡像資源。
# yum install mongodb-org -y
配置 /etc/mongod.conf
# mongod.conf # for documentation of all options, see: # http://docs.mongodb.org/manual/reference/configuration-options/ # where to write logging data. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log # Where and how to store data. storage: dbPath: /var/lib/mongo journal: enabled: true # engine: # mmapv1: # wiredTiger: # how the process runs processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile # network interfaces net: port: 27017 bindIp: 0.0.0.0 # Listen to local interface only, comment to listen on all interfaces. #security: #operationProfiling: replication: replSetName: shard-a sharding: clusterRole: shardsvr ## Enterprise-Only Options #auditLog: #snmp:
replication 處配置 副本集 名,sharding 開啟 shardsvr 模式。
啟動 mongod 服務
[root@test1 ~]# service mongod start Starting mongod: [ OK ] [root@test2 ~]# service mongod start Starting mongod: [ OK ] [root@test3 ~]# service mongod start Starting mongod: [ OK ]
配置 shard-a 副本集
[root@test1 ~]# mongo test1.lan:27017
MongoDB shell version v3.4.3
connecting to: test1.lan:27017
MongoDB server version: 3.4.3
Server has startup warnings:
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten]
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten] ** See http://dochub.mongodb.org/core/prodnotes-filesystem
2017-04-24T22:46:20.321+0800 I CONTROL [initandlisten]
> rs.initiate()
{
"info2" : "no configuration specified. Using a default configuration for the set",
"me" : "test1.lan:27017",
"ok" : 1
}
shard-a:SECONDARY>
shard-a:PRIMARY> config = rs.config() # 保存配置對象
{
"_id" : "shard-a",
"version" : 1,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "test1.lan:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : 2000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("58fe111823612a418eb7f3fc")
}
}
shard-a:PRIMARY> config.members[0].priority = 2 # 這里增加自身主機的優先級為 2,防止后面 PRIMARY 重新選舉到其余主機
2
shard-a:PRIMARY> rs.reconfig(config) # 重新應用該配置
{ "ok" : 1 }
shard-a:PRIMARY> rs.add("test2.lan:27017") # 添加副本集主機
{ "ok" : 1 }
shard-a:PRIMARY> rs.add("test3.lan") # 添加副本集主機(默認端口為 27017)
{ "ok" : 1 }
shard-a:PRIMARY> rs.config()
{
"_id" : "shard-a",
"version" : 4,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "test1.lan:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 2,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "test2.lan:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "test3.lan:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : 2000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("58fe111823612a418eb7f3fc")
}
}這樣,副本集 shard-a 就配置完成
接下來我們啟動并配置副本集 shard-b
[root@test1 ~]# mkdir /var/lib/mongo2 [root@test1 ~]# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal about to fork child process, waiting until server is ready for connections. forked process: 14323 child process started successfully, parent exiting [root@test2 ~]# mkdir /var/lib/mongo2 [root@test2 ~]# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal about to fork child process, waiting until server is ready for connections. forked process: 5623 child process started successfully, parent exiting [root@test3 ~]# mkdir /var/lib/mongo2 [root@test3 ~]# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal about to fork child process, waiting until server is ready for connections. forked process: 4303 child process started successfully, parent exiting
配置 shard-b 副本集
[root@test1 ~]# mongo test1.lan:37017
MongoDB shell version v3.4.3
connecting to: test1.lan:37017
MongoDB server version: 3.4.3
Server has startup warnings:
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten]
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten] ** See http://dochub.mongodb.org/core/prodnotes-filesystem
2017-04-24T22:59:44.019+0800 I CONTROL [initandlisten]
> rs.initiate()
{
"info2" : "no configuration specified. Using a default configuration for the set",
"me" : "test1.lan:37017",
"ok" : 1
}
shard-b:SECONDARY>
shard-b:PRIMARY> config = rs.config()
{
"_id" : "shard-b",
"version" : 1,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "test1.lan:37017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : 2000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("58fe1465f7a2e985d87b8bf8")
}
}
shard-b:PRIMARY> config.members[0].priority = 2
2
shard-b:PRIMARY> rs.reconfig(config)
{ "ok" : 1 }
shard-b:PRIMARY> rs.add("test2.lan:37017")
{ "ok" : 1 }
shard-b:PRIMARY> rs.add("test3.lan:37017")
{ "ok" : 1 }
shard-b:PRIMARY> rs.isMaster()
{
"hosts" : [
"test1.lan:37017",
"test2.lan:37017",
"test3.lan:37017"
],
"setName" : "shard-b",
"setVersion" : 4,
"ismaster" : true,
"secondary" : false,
"primary" : "test1.lan:37017",
"me" : "test1.lan:37017",
"electionId" : ObjectId("7fffffff0000000000000001"),
"lastWrite" : {
"opTime" : {
"ts" : Timestamp(1493046429, 1),
"t" : NumberLong(1)
},
"lastWriteDate" : ISODate("2017-04-24T15:07:09Z")
},
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2017-04-24T15:07:24.475Z"),
"maxWireVersion" : 5,
"minWireVersion" : 0,
"readOnly" : false,
"ok" : 1
}這樣 shard-a shard-b 兩個副本集已經配置完成
開始配置 config server。MongoDB 從 3.2 版本開始支持 config server 以副本集方式部署(CSRS),並自 3.4 版本起強制要求 config server 必須以副本集方式運行。
config server 的配置文件如下:config server 一般情況下是監聽在 27019 端口
# mongod.conf # for documentation of all options, see: # http://docs.mongodb.org/manual/reference/configuration-options/ # where to write logging data. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log # Where and how to store data. storage: dbPath: /var/lib/mongo journal: enabled: true # engine: # mmapv1: # wiredTiger: # how the process runs processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile # network interfaces net: port: 27019 bindIp: 0.0.0.0 # Listen to local interface only, comment to listen on all interfaces. #security: #operationProfiling: replication: replSetName: cfgReplSet sharding: clusterRole: configsvr ## Enterprise-Only Options #auditLog: #snmp:
啟動三臺config server 的mongod 服務
[root@test4 ~]# service mongod start Starting mongod: [ OK ] [root@test5 ~]# service mongod start Starting mongod: [ OK ] [root@test6 ~]# service mongod start Starting mongod: [ OK ]
同樣,config server 的副本集配置如上文所示,這里的代碼就省略了。
配置啟動 mongos 路由主機
[root@test7 ~]# mongos --configdb cfgReplSet/test4.lan:27019,test5.lan:27019,test6.lan:27019 --logpath /var/log/mongodb/mongos.log --fork --port 30000 about to fork child process, waiting until server is ready for connections. forked process: 3338 child process started successfully, parent exiting
MongoDB 版本 >3.2 啟動mongos 的時候,需要跟上 config server 的副本集名稱 (這里是 cfgReplSet)
連接 mongos 測試
[root@test7 ~]# mongo test7.lan:30000 MongoDB shell version v3.4.4 connecting to: test7.lan:30000 MongoDB server version: 3.4.4 Server has startup warnings: 2017-04-24T23:30:47.285+0800 I CONTROL [main] 2017-04-24T23:30:47.285+0800 I CONTROL [main] ** WARNING: Access control is not enabled for the database. 2017-04-24T23:30:47.285+0800 I CONTROL [main] ** Read and write access to data and configuration is unrestricted. 2017-04-24T23:30:47.285+0800 I CONTROL [main] ** WARNING: You are running this process as the root user, which is not recommended. 2017-04-24T23:30:47.285+0800 I CONTROL [main] mongos> show dbs admin 0.000GB config 0.000GB mongos> use config switched to db config mongos> show collections chunks lockpings locks migrations mongos shards tags version mongos> db.shards.find() # 這里沒有返回文檔,也說明分片集群中并沒有添加可用分片集群。
配置分片集群:shard
mongos> sh.addShard("shard-a/test1.lan,test2.lan,test3.lan")
{ "shardAdded" : "shard-a", "ok" : 1 }
mongos> db.shards.find()
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
mongos> sh.addShard("shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017")
{ "shardAdded" : "shard-b", "ok" : 1 }
mongos> db.shards.find()
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
{ "_id" : "shard-b", "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017", "state" : 1 }
# 檢查分片集群的分片副本集數量,方法一
mongos> db.getSiblingDB('config').shards.find()
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
{ "_id" : "shard-b", "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017", "state" : 1 }
# 檢查分片集群的分片副本集數量,方法二
mongos> use admin
switched to db admin
mongos> db.runCommand({listshards: 1})
{
"shards" : [
{
"_id" : "shard-a",
"host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017",
"state" : 1
},
{
"_id" : "shard-b",
"host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017",
"state" : 1
}
],
"ok" : 1
}
# 檢查分片集群的分片副本集數量,方法三配置分片集合:接下來的步驟就是在數據庫上啟動分片。分片不會自動完成,而是需要在數據庫里提前為集合做好設置才行。
mongos> sh.enableSharding("test2_db") # 該庫可以是已存在的,也可以是暫不存在的
{ "ok" : 1 }
mongos> db.getSiblingDB("config").databases.find()
{ "_id" : "test2_db", "primary" : "shard-a", "partitioned" : true }
# sharding 分片庫的配置庫 databases 集合已經有相應的配置記錄了。
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("58fe17e90b3df66581ff6b09")
}
shards:
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
{ "_id" : "shard-b", "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017", "state" : 1 }
active mongoses:
"3.4.4" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "test2_db", "primary" : "shard-a", "partitioned" : true }
# 分片狀態中更能方便的查看當前分片的狀態信息,包括分片集群成員 以及 分片數據庫,分片機制等。
mongos> sh.shardCollection("test2_db.users", {username: 1, _id: 1})
{ "collectionsharded" : "test2_db.users", "ok" : 1 }
# 此處,我們選擇 username _id 作為組合分片鍵,組合分片鍵必須是一個索引
# 如果集合為空,那么該行命令會自動在集合中創建該索引,如果集合已存在對應數據,且該組合鍵的索引沒有事先創建好,那么這條語句就會拋出錯誤
# 需要手動到集合創建該組合的索引,之后才能作為分片鍵
mongos> db.getSiblingDB("config").collections.find().pretty()
{
"_id" : "test2_db.users",
"lastmodEpoch" : ObjectId("58fe21de224dc86230e9a8f7"),
"lastmod" : ISODate("1970-02-19T17:02:47.296Z"),
"dropped" : false,
"key" : {
"username" : 1,
"_id" : 1
},
"unique" : false
}
# 配置完成后,config.collections 就存在了相應集合的分片鍵信息。來看看分片集合在單獨分片副本集中的存在形式
首先需要找到該庫已經被分配到了哪個分片之上(由于該庫之前并沒有數據,所以創建分片鍵的時候,會自動插入索引數據,自動按照默認配置路由到其中一個分片鍵集群之中)
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("58fe17e90b3df66581ff6b09")
}
shards:
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
{ "_id" : "shard-b", "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017", "state" : 1 }
active mongoses:
"3.4.4" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "test2_db", "primary" : "shard-a", "partitioned" : true }
test2_db.users
shard key: { "username" : 1, "_id" : 1 }
unique: false
balancing: true
chunks:
shard-a 1
{ "username" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } } -->> { "username" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } } on : shard-a Timestamp(1, 0)
# 最后這行 databases 看到了,該庫的該數據塊(chunks) 被分配到了 shard-a 副本集中,那么我們接下來就可以直接到 shard-a 中查看該庫中users集合的文檔信息。
# 登錄到 shard-a 副本集中進行查看
shard-a:PRIMARY> show dbs
admin 0.000GB
local 0.000GB
test2_db 0.000GB
shard-a:PRIMARY> use test2_db
switched to db test2_db
shard-a:PRIMARY> db.users.find() # 該集合暫時沒有文檔
shard-a:PRIMARY> db.users.getIndexes() # 查看該集合的索引配置信息
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test2_db.users"
},
{
"v" : 2,
"key" : {
"username" : 1,
"_id" : 1
},
"name" : "username_1__id_1",
"ns" : "test2_db.users"
} # 查看到了兩個索引,第一個索引 _id 為系統默認添加的索引,第二個索引就是創建分片鍵的時候自動創建的組合鍵索引
]寫入數據到分片集群
# 首先創建一個數據對象,用來填充文檔大小
mongos> data = new Array(2049).join("abcd ")
mongos> data.length
10240
# data 大小為 10KB(2048 個 "abcd " 連接,每個 5 字節,共 10240 字節)
mongos> for (var i=0; i < 100; i++){
... db.getSiblingDB("test2_db").users.insert({
... username: "Join" + i,
... age: i % 13 + 20,
... data: data }
... )
... }
WriteResult({ "nInserted" : 1 })
# 批量插入 100 條文檔,每個文檔約為 10KB 大小,總計約 1MB。
# 接下來看看有了這么多文檔過后,會怎么分片。
mongos> db.getSiblingDB("config").chunks.count()
3
# 插入這么多數據以后,就會發現多了幾個數據塊。我們可以通過檢查 config 庫 chunks 集合中數據塊的數量來驗證這個猜想
mongos> db.getSiblingDB("config").chunks.find().pretty()
{
"_id" : "test2_db.users-username_MinKey_id_MinKey",
"lastmod" : Timestamp(2, 1),
"lastmodEpoch" : ObjectId("58fe21de224dc86230e9a8f7"),
"ns" : "test2_db.users",
"min" : {
"username" : { "$minKey" : 1 },
"_id" : { "$minKey" : 1 }
},
"max" : {
"username" : "Join1",
"_id" : ObjectId("58fe293756525c8a54e2a5af")
},
"shard" : "shard-a"
}
{
"_id" : "test2_db.users-username_\"Join1\"_id_ObjectId('58fe293756525c8a54e2a5af')",
"lastmod" : Timestamp(1, 2),
"lastmodEpoch" : ObjectId("58fe21de224dc86230e9a8f7"),
"ns" : "test2_db.users",
"min" : {
"username" : "Join1",
"_id" : ObjectId("58fe293756525c8a54e2a5af")
},
"max" : {
"username" : "Join2",
"_id" : ObjectId("58fe293756525c8a54e2a5b0")
},
"shard" : "shard-a"
}
{
"_id" : "test2_db.users-username_\"Join2\"_id_ObjectId('58fe293756525c8a54e2a5b0')",
"lastmod" : Timestamp(2, 0),
"lastmodEpoch" : ObjectId("58fe21de224dc86230e9a8f7"),
"ns" : "test2_db.users",
"min" : {
"username" : "Join2",
"_id" : ObjectId("58fe293756525c8a54e2a5b0")
},
"max" : {
"username" : { "$maxKey" : 1 },
"_id" : { "$maxKey" : 1 }
},
"shard" : "shard-b"
}
# 查看每個數據塊的詳細分片信息,發現有兩個塊被存儲在 shard-a 副本集中,還有一個數據塊被存儲在 shard-b 副本集中
# 我們也可以通過 sh.status() 來更直觀的看到相關信息。
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("58fe17e90b3df66581ff6b09")
}
shards:
{ "_id" : "shard-a", "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017", "state" : 1 }
{ "_id" : "shard-b", "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017", "state" : 1 }
active mongoses:
"3.4.4" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
1 : Success
databases:
{ "_id" : "test2_db", "primary" : "shard-a", "partitioned" : true }
test2_db.users
shard key: { "username" : 1, "_id" : 1 }
unique: false
balancing: true
chunks:
shard-a 2
shard-b 1
{ "username" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } } -->> { "username" : "Join1", "_id" : ObjectId("58fe293756525c8a54e2a5af") } on : shard-a Timestamp(2, 1)
{ "username" : "Join1", "_id" : ObjectId("58fe293756525c8a54e2a5af") } -->> { "username" : "Join2", "_id" : ObjectId("58fe293756525c8a54e2a5b0") } on : shard-a Timestamp(1, 2)
{ "username" : "Join2", "_id" : ObjectId("58fe293756525c8a54e2a5b0") } -->> { "username" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } } on : shard-b Timestamp(2, 0)
# 這個方法會打印所有的數據庫信息,并且包含范圍信息。表象背后,MongoDB 底層依賴 2 個機制來保持集群的平衡:分割與遷移。
分割是把一個大的數據塊(chunk)分割為 2 個更小的數據塊的過程。它只會在數據塊大小超過最大限制的時候才會發生,目前的默認設置是 64MB。分割是必須的,因為數據塊太大就難以在整個集群中分布。
遷移就是在分片之間移動數據塊的過程。當某些分片服務器包含的數據塊數量大大超過其他分片服務器就會觸發遷移過程,這個觸發器叫做遷移回合(migration round)。在一個遷移回合中,數據塊從某些分片服務器遷移到其他分片服務器,直到集群看起來相對平衡為止。我們可以想象一下這兩個操作,遷移比分割昂貴得多。
實際上,這些操作不應該影響我們,但是明白這一點非常有用,當遇到性能問題的時候就要想到可能它們正在遷移數據。如果插入的數據分布均勻,各個分片上的數據集應該差不多以相同的速度增長,則遷移應該不會頻繁發生。
免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。