一、副本集

1.创建多实例目录

2.配置多实例

replication:
  #类似于MySQL的binlog,指定oplog的大小
  oplogSizeMB: 1024
  #副本集的名称,集群名称
  replSetName: dba

3.启动副本集

4.连接多实例

[mongo@redis03 ~]$ mongo 10.0.0.93:28017
[mongo@redis03 ~]$ mongo 10.0.0.93:28018
[mongo@redis03 ~]$ mongo 10.0.0.93:28019

5.初始化副本集

6.查看集群状态

rs.status()

7.测试主库创建数据

#主库插入数据
dba:PRIMARY> db.testtable.insertMany([{name:"gcc",tedian:"naocan"},{name:"qiudao",tedian:"tongshang"}])

#从库查看数据,报错,因为从库默认不提供读写服务
dba:SECONDARY> show dbs
2020-05-29T09:05:39.306+0800 E QUERY    [thread1] Error: listDatabases failed:{
    "operationTime" : Timestamp(1590714335, 1),
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMasterNoSlaveOk",
    "$clusterTime" : {
        "clusterTime" : Timestamp(1590714335, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
Mongo.prototype.getDBs@src/mongo/shell/mongo.js:67:1
shellHelper.show@src/mongo/shell/utils.js:860:19
shellHelper@src/mongo/shell/utils.js:750:15
@(shellhelp2):1:1

#如果我们想在从库查看数据,需要先执行 rs.slaveOk()
dba:SECONDARY> rs.slaveOk()
dba:SECONDARY> show dbs
admin   0.000GB
config  0.000GB
local   0.000GB
test    0.000GB

#写入 .mongorc.js 使设置永久生效(每次连接时自动执行该文件)
[mongo@redis03 ~]$ vim .mongorc.js
rs.slaveOk()

#从库能看到主库同步过来的数据
dba:SECONDARY> use test
switched to db test
dba:SECONDARY> show tables
testtable
dba:SECONDARY> db.testtable.find()
{ "_id" : ObjectId("5ed05e10310f1946a670fa57"), "name" : "gcc", "tedian" : "naocan" }
{ "_id" : ObjectId("5ed05e60310f1946a670fa59"), "name" : "qiudao", "tedian" : "tongshang" }

二、副本集自动切换主库

1.故障切换测试

#停掉主库
[mongo@redis03 ~]$ mongo localhost:28018
dba:PRIMARY> use admin
dba:PRIMARY> db.shutdownServer()

查看其它从库,会有一台变成主库

#恢复停掉的库,它会自己判断谁是主库,自动成为新的从库

#注意:三个节点的时候,只允许坏一台节点

2.指定主库优先级

#查看优先级
dba:PRIMARY> rs.conf()
            #优先级
            "priority" : 1,

#临时设置配置文件
dba:PRIMARY> config=rs.conf()
#修改临时配置文件 id为0 的priority值为10
dba:PRIMARY> config.members[0].priority=10
10
#使临时配置文件永久生效
dba:PRIMARY> rs.reconfig(config)
{
    "ok" : 1,
    "operationTime" : Timestamp(1590716891, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1590716891, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}

#配置完优先级,主库直接进行切换,切换到优先级最高的节点

#手动降级主库
dba:PRIMARY> rs.stepDown()
dba:SECONDARY>

#恢复权重(优先级设置只是为了让我们指定的库变成主库,实现后一定要恢复优先级)
dba:PRIMARY> config=rs.conf()
dba:PRIMARY> config.members[0].priority=1
dba:PRIMARY> rs.reconfig(config)
dba:PRIMARY> rs.config()

三、扩容与删减节点

1.创建一个新节点

#创建节点目录
[root@redis03 ~]# mkdir /server/mongodb/28016/{conf,data,logs,pid} -p

#配置节点
[root@redis03 ~]# cd /server/mongodb/
[root@redis03 mongodb]# cp 28017/conf/mongo.conf 28016/conf/
[root@redis03 mongodb]# sed -i 's#28017#28016#g' 28016/conf/mongo.conf

#启动新节点
[root@redis03 ~]# chown -R mongo.mongo /server/mongodb/
[root@redis03 ~]# su - mongo
Last login: Fri May 29 08:45:16 CST 2020 on pts/5
[mongo@redis03 ~]$ mongod -f /server/mongodb/28016/conf/mongo.conf
[mongo@redis03 ~]$ mongo 10.0.0.93:28016

2.将新节点加入集群

#主库操作
dba:PRIMARY> rs.add("10.0.0.93:28016")
{
    "ok" : 1,
    "operationTime" : Timestamp(1590719303, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1590719303, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}

#查看集群状态
dba:PRIMARY> rs.status()

3.删除节点

#主库操作
dba:PRIMARY> rs.remove("10.0.0.93:28016")
{
    "ok" : 1,
    "operationTime" : Timestamp(1590719822, 2),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1590719822, 2),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}

#查看集群状态
dba:PRIMARY> rs.status()

4.添加仲裁节点

#主库操作
dba:PRIMARY> rs.addArb("10.0.0.93:28015")
{
    "ok" : 1,
    "operationTime" : Timestamp(1590720548, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1590720548, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}

#查看集群状态
dba:PRIMARY> rs.status()

四、数据的备份与恢复

0.连接mongodb

[mongo@redis03 ~]$ mongo -uadmin -p123456 --host 10.0.0.93 --port 27017

-u:指明MongoDB的用户名
-p:指明MongoDB的密码
--host:指定主机IP
--port:指定端口

#指定认证库连接
[mongo@redis03 ~]$ mongo -uadmin -p123456 10.0.0.93:27017 --authenticationDatabase admin
[mongo@redis03 ~]$ mongo -uadmin -p123456 10.0.0.93:27017/admin

1.备份工具

1.mongoexport和mongoimport
2.mongodump和mongorestore

2.导出工具 mongoexport

1)常用参数

#常用参数
-h:指明MongoDB宿主机的IP
-u:指明MongoDB的用户名
-p:指明MongoDB的密码
-d:指明库的名字
-c:指明集合的名字
-f:指明要导出哪些列
-o:指明要导出的文件名
-q:指明导出数据的过滤条件
--port:指定端口

2)备份数据为json格式

#备份数据为json格式
[mongo@redis03 ~]$ mongoexport -uadmin -p123456 --port 27017 --authenticationDatabase admin -d write -c writetable -o /tmp/write.json
2020-05-29T11:05:04.316+0800    connected to: localhost:27017
2020-05-29T11:05:04.317+0800    exported 6 records

#查看数据
[mongo@redis03 ~]$ cat /tmp/write.json 
{"_id":{"$oid":"5ecf2a7e36b4570646352f65"},"name":"lhd","age":18.0}
{"_id":{"$oid":"5ecf2a7f36b4570646352f66"},"name":"lhd","age":18.0}
{"_id":{"$oid":"5ecf2a7f36b4570646352f67"},"name":"lhd","age":18.0}
{"_id":{"$oid":"5ecf2a8036b4570646352f68"},"name":"lhd","age":18.0}
{"_id":{"$oid":"5ecf2a8136b4570646352f69"},"name":"lhd","age":18.0}
{"_id":{"$oid":"5ecf2b5db531b98a91919504"},"name":"lhd","age":18.0}

3)备份成csv格式

[mongo@redis03 ~]$ mongoexport -uadmin -p123456 --port 27017 --authenticationDatabase admin --type=csv -f name,age -d write -c writetable -o /tmp/write.csv
2020-05-29T11:16:37.532+0800    connected to: localhost:27017
2020-05-29T11:16:37.533+0800    exported 6 records

#注意:导出csv格式数据时,-f 必须使用

#查看数据
[mongo@redis03 ~]$ cat /tmp/write.csv 
name,age
lhd,18
lhd,18
lhd,18
lhd,18
lhd,18
lhd,18

3.导入工具 mongoimport

1)导入json格式数据

#删除数据
> use write
switched to db write
> show tables
writetable
> db.writetable.drop()
true

#导入数据
[mongo@redis03 ~]$ mongoimport -uadmin -p123456 --authenticationDatabase admin --port 27017 -d write -c writetable /tmp/write.json 
2020-05-29T11:26:30.276+0800    connected to: localhost:27017
2020-05-29T11:26:30.298+0800    imported 6 documents

#查看数据
> use write
switched to db write
> show tables
writetable

2)导入csv格式数据

#删除数据

#导入数据
[mongo@redis03 tmp]$ mongoimport -uadmin -p123456 --authenticationDatabase admin --port 27017 -d write -c writetable --type=csv --headerline /tmp/write.csv
2020-05-29T11:30:45.869+0800    connected to: localhost:27017
2020-05-29T11:30:45.881+0800    imported 14 documents

#查看数据
> use write
switched to db write
> show tables
writetable
> db.writetable.find()

五、生产案例:数据库数据迁移到mongodb

1.搭建数据库

2.导入数据

3.配置数据库

[root@redis04 ~]# vim /etc/my.cnf

[mysqld]
basedir=/usr/local/mysql
datadir=/usr/local/mysql/data
secure-file-priv=/tmp

[root@redis04 ~]# systemctl restart mysql

4.将数据库数据导出成CSV格式

mysql> select * from world.city into outfile '/tmp/city1.csv' fields terminated by ',';
Query OK, 4079 rows affected (0.00 sec)

5.手动处理一下导出的csv文件

[root@redis04 ~]# vim /tmp/city1.csv 
ID,Name,CountryCode,District,Population
1,Kabul,AFG,Kabol,1780000

6.将数据导入mongo

[root@redis04 ~]# scp /tmp/city1.csv 172.16.1.93:/tmp/

[root@redis03 ~]# mongoimport -uadmin -p123456 --authenticationDatabase admin --port 27017 -d world -c city --type=csv --headerline /tmp/city1.csv
2020-05-29T12:13:35.575+0800    connected to: localhost:27017
2020-05-29T12:13:35.631+0800    imported 4079 documents

#查看数据

六、mongoDB误删除数据恢复

1.背景

公司每天凌晨1点备份mongodb数据
第二天早上10点误删除了核心库
恢复数据

#使用mongodump和mongorestore工具

2.模拟全备数据

#连接副本集的主库
[mongo@redis03 ~]$ mongo localhost:28019
dba:PRIMARY> use backup
dba:PRIMARY> db.backuptable.insertMany([{id:1},{id:2},{id:3}])
{
    "acknowledged" : true,
    "insertedIds" : [
        ObjectId("5ed08fdb450c3013c75b4eca"),
        ObjectId("5ed08fdb450c3013c75b4ecb"),
        ObjectId("5ed08fdb450c3013c75b4ecc")
    ]
}

dba:PRIMARY> db.backuptable.find()
{ "_id" : ObjectId("5ed08fdb450c3013c75b4eca"), "id" : 1 }
{ "_id" : ObjectId("5ed08fdb450c3013c75b4ecb"), "id" : 2 }
{ "_id" : ObjectId("5ed08fdb450c3013c75b4ecc"), "id" : 3 }

3.全备数据

[mongo@redis03 ~]$ mongodump --host 10.0.0.93 --port 28019 --oplog -o /tmp/
2020-05-29T12:33:31.042+0800    writing admin.system.version to 
2020-05-29T12:33:31.043+0800    done dumping admin.system.version (1 document)
2020-05-29T12:33:31.044+0800    writing test.testtable to 
2020-05-29T12:33:31.044+0800    writing backup.backuptable to 
2020-05-29T12:33:31.047+0800    done dumping test.testtable (5 documents)
2020-05-29T12:33:31.047+0800    done dumping backup.backuptable (3 documents)
2020-05-29T12:33:31.048+0800    writing captured oplog to 
2020-05-29T12:33:31.049+0800        dumped 1 oplog entry

[mongo@redis03 ~]$ ll /tmp/oplog.bson 
-rw-rw-r-- 1 mongo mongo 110 May 29 12:33 /tmp/oplog.bson

4.模拟新增数据

#连接副本集的主库
[mongo@redis03 ~]$ mongo localhost:28019
dba:PRIMARY> use backup
switched to db backup
dba:PRIMARY> db.backuptable.insertMany([{id:4},{id:5},{id:6}])
{
    "acknowledged" : true,
    "insertedIds" : [
        ObjectId("5ed0913c11f2f484e7b07592"),
        ObjectId("5ed0913c11f2f484e7b07593"),
        ObjectId("5ed0913c11f2f484e7b07594")
    ]
}
dba:PRIMARY> db.backuptable.find()
{ "_id" : ObjectId("5ed08fdb450c3013c75b4eca"), "id" : 1 }
{ "_id" : ObjectId("5ed08fdb450c3013c75b4ecb"), "id" : 2 }
{ "_id" : ObjectId("5ed08fdb450c3013c75b4ecc"), "id" : 3 }
{ "_id" : ObjectId("5ed0913c11f2f484e7b07592"), "id" : 4 }
{ "_id" : ObjectId("5ed0913c11f2f484e7b07593"), "id" : 5 }
{ "_id" : ObjectId("5ed0913c11f2f484e7b07594"), "id" : 6 }

5.删除数据

dba:PRIMARY> use backup
dba:PRIMARY> db.backuptable.drop()
true

6.介绍oplog

oplog是local下面的一个集合,从库是通过查看oplog的内容进行同步的,oplog里面有我们所有的操作记录

7.查找oplog里面删除的动作

#切换到local库
dba:PRIMARY> use local
#查看oplog内容
dba:PRIMARY> db.oplog.rs.find().pretty()

dba:PRIMARY> db.oplog.rs.find({ns:"backup.$cmd"}).pretty()
{
    #事件发生的时间点
    "ts" : Timestamp(1590727028, 2),
    "t" : NumberLong(13),
    "h" : NumberLong("-2257265319677762205"),
    "v" : 2,
    #操作类型 i代表insert u代表update d代表delete n代表没有操作 c代表命令
    "op" : "c",
    "ns" : "backup.$cmd",
    "wall" : ISODate("2020-05-29T04:37:08.426Z"),
    #动作
    "o" : {
        "dropDatabase" : 1
    }
}

{
    "ts" : Timestamp(1590727740, 1),
    "t" : NumberLong(13),
    "h" : NumberLong("627078301089140371"),
    "v" : 2,
    "op" : "c",
    "ns" : "backup.$cmd",
    "ui" : UUID("5a5f45b3-e9d6-4d0e-8a1e-5e52c892b1c4"),
    "wall" : ISODate("2020-05-29T04:49:00.606Z"),
    "o" : {
        "drop" : "backuptable"
    }
}


#记录删除动作发生的时间点(恢复时作为 --oplogLimit 的值): 1590727028

8.把最新的oplog导出

[root@redis03 ~]# mongodump --port 28019 -d local -c oplog.rs -o /tmp/
2020-05-29T12:56:09.130+0800    writing local.oplog.rs to 
2020-05-29T12:56:09.134+0800    done dumping local.oplog.rs (889 documents)

[root@redis03 ~]# ll /tmp/local/oplog.rs.bson 
-rw-r--r-- 1 root root 102922 May 29 12:56 /tmp/local/oplog.rs.bson

9.把全备数据挪走,替换成新的oplog文件

[root@redis03 ~]# mkdir /backup
[root@redis03 ~]# mv /tmp/oplog.bson /backup/

[root@redis03 ~]# cp /tmp/local/oplog.rs.bson /tmp/oplog.bson

10.恢复数据

[root@redis03 ~]# chown -R mongo.mongo /tmp/oplog.bson

[root@redis03 ~]# mongorestore --port 28019 --oplogReplay  --oplogLimit="1590727028:2" --drop /tmp/
Copyright © 高程程 all right reserved,powered by Gitbook修订于: 2021-05-18 21:14:59

results matching ""

    No results matching ""