单Redis安装部署
# redis 安装部署以及集群分片
yum install -y tcl
tar zxvf redis-3.0.7.tar.gz
cd redis-3.0.7
make PREFIX=/usr/local/redis install
make test
cp redis.conf /etc/redis.conf
cp utils/redis_init_script /etc/init.d/redis

# 修改 redis 配置文件
vim /etc/redis.conf
daemonize yes            # 调整为后台启动模式

# 设置启动脚本和开机启动
vim /etc/init.d/redis
#chkconfig: 2345 80 90   # 在第一行添加
chkconfig redis on       # 添加到服务
service redis start      # 启动
多redis安装部署集群搭建
# 说明: 实验环境: 172.18.100.103 / 172.18.100.104 / 172.18.100.123
# 每台服务器安装两个实例 redis6379 / redis6380
yum install -y tcl
tar zxvf redis-3.0.7.tar.gz
cd redis-3.0.7
make PREFIX=/usr/local/redis6379 install
make test
make PREFIX=/usr/local/redis6380 install
make test
cp redis.conf /etc/redis6379.conf
cp redis.conf /etc/redis6380.conf
cp utils/redis_init_script /etc/init.d/redis6379
cp utils/redis_init_script /etc/init.d/redis6380
cp src/redis-trib.rb /usr/local/bin                    # 该命令用于创建集群
cp /usr/local/redis6379/bin/redis-* /usr/local/bin/    # redis 命令

### 修改 redis 配置文件
# 修改配置文件 /etc/redis6379.conf
daemonize yes                      # 设置成后台启动
pidfile /var/run/redis_6379.pid    # 修改 pid 文件名
requirepass xxx                    # 设置密码

# 修改配置文件 /etc/redis6380.conf
daemonize yes                      # 设置成后台启动
pidfile /var/run/redis_6380.pid    # 修改 pid 文件名
port 6380                          # 修改端口, 双 redis 不修改端口会导致端口冲突
requirepass xxx                    # 设置密码

# 修改启动文件
vim /etc/init.d/redis6379
# 在第一行后添加
#chkconfig: 2345 80 90
# 把 CONF="/etc/redis/${REDISPORT}.conf" 修改为
CONF="/etc/redis6379.conf"

vim /etc/init.d/redis6380
# 在第一行后添加
#chkconfig: 2345 80 90
# 把 REDISPORT=6379 修改为
REDISPORT=6380
# 把 CONF="/etc/redis/${REDISPORT}.conf" 修改为
CONF="/etc/redis6380.conf"

# 添加到服务
chkconfig redis6379 on
chkconfig redis6380 on
配置redis集群
# 修改 redis 的配置文件 (以 redis6379 为例)
vim /etc/redis6379.conf
cluster-enabled yes
cluster-config-file nodes-6379.conf    # 设置各集群节点的配置文件
cluster-node-timeout 5000
appendonly yes
dir /data/redisAll                     # 设置 redis 持久化和备份文件的默认存放地

# 安装 ruby 等支持模块
yum install -y zlib ruby rubygems
gem install redis

# 创建集群
redis-trib.rb create --replicas 1 172.18.100.103:6379 172.18.100.103:6380 172.18.100.104:6379 172.18.100.104:6380 172.18.100.123:6379 172.18.100.123:6380
# --replicas 1 表示希望为集群的每个主节点创建一个从节点
# 输入 yes 后, 当显示 "[OK] All 16384 slots covered." 时, 集群运行正常

[root@kvm100-103 redis-3.0.7]# redis-trib.rb create --replicas 1 172.18.100.103:6379 172.18.100.103:6380 172.18.100.104:6379 172.18.100.104:6380 172.18.100.123:6379 172.18.100.123:6380
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
172.18.100.104:6379
172.18.100.103:6379
172.18.100.123:6379
Adding replica 172.18.100.103:6380 to 172.18.100.104:6379
Adding replica 172.18.100.104:6380 to 172.18.100.103:6379
Adding replica 172.18.100.123:6380 to 172.18.100.123:6379
M: 9a64fb747b3e264fdc1118247477299d9ba18839 172.18.100.103:6379
   slots:5461-10922 (5462 slots) master
S: 67b02bc4a70daf25adec2b2003779e1e1d9d742f 172.18.100.103:6380
   replicates d5cbd79d097d8379a3454c3db55a151f6c3204c7
M: d5cbd79d097d8379a3454c3db55a151f6c3204c7 172.18.100.104:6379
   slots:0-5460 (5461 slots) master
S: 1c4d08531bfecf1a0f0b343c701a898fe382b76a 172.18.100.104:6380
   replicates 9a64fb747b3e264fdc1118247477299d9ba18839
M: 16592e56dbc4d195864d5f8a7a14e3b876c1a314 172.18.100.123:6379
   slots:10923-16383 (5461 slots) master
S: 7eb0680fc9eb8efc1eb9b2a612f2d15d9c58f0af 172.18.100.123:6380
   replicates 16592e56dbc4d195864d5f8a7a14e3b876c1a314
Can I set the above configuration?
(type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 172.18.100.103:6379)
M: 9a64fb747b3e264fdc1118247477299d9ba18839 172.18.100.103:6379
   slots:5461-10922 (5462 slots) master
M: 67b02bc4a70daf25adec2b2003779e1e1d9d742f 172.18.100.103:6380
   slots: (0 slots) master
   replicates d5cbd79d097d8379a3454c3db55a151f6c3204c7
M: d5cbd79d097d8379a3454c3db55a151f6c3204c7 172.18.100.104:6379
   slots:0-5460 (5461 slots) master
M: 1c4d08531bfecf1a0f0b343c701a898fe382b76a 172.18.100.104:6380
   slots: (0 slots) master
   replicates 9a64fb747b3e264fdc1118247477299d9ba18839
M: 16592e56dbc4d195864d5f8a7a14e3b876c1a314 172.18.100.123:6379
   slots:10923-16383 (5461 slots) master
M: 7eb0680fc9eb8efc1eb9b2a612f2d15d9c58f0af 172.18.100.123:6380
   slots: (0 slots) master
   replicates 16592e56dbc4d195864d5f8a7a14e3b876c1a314
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# 查看集群节点
[root@kvm100-103 redis-3.0.7]# redis-cli -c -p 6379 cluster nodes
NOAUTH Authentication required.
[root@kvm100-103 redis-3.0.7]# redis-cli -c -p 6379
127.0.0.1:6379> auth xxx
OK
127.0.0.1:6379> cluster nodes
d5cbd79d097d8379a3454c3db55a151f6c3204c7 172.18.100.104:6379 master - 0 1468488926113 3 connected 0-5460
9a64fb747b3e264fdc1118247477299d9ba18839 172.18.100.103:6379 myself,master - 0 0 1 connected 5461-10922
16592e56dbc4d195864d5f8a7a14e3b876c1a314 172.18.100.123:6379 master - 0 1468488925612 5 connected 10923-16383
7eb0680fc9eb8efc1eb9b2a612f2d15d9c58f0af 172.18.100.123:6380 slave 16592e56dbc4d195864d5f8a7a14e3b876c1a314 0 1468488925613 6 connected
1c4d08531bfecf1a0f0b343c701a898fe382b76a 172.18.100.104:6380 slave 9a64fb747b3e264fdc1118247477299d9ba18839 0 1468488924611 4 connected
67b02bc4a70daf25adec2b2003779e1e1d9d742f 172.18.100.103:6380 slave
d5cbd79d097d8379a3454c3db55a151f6c3204c7 0 1468488926113 3 connected
127.0.0.1:6379>

#---------
# 报错: [ERR] Sorry, can't connect to node 172.18.100.103:6379
# 解决办法: 编辑 /usr/lib/ruby/gems/1.8/gems/redis-3.3.0/lib/redis/client.rb
# 把 :password 一项修改成
# :password => 'xxx'
#---------

### 集群添加删除节点
# 添加主节点
# 172.18.100.124:6379    要添加的新主节点
# 172.18.100.103:6379    可以是集群已有的任一节点
redis-trib.rb add-node 172.18.100.124:6379 172.18.100.103:6379

# 给刚添加的新节点添加从节点, 先查询新主节点的 node-id
redis-cli -c -p 6379 cluster nodes | grep 172.18.100.124
# 添加从节点
redis-trib.rb add-node --slave --master-id 4947227ad677a99becf636d951d1f397631d565c 172.18.100.124:6380 172.18.100.103:6379
# --slave                表示添加从节点
# --master-id            主节点的 node-id
# 172.18.100.124:6380    从节点
# 172.18.100.103:6379    可以是集群已有的任一节点

# 添加新的节点后需重新分配 slot
redis-trib.rb reshard 172.18.100.103:6379
How many slots do you want to move (from 1 to 16384)? 1000    # 设置 slot 数 1000
What is the receiving node ID? 4947227ad677a99becf636d951d1f397631d565c    # 新添加的节点 node-id
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1:all    # 表示全部节点重新分配
Do you want to proceed with the proposed reshard plan (yes/no)? yes    # 确认重新分配
# 说明: 新添加的主节点如果没有 slot, 存储的数据就不会被选中

# 删除从节点
redis-trib.rb del-node 172.18.100.124:6380 '5add38375571dbbb20f311553f4399e641e98f60'
# '5add38375571dbbb20f311553f4399e641e98f60' 是要删除的从节点的 node-id

# 删除主节点
# 主节点有从节点的先删从节点, 参考删除从节点步骤
# 主节点有 slot 的, 去掉分配的 slot 后才能删除主节点
redis-trib.rb reshard 172.18.100.124:6379    # 取消分配的 slot, 下面是主要过程
How many slots do you want to move (from 1 to 16384)? 1000    # 被删除 master 的所有 slot 数量, 根据实际情况
What is the receiving node ID? 089275dc4d883c693b81ca11867dc7d94caf1159    # 接收要删除主节点 slot 的其他主节点
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1:4947227ad677a99becf636d951d1f397631d565c    # 被删除主节点的 node-id
Source node #2:done
Do you want to proceed with the proposed reshard plan (yes/no)?
yes    # 取消 slot 后, 重新分配

# 删除主节点的 slot 信息后可删除该节点
redis-trib.rb del-node 172.18.100.124:6379 '4947227ad677a99becf636d951d1f397631d565c'

# 具体可参考 http://www.redis.cn/topics/cluster-tutorial.html