# kerberos简介
众所周知，Kerberos 是大数据环境下最常用的安全通信保障机制，是一种网络协议。本文不涉及 Kerberos 原理，只介绍用 Docker 搭建 Kerberos 环境。
# kerberos 原理
- Kerberos 原理
Kerberos 原理 (opens new window)
- 使用 Kerberos 进行网络身份验证
使用 Kerberos 进行网络身份验证 (opens new window)
# Kerberos(KDC) 几个重要的概念:
- Principal:任何服务器所提供的用户、计算机、服务都将被定义成Principal。
- Instances:用于限定服务 Principal 和特殊的管理 Principal。
- Realms:Kerberos安装提供的独特的域的控制,把它想象成你的主机和用户所属的主机或者组。官方约定该域名需要大写。默认情况下,Ubuntu会把DNS域名转换为大写当成这里的域。本例使用 HADOOP.COM 作为 Realm。
- Key Distribution Center:(KDC)由三部分组成:Principal数据库、认证服务器(AS)和票据授予服务器(TGS)。每个Realm至少要有一个KDC。
- Ticket Granting Ticket:由认证服务器(AS)签发,Ticket Granting Ticket (TGT)使用用户的密码加密,这个密码只有用户和KDC知道。
- Ticket Granting Server: (TGS) 根据请求签发服务的票据。
- Tickets:确认两个Principal的身份。一个主体是用户,另一个是由用户请求的服务。票据会建立一个加密密钥,用于在身份验证会话中的安全通信。
- Keytab Files:从KDC主数据库中提取的文件,包含服务或主机的加密密钥。
# 创建配置文件
# 创建文件夹/home/ubuntu/docker/kerberos
mkdir /home/ubuntu/docker/kerberos
1
# 创建下面目录
.
├── ./conf
│ ├── ./conf/hosts
│ ├── ./conf/kadm5.acl
│ ├── ./conf/kdc.conf
│ └── ./conf/krb5.conf
├── ./Dockerfile
└── ./keytab
└── ./keytab/
1
2
3
4
5
6
7
8
9
2
3
4
5
6
7
8
9
# 修改配置文件
- ./conf/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.18.0.4 ef6d55663265
127.0.0.1 kdc
1
2
3
4
5
6
7
8
2
3
4
5
6
7
8
- ./conf/kadm5.acl
cloudera-scm/admin@HADOOP.COM *
1
- ./conf/kdc.conf
[kdcdefaults]
kdc_ports = 88
kdc_tcp_ports = 88
[realms]
HADOOP.COM = {
#master_key_type = aes256-cts
acl_file = /var/kerberos/krb5kdc/kadm5.acl
dict_file = /usr/share/dict/words
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
max_renewable_life = 7d
supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
1
2
3
4
5
6
7
8
9
10
11
12
13
2
3
4
5
6
7
8
9
10
11
12
13
- ./conf/krb5.conf
includedir /etc/krb5.conf.d/
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_kdc = false
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
default_realm = HADOOP.COM
udp_preference_limit = 1
[realms]
HADOOP.COM = {
kdc = kdc
admin_server = kdc
}
[domain_realm]
.hadoop.com = HADOOP.COM
hadoop.com = HADOOP.COM
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
- ./Dockerfile
FROM centos:7
RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation
CMD ["/usr/sbin/init"]
1
2
3
2
3
# 打包docker镜像
cd /home/ubuntu/docker/kerberos && sudo docker build -t kdc:1.0 .
1
等待镜像构建完成。因为涉及到Kerberos相关软件包的下载安装,可能会比较慢。使用docker images命令可以看到新生成的docker镜像。
# docker启动及初始化
# 启动
docker run --privileged=true -p 88:88 -p 749:749 -p 750:750 -d --name="my_kdc" -v /home/ubuntu/docker/kerberos/conf/kdc.conf:/var/kerberos/krb5kdc/kdc.conf -v /home/ubuntu/docker/kerberos/conf/krb5.conf:/etc/krb5.conf -v /home/ubuntu/docker/kerberos/conf/hosts:/etc/hosts -v /home/ubuntu/docker/kerberos/conf/kadm5.acl:/var/kerberos/krb5kdc/kadm5.acl -v /home/ubuntu/docker/kerberos/keytab:/keytab/ kdc:1.0
1
- 首先进入容器的bash
sudo docker exec -it my_kdc bash
1
# 如果之前初始化过数据库,先删除旧的principal数据库文件(首次初始化可跳过)
rm /var/kerberos/krb5kdc/principal*
- 初始化数据库
kdb5_util create -s -r HADOOP.COM
# password => password
kdb5_util create -s -r HADOOP.COM -P password
1
2
3
4
2
3
4
- 启动kdc
systemctl start kadmin krb5kdc
1
- 查看状态
systemctl status kadmin
systemctl status krb5kdc
1
2
2
# 添加管理权限
- 进入kadmin交互命令行
kadmin.local
1
- 增加管理员用户并设置密码
addprinc cloudera-scm/admin@HADOOP.COM
# password => password
addprinc -pw password cloudera-scm/admin@HADOOP.COM
1
2
3
4
2
3
4
- 测试连接是否成功
kinit cloudera-scm/admin@HADOOP.COM
1
- 增加普通用户并设置密码
addprinc hdfs/kdc@HADOOP.COM
# password => password
addprinc -pw password hdfs/kdc@HADOOP.COM
1
2
3
4
2
3
4
- 测试连接是否成功
kinit hdfs/kdc@HADOOP.COM
1
- 查看用户列表
listprincs
1
- 删除用户
# 使用管理员用户
kinit cloudera-scm/admin@HADOOP.COM
kadmin
delete_principal test/kdc@HADOOP.COM
1
2
3
4
5
2
3
4
5
- 导出keytab文件到指定目录
ktadd -k /keytab/admin.keytab -norandkey cloudera-scm/admin@HADOOP.COM
ktadd -k /keytab/hdfs.keytab -norandkey hdfs/kdc@HADOOP.COM
1
2
2
# kerberos client
# 安装客户端(ubuntu)
apt-get install krb5-user -y
1
# 安装客户端(centos)
yum install krb5-workstation krb5-libs -y
1
# 设置配置/etc/krb5.conf配置和server端保持一致
cat > /etc/krb5.conf << EOF
[libdefaults]
default_realm = CLOUDERA
dns_lookup_kdc = false
dns_lookup_realm = false
ticket_lifetime = 86400
renew_lifetime = 604800
forwardable = true
default_tgs_enctypes = aes128-cts
default_tkt_enctypes = aes128-cts
permitted_enctypes = aes128-cts
udp_preference_limit = 1
kdc_timeout = 3000
[realms]
CLOUDERA = {
# kdc = quickstart.cloudera
kdc = 110.42.214.104
admin_server = quickstart.cloudera
}
HADOOP.COM = {
#kdc = kdc
kdc = 110.40.137.191
admin_server = kdc
}
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# 使用用户名和密码的方式验证Kerberos配置(在客户端通过用户名和密码认证)
kinit hdfs/kdc@HADOOP.COM
# password => password
1
2
2
# 通过密钥登陆
kinit -kt /keytab/hdfs.keytab hdfs/kdc@HADOOP.COM
1
# 查看principal
klist -k /keytab/hdfs.keytab
1
# windows
# 安装客户端
安装 MIT Kerberos Ticket Manager
# 设置配置和server端保持一致
C:\ProgramData\MIT\Kerberos5\krb5.ini
[libdefaults]
default_realm = CLOUDERA
dns_lookup_kdc = false
dns_lookup_realm = false
ticket_lifetime = 86400
renew_lifetime = 604800
forwardable = true
default_tgs_enctypes = aes128-cts
default_tkt_enctypes = aes128-cts
permitted_enctypes = aes128-cts
udp_preference_limit = 1
kdc_timeout = 3000
[realms]
CLOUDERA = {
kdc = quickstart.cloudera
#kdc = kdc
admin_server = quickstart.cloudera
}
HADOOP.COM = {
kdc = kdc
admin_server = kdc
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
# 设置host
# C:\Windows\System32\drivers\etc\hosts
# kdc host
192.168.50.28 kdc
1
2
3
2
3
# 使用用户名和密码的方式验证Kerberos配置(在客户端通过用户名和密码认证)
管理员用户:cloudera-scm/admin@HADOOP.COM,密码:password
普通用户:hdfs/kdc@HADOOP.COM,密码:password