1. Abstract: This article assumes you are already familiar with the LDAP protocol and with the basic concepts of OpenLDAP and MySQL Cluster. OpenLDAP + MySQL Cluster can be used to build an enterprise-grade or carrier-grade unified user data store. The ideal logical model is as follows: the load balancer can be F5, LVS, or similar; note that OpenLDAP itself does not provide a distribution access layer for NDB access.
  2. For simplicity, this experiment uses only the following setup: a single 32-bit Linux VM. Because MySQL Cluster is an in-memory database, a laptop with 4 GB of RAM can only run two data nodes, and all nodes in this experiment live on the same VM.
  3. Install MySQL Cluster
  • Extract mysql-cluster-gpl-7.2.12-linux2.6-i686.tar.gz to “/usr/local/mysql-cluster-gpl-7.2.12-linux2.6-i686”
ln -s /usr/local/mysql-cluster-gpl-7.2.12-linux2.6-i686 /usr/local/mysqlc
# Add /usr/local/mysqlc/bin/ and /usr/local/mysqlc/scripts/ to PATH
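# One possible way, assuming a bash shell (adjust to your environment):
export PATH=/usr/local/mysqlc/bin:/usr/local/mysqlc/scripts:$PATH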
echo "/usr/local/mysqlc/lib" >> /etc/ld.so.conf
ldconfig -v
  4. Install OpenLDAP
  • Install openssl-1.0.1e
cd ./openssl-1.0.1e
./Configure --prefix=/usr/local/ssl
make
make install
  • Install BerkeleyDB db-5.3.21.gz
cd ./db-5.3.21
cd build_unix
../dist/configure --prefix=/usr/local/BerkeleyDB
make
make install
echo "/usr/local/BerkeleyDB/lib" >> /etc/ld.so.conf
ldconfig -v
  • Install OpenLDAP openldap-2.4.26.tgz
cd ./openldap-2.4.26
export CPPFLAGS="-I/usr/local/BerkeleyDB/include -I/usr/local/ssl/include -I/usr/local/ssl/include/openssl" LDFLAGS="-L/usr/local/BerkeleyDB/lib -L/usr/local/ssl/lib"

./configure --with-tls=openssl --enable-ndb --enable-meta --enable-ldap --enable-dynamic --prefix=/usr/local/openldap

make depend
make
make install
# Add /usr/local/openldap/bin/, /usr/local/openldap/sbin/, and /usr/local/openldap/libexec/ to PATH
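# One possible way, assuming a bash shell (adjust to your environment):
export PATH=/usr/local/openldap/bin:/usr/local/openldap/sbin:/usr/local/openldap/libexec:$PATH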
  5. Configure MySQL Cluster
  • This experiment configures two MySQL Clusters on the same VM. Because memory is limited, each cluster has one data node, three API nodes (two would also work), and one management (MGM) node. OpenLDAP and MySQL Cluster are started as the root user.
  • Configure the first MySQL Cluster as the master
# Config master cluster
mkdir -p /root/ws/mysql_cluster/test3/sql1/conf
mkdir -p /root/ws/mysql_cluster/test3/sql1/log
mkdir -p /root/ws/mysql_cluster/test3/sql1/mgmt_data
mkdir -p /root/ws/mysql_cluster/test3/sql1/mysqld_data
mkdir -p /root/ws/mysql_cluster/test3/sql1/ndb_data
# cat  /root/ws/mysql_cluster/test3/sql1/conf/config.ini
[ndb_mgmd default]
datadir=/root/ws/mysql_cluster/test3/sql1/mgmt_data

[ndb_mgmd] 
hostname=localhost 
logdestination=FILE:filename=/root/ws/mysql_cluster/test3/sql1/log/megm.log
NodeId=1 
portnumber=1186

[ndbd default] 
noofreplicas=1 
datadir=/root/ws/mysql_cluster/test3/sql1/ndb_data 
LockPagesInMainMemory=1
DataMemory=16M
IndexMemory=15M
FragmentLogFileSize=8M
NoOfFragmentLogFiles=8

[ndbd] 
hostname=localhost 
NodeId=3 

[mysqld] 
hostname=localhost
NodeId=50 
[mysqld] 
hostname=localhost
NodeId=51 
[mysqld] 
hostname=localhost
NodeId=52
# cat /root/ws/mysql_cluster/test3/sql1/conf/my.cnf
[mysqld] 
ndbcluster 
datadir=/root/ws/mysql_cluster/test3/sql1/mysqld_data 
basedir=/usr/local/mysqlc 
port=3306 
socket=/root/ws/mysql_cluster/test3/sql1/mysqld_data/mysql.socket

general_log=1
general_log_file=/root/ws/mysql_cluster/test3/sql1/log/query.log
log_error=/root/ws/mysql_cluster/test3/sql1/log/error.log

log-bin=/root/ws/mysql_cluster/test3/sql1/mysqld_data/binlogs
#Database to be replicated. Keep it the same as the dbname in the OpenLDAP config file shown later.
binlog-do-db=openldap
  • Configure the second MySQL Cluster as the slave
# Config slave cluster
mkdir -p /root/ws/mysql_cluster/test3/sql2/conf
mkdir -p /root/ws/mysql_cluster/test3/sql2/log
mkdir -p /root/ws/mysql_cluster/test3/sql2/mgmt_data
mkdir -p /root/ws/mysql_cluster/test3/sql2/mysqld_data
mkdir -p /root/ws/mysql_cluster/test3/sql2/ndb_data
# cat /root/ws/mysql_cluster/test3/sql2/conf/config.ini
[ndb_mgmd default]
datadir=/root/ws/mysql_cluster/test3/sql2/mgmt_data

[ndb_mgmd] 
hostname=localhost 
logdestination=FILE:filename=/root/ws/mysql_cluster/test3/sql2/log/megm.log
NodeId=1 
#Here both clusters run on the same VM, so the second cluster's management port must be changed from the default 1186 to another port such as 1286
portnumber=1286

[ndbd default] 
noofreplicas=1 
datadir=/root/ws/mysql_cluster/test3/sql2/ndb_data 
LockPagesInMainMemory=1
DataMemory=16M
IndexMemory=15M
FragmentLogFileSize=8M
NoOfFragmentLogFiles=8

[ndbd] 
hostname=localhost 
NodeId=3 

[mysqld] 
hostname=localhost
NodeId=50 
[mysqld] 
hostname=localhost
NodeId=51 
[mysqld] 
hostname=localhost
NodeId=52
# cat /root/ws/mysql_cluster/test3/sql2/conf/my.cnf
[mysqld] 
ndbcluster 
datadir=/root/ws/mysql_cluster/test3/sql2/mysqld_data 
basedir=/usr/local/mysqlc 
# Here both clusters run on the same VM, so the mysqld listening port must be changed from the default 3306 to another port such as 3406
port=3406 
socket=/root/ws/mysql_cluster/test3/sql2/mysqld_data/mysql.socket

general_log=1
general_log_file=/root/ws/mysql_cluster/test3/sql2/log/query.log
log_error=/root/ws/mysql_cluster/test3/sql2/log/error.log
# Replicate database "openldap" from master to slave
replicate-do-db=openldap

skip-slave-start
slave_allow_batching=1
relay-log-purge=1
slave-net-timeout=10
  6. Start the Master Cluster
  • Start the Management Node and Data Node
  • #cd /usr/local/mysqlc
    #./scripts/mysql_install_db --no-defaults --datadir=/root/ws/mysql_cluster/test3/sql1/mysqld_data
    # Start Management node
    #ndb_mgmd --skip-config-cache --ndb-nodeid=1 --initial -f /root/ws/mysql_cluster/test3/sql1/conf/config.ini
    # Start ndb node
    #ndbmtd --ndb-nodeid=3 --ndb-connectstring=localhost:1186 --initial-start
    # Please wait for the data node to go from the "starting" state to "started", as shown below:
    # ndb_mgm -e show
    Connected to Management Server at: localhost:1186
    Cluster Configuration
    ---------------------
    [ndbd(NDB)] 1 node(s)
    id=3 @127.0.0.1 (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)

    [ndb_mgmd(MGM)] 1 node(s)
    id=1 @127.0.0.1 (mysql-5.5.30 ndb-7.2.12)

    [mysqld(API)] 3 node(s)
    id=50 (not connected, accepting connect from localhost)
    id=51 (not connected, accepting connect from localhost)
    id=52 (not connected, accepting connect from localhost)

  • After the NDB node has started, start one API node (the OpenLDAP tools will use it to provision data, and it also acts as the master for replication)
# mysqld --defaults-file=/root/ws/mysql_cluster/test3/sql1/conf/my.cnf  --port=3306  --skip-name-resolve --ndb-nodeid=50 --ndb-connectstring=localhost:1186  --user=root --datadir=/root/ws/mysql_cluster/test3/sql1/mysqld_data  --server-id=20001 &

# ndb_mgm -e show
Connected to Management Server at: localhost:1186
Cluster Configuration
---------------------
[ndbd(NDB)]     1 node(s)
id=3    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)

[ndb_mgmd(MGM)] 1 node(s)
id=1    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12)

[mysqld(API)]   3 node(s)
id=50   @127.0.0.1  (mysql-5.5.30 ndb-7.2.12)
id=51 (not connected, accepting connect from localhost)
id=52 (not connected, accepting connect from localhost)
# You can use the commands below to log in to the cluster, check its status, and grant privileges for the slave
mysql -uroot --socket /root/ws/mysql_cluster/test3/sql1/mysqld_data/mysql.socket
mysql -uroot -h 127.0.0.1 -P 3306

mysql> grant all privileges on *.* to 'root'@'<your VM IP>' identified by '';
mysql> grant all privileges on *.* to 'root'@'127.0.0.1' identified by '';
mysql> grant all privileges on *.* to 'root'@'localhost' identified by '';
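Optionally, you can verify that the grants were applied (a quick sanity check using standard MySQL syntax):
mysql> show grants for 'root'@'127.0.0.1';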
  • Check Master status
# mysql -uroot -h 127.0.0.1 -P 3306
mysql> show master status;
+----------------+----------+--------------+------------------+
| File           | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+----------------+----------+--------------+------------------+
| binlogs.000001 |      112 | openldap     |                  |
+----------------+----------+--------------+------------------+
1 row in set (0.00 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| ndbinfo            |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.00 sec)
  7. Start the Slave Cluster
  • Start the Management Node and Data Node
#cd /usr/local/mysqlc
#./scripts/mysql_install_db --no-defaults --datadir=/root/ws/mysql_cluster/test3/sql2/mysqld_data

# Start slave mgm node
#ndb_mgmd --skip-config-cache --ndb-nodeid=1 --initial  -f /root/ws/mysql_cluster/test3/sql2/conf/config.ini

#Start slave ndb node
ndbmtd --ndb-nodeid=3 --ndb-connectstring=localhost:1286  --initial-start

# Please wait for the data node to go from the "starting" state to "started", as shown below:
# ndb_mgm -c localhost:1286 -e show
Connected to Management Server at: localhost:1286
Cluster Configuration
---------------------
[ndbd(NDB)]     1 node(s)
id=3    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)

[ndb_mgmd(MGM)] 1 node(s)
id=1    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12)

[mysqld(API)]   3 node(s)
id=50 (not connected, accepting connect from localhost)
id=51 (not connected, accepting connect from localhost)
id=52 (not connected, accepting connect from localhost)
  • After the NDB node has started, start one API node (it acts as the slave for replication)
# mysqld --defaults-file=/root/ws/mysql_cluster/test3/sql2/conf/my.cnf  --port=3316  --skip-name-resolve --ndb-nodeid=50 --ndb-connectstring=localhost:1286  --user=root --datadir=/root/ws/mysql_cluster/test3/sql2/mysqld_data  --server-id=20011 &

# ndb_mgm -c localhost:1286 -e show
Connected to Management Server at: localhost:1286
Cluster Configuration
---------------------
[ndbd(NDB)]     1 node(s)
id=3    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)

[ndb_mgmd(MGM)] 1 node(s)
id=1    @127.0.0.1  (mysql-5.5.30 ndb-7.2.12)

[mysqld(API)]   3 node(s)
id=50   @127.0.0.1  (mysql-5.5.30 ndb-7.2.12)
id=51 (not connected, accepting connect from localhost)
id=52 (not connected, accepting connect from localhost)
# You can use the commands below to log in to the cluster, check its status, and grant privileges
mysql -uroot --socket /root/ws/mysql_cluster/test3/sql2/mysqld_data/mysql.socket
mysql -uroot -h 127.0.0.1 -P 3316  

mysql> grant all privileges on *.* to 'root'@'<your VM IP>' identified by '';
mysql> grant all privileges on *.* to 'root'@'127.0.0.1' identified by '';
mysql> grant all privileges on *.* to 'root'@'localhost' identified by '';
  • Configure the replication master settings on the slave cluster as follows:
# mysql -uroot -h 127.0.0.1 -P 3316
mysql> change master to master_host='localhost', master_port=3306, master_user='root';
mysql> start slave;
mysql> show slave status\G
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: localhost
                  Master_User: root
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlogs.000001
          Read_Master_Log_Pos: 112
               Relay_Log_File: Ubuntu-VM1-relay-bin.000002
                Relay_Log_Pos: 261
        Relay_Master_Log_File: binlogs.000001
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: openldap
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 112
              Relay_Log_Space: 427
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 20001
                  Master_Bind: 
1 row in set (0.00 sec)
  8. Start the OpenLDAP Server
  • Schema preparation: no new schema files are created here; the schemas shipped with OpenLDAP are used
  • Configure slapd.conf (the OpenLDAP server configuration file):
root@Ubuntu-VM1:~/ws/ldap/test4# cat slapd.conf
# slapd.conf - Configuration file for LDAP SLAPD
##########
# Basics #
##########
include /usr/local/openldap/etc/openldap/schema/core.schema
include /usr/local/openldap/etc/openldap/schema/cosine.schema
include /usr/local/openldap/etc/openldap/schema/inetorgperson.schema
include /usr/local/openldap/etc/openldap/schema/nis.schema
pidfile /usr/local/openldap/var/run/slapd.pid
argsfile /usr/local/openldap/var/run/slapd.args
loglevel 255
#modulepath /usr/local/openldap/lib
#modulepath /usr/local/libexec/openldap
#moduleload back_hdb
##########################
# Database Configuration #
##########################
database ndb
suffix "dc=example,dc=com"
rootdn "cn=Manager,dc=example,dc=com"
rootpw secret
dbconnect localhost
dbhost 127.0.0.1
dbname openldap
#dbsocket /tmp/mysql.sock
dbport 3306
dbuser root
dbpass ""
#dbflag 0
dbconnections 1
directory /usr/local/openldap/var/openldap-data
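Optionally, the file can be sanity-checked before starting slapd; slaptest ships with OpenLDAP, and -u requests a dry run that skips opening the database (a sketch, run from the directory containing slapd.conf):
# slaptest -u -f slapd.conf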
  • Initial data plan: 5 DN entries in total
  • Data file test.ldif
## 5 DN entries in total
root@Ubuntu-VM1:~/ws/ldap/test4# cat test.ldif 
dn: dc=example,dc=com
objectClass: dcObject
objectClass: organizationalUnit
dc: example
ou: Example Dot Com

dn: ou=people,dc=example,dc=com
objectClass: organizationalUnit
ou: people

dn: ou=groups,dc=example,dc=com
objectClass: organizationalUnit
ou: groups

dn: cn=example,ou=groups,dc=example,dc=com
objectClass: posixGroup
cn: example
gidNumber: 10000

dn: uid=lionel,ou=people,dc=example,dc=com
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: lionel
sn: Porcheron
givenName: Lionel
cn: Lionel Porcheron
displayName: Lionel Porcheron
uidNumber: 1000
gidNumber: 10000
gecos: Lionel Porcheron
loginShell: /bin/bash
homeDirectory: /home/lionel
shadowExpire: -1
shadowFlag: 0
shadowWarning: 7
shadowMin: 8
shadowMax: 999999
shadowLastChange: 10877
mail: lionel.porcheron@example.com
postalCode: 31000
l: Toulouse
o: Example
mobile: +33 (0)6 xx xx xx xx
homePhone: +33 (0)5 xx xx xx xx
title: System Administrator
postalAddress:
initials: LP
  • Start the OpenLDAP server in debug mode (it is recommended to use debug mode at first, and to restart slapd as a daemon only after the configuration has been verified)
# slapd -d 255 -f slapd.conf
#The console output below indicates a successful connection to the master NDB cluster
518c9177 slapd startup: initiated.
518c9177 backend_startup_one: starting "cn=config"
518c9177 config_back_db_open
Backend ACL: access to *
        by * none

518c9177 config_back_db_open: line 0: warning: cannot assess the validity of the ACL scope within backend naming context
518c9177 config_back_db_open: No explicit ACL for back-config configured. Using hardcoded default
518c9177 config_build_entry: "cn=config"
518c9177 config_build_entry: "cn=schema"
518c9177 >>> dnNormalize: <cn={0}core>
518c9177 <<< dnNormalize: <cn={0}core>
518c9177 config_build_entry: "cn={0}core"
518c9177 >>> dnNormalize: <cn={1}cosine>
518c9177 <<< dnNormalize: <cn={1}cosine>
518c9177 config_build_entry: "cn={1}cosine"
518c9177 >>> dnNormalize: <cn={2}inetorgperson>
518c9177 <<< dnNormalize: <cn={2}inetorgperson>
518c9177 config_build_entry: "cn={2}inetorgperson"
518c9177 >>> dnNormalize: <cn={3}nis>
518c9177 <<< dnNormalize: <cn={3}nis>
518c9177 config_build_entry: "cn={3}nis"
518c9177 config_build_entry: "olcDatabase={-1}frontend"
518c9177 config_build_entry: "olcDatabase={0}config"
518c9177 config_build_entry: "olcDatabase={1}ndb"
518c9177 backend_startup_one: starting "dc=example,dc=com"
518c9177 ndb_db_open: "dc=example,dc=com"
518c9178 slapd starting
518c9178 daemon: added 4r listener=(nil)
518c9178 daemon: added 7r listener=0x8396740
518c9178 daemon: added 8r listener=0x83a0638
518c9178 daemon: epoll: listen=7 active_threads=0 tvp=NULL
518c9178 daemon: epoll: listen=8 active_threads=0 tvp=NULL
518c9178 daemon: activity on 1 descriptor
518c9178 daemon: activity on:518c9178 
518c9178 daemon: epoll: listen=7 active_threads=0 tvp=NULL
518c9178 daemon: epoll: listen=8 active_threads=0 tvp=NULL
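Once everything looks good, slapd can be restarted as a daemon, as suggested above. A minimal sketch: without the -d flag slapd detaches and runs in the background:
# slapd -f slapd.conf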
##Check the master's status
#mysql -uroot -h 127.0.0.1 -P 3306
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| ndbinfo            |
| openldap           |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)

mysql> show master status;
+----------------+----------+--------------+------------------+
| File           | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+----------------+----------+--------------+------------------+
| binlogs.000001 |     1708 | openldap     |                  |
+----------------+----------+--------------+------------------+
1 row in set (0.00 sec)

mysql> use openldap;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+--------------------+
| Tables_in_openldap |
+--------------------+
| OL_dn2id           |
| OL_nextid          |
| OL_opattrs         |
+--------------------+
3 rows in set (0.00 sec)

mysql> use openldap
mysql> select count(*) from OL_dn2id, OL_nextid, OL_opattrs;
+----------+
| count(*) |
+----------+
|        0 |
+----------+
1 row in set (0.00 sec)
##Check the slave cluster's status
#mysql -uroot -h 127.0.0.1 -P 3316
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| ndbinfo            |
| openldap           |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.03 sec)

mysql> show slave status\G
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: localhost
                  Master_User: root
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlogs.000001
          Read_Master_Log_Pos: 1708
               Relay_Log_File: Ubuntu-VM1-relay-bin.000002
                Relay_Log_Pos: 1857
        Relay_Master_Log_File: binlogs.000001
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: openldap
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 1708
              Relay_Log_Space: 2023
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 20001
                  Master_Bind: 
1 row in set (0.00 sec)

mysql> use openldap
mysql> show tables;
+--------------------+
| Tables_in_openldap |
+--------------------+
| OL_dn2id           |
| OL_nextid          |
| OL_opattrs         |
+--------------------+
3 rows in set (0.00 sec)

mysql> select count(*) from OL_dn2id, OL_nextid, OL_opattrs;
+----------+
| count(*) |
+----------+
|        0 |
+----------+
1 row in set (0.01 sec)

mysql> desc OL_dn2id;
+----------------+---------------------+------+-----+---------+-------+
| Field          | Type                | Null | Key | Default | Extra |
+----------------+---------------------+------+-----+---------+-------+
| eid            | bigint(20) unsigned | NO   | UNI | NULL    |       |
| object_classes | varchar(1024)       | NO   |     | NULL    |       |
| a0             | varchar(128)        | NO   | PRI |         |       |
| a1             | varchar(128)        | NO   | PRI |         |       |
| a2             | varchar(128)        | NO   | PRI |         |       |
| a3             | varchar(128)        | NO   | PRI |         |       |
| a4             | varchar(128)        | NO   | PRI |         |       |
| a5             | varchar(128)        | NO   | PRI |         |       |
| a6             | varchar(128)        | NO   | PRI |         |       |
| a7             | varchar(128)        | NO   | PRI |         |       |
| a8             | varchar(128)        | NO   | PRI |         |       |
| a9             | varchar(128)        | NO   | PRI |         |       |
| a10            | varchar(128)        | NO   | PRI |         |       |
| a11            | varchar(128)        | NO   | PRI |         |       |
| a12            | varchar(128)        | NO   | PRI |         |       |
| a13            | varchar(128)        | NO   | PRI |         |       |
| a14            | varchar(128)        | NO   | PRI |         |       |
| a15            | varchar(128)        | NO   | PRI |         |       |
+----------------+---------------------+------+-----+---------+-------+
18 rows in set (0.00 sec)
  9. Provision Data via LDAP client
  • OpenLDAP provides several tools to provision data, for example ldapadd (the OpenLDAP server process "slapd" must be running before you run it) and slapadd (slapd does not need to be running; this tool imports data directly into NDB, e.g. slapadd -d 255 -f slapd.conf -l test.ldif). Here we use ldapadd, which is why slapd was started in the previous step (a slapadd sketch follows the ldapadd run below).

    root@Ubuntu-VM1:~/ws/ldap/test4# ldapadd -x -D "cn=Manager,dc=example,dc=com" -w secret -d 256 -f test.ldif
    adding new entry "dc=example,dc=com"
    adding new entry "ou=people,dc=example,dc=com"
    adding new entry "ou=groups,dc=example,dc=com"
    adding new entry "cn=example,ou=groups,dc=example,dc=com"
    adding new entry "uid=lionel,ou=people,dc=example,dc=com"
  • Use ldapsearch to verify that the data in test.ldif was inserted successfully
root@Ubuntu-VM1:~/ws/ldap/test4# ldapsearch -xLLL -b "dc=example,dc=com"
dn: dc=example,dc=com
objectClass: dcObject
objectClass: organizationalUnit
dc: example
ou: Example Dot Com

dn: ou=groups,dc=example,dc=com
objectClass: organizationalUnit
ou: groups

dn: cn=example,ou=groups,dc=example,dc=com
objectClass: posixGroup
cn: example
gidNumber: 10000

dn: ou=people,dc=example,dc=com
objectClass: organizationalUnit
ou: people

dn: uid=lionel,ou=people,dc=example,dc=com
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
postalCode: 31000
l: Toulouse
cn: Lionel Porcheron
gidNumber: 10000
sn: Porcheron
title: System Administrator
displayName: Lionel Porcheron
givenName: Lionel
homePhone: +33 (0)5 xx xx xx xx
initials: LP
mail: lionel.porcheron@example.com
mobile: +33 (0)6 xx xx xx xx
o: Example
uid: lionel
uidNumber: 1000
homeDirectory: /home/lionel
loginShell: /bin/bash
gecos: Lionel Porcheron
shadowLastChange: 10877
shadowMin: 8
shadowMax: 999999
shadowWarning: 7
shadowExpire: -1
shadowFlag: 0
  • Check the resulting changes in the backend MySQL Cluster:
#Check the master's status
#mysql -uroot -h 127.0.0.1 -P 3306 
mysql> show master status;
+----------------+----------+--------------+------------------+
| File           | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+----------------+----------+--------------+------------------+
| binlogs.000001 |     9987 | openldap     |                  |
+----------------+----------+--------------+------------------+
1 row in set (0.00 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| ndbinfo            |
| openldap           |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)

mysql> use openldap;
Database changed
mysql> show tables;
+----------------------+
| Tables_in_openldap   |
+----------------------+
| OL_dn2id             |
| OL_nextid            |
| OL_opattrs           |
| dcObject             |
| inetOrgPerson        |
| organizationalPerson |
| organizationalUnit   |
| person               |
| posixAccount         |
| posixGroup           |
| shadowAccount        |
+----------------------+
11 rows in set (0.02 sec)

mysql> select count(*) from OL_dn2id; ##This shows 5 rows were inserted, matching the 5 DN entries in the initial data plan above
+----------+
| count(*) |
+----------+
| 5 |
+----------+
1 row in set (0.03 sec)

#Check the slave's status
#mysql -uroot -h 127.0.0.1 -P 3316
mysql> show slave status\G
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: localhost
                  Master_User: root
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlogs.000001
          Read_Master_Log_Pos: 9987
               Relay_Log_File: Ubuntu-VM1-relay-bin.000002
                Relay_Log_Pos: 10136
        Relay_Master_Log_File: binlogs.000001
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: openldap
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 9987
              Relay_Log_Space: 10302
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 20001
                  Master_Bind: 
1 row in set (0.00 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| ndbinfo            |
| openldap           |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)

mysql> use openldap;
mysql> show tables;
+----------------------+
| Tables_in_openldap   |
+----------------------+
| OL_dn2id             |
| OL_nextid            |
| OL_opattrs           |
| dcObject             |
| inetOrgPerson        |
| organizationalPerson |
| organizationalUnit   |
| person               |
| posixAccount         |
| posixGroup           |
| shadowAccount        |
+----------------------+
11 rows in set (0.03 sec)
  10. Data Mapping Analysis: Comparing the NDB Tables with the Provisioned test.ldif
  • Mapping from DN entries to the DB table OL_dn2id:
#mysql -uroot -h 127.0.0.1 -P 3306
mysql>use openldap;
mysql> select * from OL_dn2id;
+------+------------------------------------------------------------------------------+--------+------------+-----------+------------+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+
| eid  | object_classes                                                               | a0     | a1         | a2        | a3         | a4 | a5 | a6 | a7 | a8 | a9 | a10 | a11 | a12 | a13 | a14 | a15 |
+------+------------------------------------------------------------------------------+--------+------------+-----------+------------+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+
|    1 |  dcObject organizationalUnit @ top                                           | dc=com | dc=example |           |            |    |    |    |    |    |    |     |     |     |     |     |     |
|    4 |  inetOrgPerson posixAccount shadowAccount @ top person organizationalPerson  | dc=com | dc=example | ou=people | uid=lionel |    |    |    |    |    |    |     |     |     |     |     |     |
|    3 |  organizationalUnit @ top                                                    | dc=com | dc=example | ou=groups |            |    |    |    |    |    |    |     |     |     |     |     |     |
|    2 |  organizationalUnit @ top                                                    | dc=com | dc=example | ou=people |            |    |    |    |    |    |    |     |     |     |     |     |     |
| 1001 |  posixGroup @ top                                                            | dc=com | dc=example | ou=groups | cn=example |    |    |    |    |    |    |     |     |     |     |     |     |
+------+------------------------------------------------------------------------------+--------+------------+-----------+------------+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+
5 rows in set (0.07 sec)
mysql> desc OL_dn2id;
+----------------+---------------------+------+-----+---------+-------+
| Field          | Type                | Null | Key | Default | Extra |
+----------------+---------------------+------+-----+---------+-------+
| eid            | bigint(20) unsigned | NO   | UNI | NULL    |       |
| object_classes | varchar(1024)       | NO   |     | NULL    |       |
| a0             | varchar(128)        | NO   | PRI |         |       |
| a1             | varchar(128)        | NO   | PRI |         |       |
| a2             | varchar(128)        | NO   | PRI |         |       |
| a3             | varchar(128)        | NO   | PRI |         |       |
| a4             | varchar(128)        | NO   | PRI |         |       |
| a5             | varchar(128)        | NO   | PRI |         |       |
| a6             | varchar(128)        | NO   | PRI |         |       |
| a7             | varchar(128)        | NO   | PRI |         |       |
| a8             | varchar(128)        | NO   | PRI |         |       |
| a9             | varchar(128)        | NO   | PRI |         |       |
| a10            | varchar(128)        | NO   | PRI |         |       |
| a11            | varchar(128)        | NO   | PRI |         |       |
| a12            | varchar(128)        | NO   | PRI |         |       |
| a13            | varchar(128)        | NO   | PRI |         |       |
| a14            | varchar(128)        | NO   | PRI |         |       |
| a15            | varchar(128)        | NO   | PRI |         |       |
+----------------+---------------------+------+-----+---------+-------+
18 rows in set (0.00 sec)

OpenLDAP stores each DN, for example dn: cn=example,ou=groups,dc=example,dc=com, as a single row in the DB table OL_dn2id.

#Part of the test.ldif data provisioned from the client
dn: cn=example,ou=groups,dc=example,dc=com
objectClass: posixGroup
cn: example
gidNumber: 10000

#DN conversion rule
Each RDN of a DN, from the root down to the leaf, is mapped to one of the 16 columns (a0, a1, a2, ..., a15) of the OL_dn2id table. In other words, when NDB is configured as the LDAP backend, the maximum depth of the DIT (Directory Information Tree) is 16.
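For illustration, a hypothetical lookup of cn=example,ou=groups,dc=example,dc=com in OL_dn2id matches one RDN per column, root first, while the unused columns keep their empty-string default; per the table contents shown earlier this should return eid 1001:
mysql> select eid, object_classes from OL_dn2id where a0='dc=com' and a1='dc=example' and a2='ou=groups' and a3='cn=example';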

#Mapping of the DN's objectClasses and attributes
The objectClasses associated with the DN are stored as a single string in the object_classes column of OL_dn2id; in this case the value stored in the DB is "posixGroup @ top" (the @ marks the objectClass inheritance chain).
Looking further at the data in the DB table posixGroup:

  mysql> select * from posixGroup;
  +------+-----+------------------+-----------+-----------+
  | eid  | vid | cn               | gidNumber | memberUid |
  +------+-----+------------------+-----------+-----------+
  | 4    | 0   | Lionel Porcheron | 10000     | NULL      |
  | 1001 | 0   | example          | 10000     | NULL      |
  +------+-----+------------------+-----------+-----------+
  2 rows in set (0.02 sec)

  The vid column handles the case where one LDAP attribute has multiple values (for example, cn may have several values); each value gets its own row with the same eid and a different vid.
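A hypothetical query to see this: list the cn values stored for one entry; if cn had several values they would appear as multiple rows sharing the same eid but with increasing vid (here only vid 0 exists):
mysql> select eid, vid, cn from posixGroup where eid=1001;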

 

mysql> desc posixGroup;
  +-----------+---------------------+------+-----+---------+-------+
  | Field     | Type                | Null | Key | Default | Extra |
  +-----------+---------------------+------+-----+---------+-------+
  | eid       | bigint(20) unsigned | NO   | PRI | NULL    |       |
  | vid       | int(10) unsigned    | NO   | PRI | NULL    |       |
  | cn        | varchar(128)        | YES  |     | NULL    |       |
  | gidNumber | varchar(128)        | YES  |     | NULL    |       |
  | memberUid | varchar(128)        | YES  |     | NULL    |       |
  +-----------+---------------------+------+-----+---------+-------+
  5 rows in set (0.04 sec)
  • Related source code: the mapping code lives under openldap-2.4.35/servers/slapd/back-ndb
-rw-rw-r-- 1 2000 2000   12189 Mar 28 23:41 tools.cpp
-rw-rw-r-- 1 2000 2000     100 Mar 28 23:41 TODO
-rw-rw-r-- 1 2000 2000   21638 Mar 28 23:41 search.cpp
-rw-rw-r-- 1 2000 2000    3481 Mar 28 23:41 proto-ndb.h
-rw-rw-r-- 1 2000 2000   41251 Mar 28 23:41 ndbio.cpp
-rw-rw-r-- 1 2000 2000   14667 Mar 28 23:41 modrdn.cpp
-rw-rw-r-- 1 2000 2000   17815 Mar 28 23:41 modify.cpp
-rw-rw-r-- 1 2000 2000    1643 Mar 28 23:41 Makefile.in
-rw-rw-r-- 1 2000 2000   11441 Mar 28 23:41 init.cpp
-rw-rw-r-- 1 2000 2000    8370 Mar 28 23:41 delete.cpp
-rw-rw-r-- 1 2000 2000    9865 Mar 28 23:41 config.cpp
-rw-rw-r-- 1 2000 2000    4212 Mar 28 23:41 compare.cpp
-rw-rw-r-- 1 2000 2000    3884 Mar 28 23:41 bind.cpp
-rw-rw-r-- 1 2000 2000    4186 Mar 28 23:41 back-ndb.h
-rw-rw-r-- 1 2000 2000     772 Mar 28 23:41 attrsets.conf
-rw-rw-r-- 1 2000 2000    9191 Mar 28 23:41 add.cpp

# cat init.cpp
static int
ndb_db_open( BackendDB *be, ConfigReply *cr )
{
        struct ndb_info *ni = (struct ndb_info *) be->be_private;
        char sqlbuf[BUFSIZ], *ptr;
        int rc, i;

        if ( be->be_suffix == NULL ) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: need suffix" );
                Debug( LDAP_DEBUG_ANY, "%s\n",
                        cr->msg, 0, 0 );
                return -1;
        }

        Debug( LDAP_DEBUG_ARGS,
                LDAP_XSTRING(ndb_db_open) ": \"%s\"\n",
                be->be_suffix[0].bv_val, 0, 0 );

        if ( ni->ni_nconns < 1 )
                ni->ni_nconns = 1;

        ni->ni_cluster = (Ndb_cluster_connection **)ch_calloc( ni->ni_nconns, sizeof( Ndb_cluster_connection *));
        for ( i=0; i<ni->ni_nconns; i++ ) {
                ni->ni_cluster[i] = new Ndb_cluster_connection( ni->ni_connectstr );
                rc = ni->ni_cluster[i]->connect( 20, 5, 1 );
                if ( rc ) {
                        snprintf( cr->msg, sizeof( cr->msg ),
                                "ndb_db_open: ni_cluster[%d]->connect failed (%d)",
                                i, rc );
                        goto fail;
                }
        }
        for ( i=0; i<ni->ni_nconns; i++ ) {
                rc = ni->ni_cluster[i]->wait_until_ready( 30, 30 );
                if ( rc ) {
                        snprintf( cr->msg, sizeof( cr->msg ),
                                "ndb_db_open: ni_cluster[%d]->wait failed (%d)",
                                i, rc );
                        goto fail;
                }
        }

        mysql_init( &ni->ni_sql );
        if ( !mysql_real_connect( &ni->ni_sql, ni->ni_hostname, ni->ni_username, ni->ni_password,
                "", ni->ni_port, ni->ni_socket, ni->ni_clflag )) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: mysql_real_connect failed, %s (%d)",
                        mysql_error(&ni->ni_sql), mysql_errno(&ni->ni_sql) );
                rc = -1;
                goto fail;
        }

        sprintf( sqlbuf, "CREATE DATABASE IF NOT EXISTS %s", ni->ni_dbname );
        rc = mysql_query( &ni->ni_sql, sqlbuf );
        if ( rc ) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: CREATE DATABASE %s failed, %s (%d)",
                        ni->ni_dbname, mysql_error(&ni->ni_sql), mysql_errno(&ni->ni_sql) );
                goto fail;
        }

        sprintf( sqlbuf, "USE %s", ni->ni_dbname );
        rc = mysql_query( &ni->ni_sql, sqlbuf );
        if ( rc ) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: USE DATABASE %s failed, %s (%d)",
                        ni->ni_dbname, mysql_error(&ni->ni_sql), mysql_errno(&ni->ni_sql) );
                goto fail;
        }

        ptr = sqlbuf;
        ptr += sprintf( ptr, "CREATE TABLE IF NOT EXISTS " DN2ID_TABLE " ("
                "eid bigint unsigned NOT NULL, "
                "object_classes VARCHAR(1024) NOT NULL, "
                "a0 VARCHAR(128) NOT NULL DEFAULT '', "
                "a1 VARCHAR(128) NOT NULL DEFAULT '', "
                "a2 VARCHAR(128) NOT NULL DEFAULT '', "
                "a3 VARCHAR(128) NOT NULL DEFAULT '', "
                "a4 VARCHAR(128) NOT NULL DEFAULT '', "
                "a5 VARCHAR(128) NOT NULL DEFAULT '', "
                "a6 VARCHAR(128) NOT NULL DEFAULT '', "
                "a7 VARCHAR(128) NOT NULL DEFAULT '', "
                "a8 VARCHAR(128) NOT NULL DEFAULT '', "
                "a9 VARCHAR(128) NOT NULL DEFAULT '', "
                "a10 VARCHAR(128) NOT NULL DEFAULT '', "
                "a11 VARCHAR(128) NOT NULL DEFAULT '', "
                "a12 VARCHAR(128) NOT NULL DEFAULT '', "
                "a13 VARCHAR(128) NOT NULL DEFAULT '', "
                "a14 VARCHAR(128) NOT NULL DEFAULT '', "
                "a15 VARCHAR(128) NOT NULL DEFAULT '', "
                "PRIMARY KEY (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15), "
                "UNIQUE KEY eid (eid) USING HASH" );
        /* Create index columns */
        if ( ni->ni_attridxs ) {
                ListNode *ln;
                int newcol = 0;

                *ptr++ = ',';
                *ptr++ = ' ';
                for ( ln = ni->ni_attridxs; ln; ln=ln->ln_next ) {
                        NdbAttrInfo *ai = (NdbAttrInfo *)ln->ln_data;
                        ptr += sprintf( ptr, "`%s` VARCHAR(%d), ",
                                ai->na_name.bv_val, ai->na_len );
                }
                ptr = lutil_strcopy(ptr, "KEY " INDEX_NAME " (" );

                for ( ln = ni->ni_attridxs; ln; ln=ln->ln_next ) {
                        NdbAttrInfo *ai = (NdbAttrInfo *)ln->ln_data;
                        if ( newcol ) *ptr++ = ',';
                        *ptr++ = '`';
                        ptr = lutil_strcopy( ptr, ai->na_name.bv_val );
                        *ptr++ = '`';
                        ai->na_ixcol = newcol + 18;
                        newcol++;
                }
                *ptr++ = ')';
        }
        strcpy( ptr, ") ENGINE=ndb" );
        rc = mysql_query( &ni->ni_sql, sqlbuf );
        if ( rc ) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: CREATE TABLE " DN2ID_TABLE " failed, %s (%d)",
                        mysql_error(&ni->ni_sql), mysql_errno(&ni->ni_sql) );
                goto fail;
        }

        rc = mysql_query( &ni->ni_sql, "CREATE TABLE IF NOT EXISTS " NEXTID_TABLE " ("
                "a bigint unsigned AUTO_INCREMENT PRIMARY KEY ) ENGINE=ndb" );
        if ( rc ) {
                snprintf( cr->msg, sizeof( cr->msg ),
                        "ndb_db_open: CREATE TABLE " NEXTID_TABLE " failed, %s (%d)",
                        mysql_error(&ni->ni_sql), mysql_errno(&ni->ni_sql) );
                goto fail;
        }

        {
                NdbOcInfo *oci;

                rc = ndb_aset_get( ni, &ndb_optable, ndb_opattrs, &oci );
                if ( rc ) {
                        snprintf( cr->msg, sizeof( cr->msg ),
                                "ndb_db_open: ndb_aset_get( %s ) failed (%d)",
                                ndb_optable.bv_val, rc );
                        goto fail;
                }
                for ( i=0; ndb_oplens[i] >= 0; i++ ) {
                        if ( ndb_oplens[i] )
                                oci->no_attrs[i]->na_len = ndb_oplens[i];
                }
                rc = ndb_aset_create( ni, oci );
                if ( rc ) {
                        snprintf( cr->msg, sizeof( cr->msg ),
                                "ndb_db_open: ndb_aset_create( %s ) failed (%d)",
                                ndb_optable.bv_val, rc );
                        goto fail;
                }
                ni->ni_opattrs = oci;
        }
        /* Create attribute sets */
        {
                ListNode *ln;

                for ( ln = ni->ni_attrsets; ln; ln=ln->ln_next ) {
                        NdbOcInfo *oci = (NdbOcInfo *)ln->ln_data;
                        rc = ndb_aset_create( ni, oci );
                        if ( rc ) {
                                snprintf( cr->msg, sizeof( cr->msg ),
                                        "ndb_db_open: ndb_aset_create( %s ) failed (%d)",
                                        oci->no_name.bv_val, rc );
                                goto fail;
                        }
                }
        }
        /* Initialize any currently used objectClasses */
        {
                Ndb *ndb;
                const NdbDictionary::Dictionary *myDict;

                ndb = new Ndb( ni->ni_cluster[0], ni->ni_dbname );
                ndb->init(1024);

                myDict = ndb->getDictionary();
                ndb_oc_read( ni, myDict );
                delete ndb;
        }

#ifdef DO_MONITORING
        /* monitor setup */
        rc = ndb_monitor_db_open( be );
        if ( rc != 0 ) {
                goto fail;
        }
#endif

        return 0;

fail:
        Debug( LDAP_DEBUG_ANY, "%s\n",
                cr->msg, 0, 0 );
        ndb_db_close( be, NULL );
        return rc;
}