Notes from a simple data warehouse build done for work.

The requirement itself is simple: supply the underlying data for a user-profile system. For this round all I had to do was pull the backend data out of the database and load it into ES. Concretely, the work breaks down into the following steps.

1. Sort out the business data and compile it into an Excel sheet, producing a data dictionary and a set of conventions. Think of it as characterizing the data: cataloguing metadata and data lineage, knowing which sub-tables feed the main table, how the business data is categorized, and so on.
2. Import data from the relational database into the Hive ODS layer.
3. Load data from the Hive ODS layer into the CDW layer.
4. CDW layer to ADS; sync the CDW data to ES; build the Hive external table and its mapping to the ES index.

Completing these four steps finishes the job. The SQL logic in between is purely a structural transformation, turning the labeled data (a wide table) into a tall table, as sketched below. Overall it is fairly simple.
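A minimal sketch of that wide-to-tall reshaping (not the production SQL; the table and column names ods.tag_wide, cdw.tag_tall, tag_a/tag_b are hypothetical):

-- One row per (id, tag) pair instead of one column per tag
INSERT OVERWRITE TABLE cdw.tag_tall
SELECT id, 'tag_a' AS tag_name, tag_a AS tag_value FROM ods.tag_wide
UNION ALL
SELECT id, 'tag_b' AS tag_name, tag_b AS tag_value FROM ods.tag_wide;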

Sqoop import (MySQL to the Hive ODS layer):



#!/bin/sh
su - hive <<EOF
/opt/sqoop-1.4.6.bin__hadoop-2.0.4-alpha/bin/sqoop import --connect jdbc:mysql://<mysql_host>:3306/db_name --username <username> --password <password> --table table_name --null-string ''  --null-non-string ''  --lines-terminated-by '\n' --fields-terminated-by '\t' --delete-target-dir --num-mappers 1 --hive-import --hive-database db_name --hive-table table_name;
EOF
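A quick sanity check after the import, assuming the placeholder names above (a sketch, run from the Hive CLI or beeline):

-- Confirm the ODS table was populated by the Sqoop job
SELECT COUNT(*) FROM db_name.table_name;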



Create a Hive external table mapped to an ES index:



DROP TABLE IF EXISTS db_name.table_name;
CREATE EXTERNAL TABLE db_name.table_name (
  id string COMMENT 'id',
  bm string COMMENT 'bm',
  hbryndgx string COMMENT 'business field',
  effect_on_date string COMMENT 'effective date',
  create_date string COMMENT 'creation time',
  creator string COMMENT 'creator',
  sjly string COMMENT 'data source'
) COMMENT 'table description'
STORED BY 'org.elasticsearch.hadoop.hive.EsStorageHandler'
TBLPROPERTIES(
'es.resource' = 'eeeeeee_hddl',
'es.nodes'='<es_host>',
'es.port'='9200',
'es.mapping.id' = 'id',
'es.index.auto.create' = 'true',
'es.write.operation'='upsert',
'es.net.http.auth.user'='<username>',
'es.net.http.auth.pass'='<password>',
'es.mapping.date.rich'='false',
'es.index.read.missing.as.empty'='yes');
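Once the external table exists, writes to it are upserted into the ES index (keyed by es.mapping.id), and the same table can also be read from Hive, which queries ES under the hood. A quick check, assuming the placeholder table name above:

-- Should return documents from the backing ES index
SELECT id, bm, sjly FROM db_name.table_name LIMIT 10;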



Part of the main SQL:



load data local inpath '/var/lib/hive/<module>/<file>.txt' into table db_name.table_name;

-- 1. Load the data into ODS
insert overwrite table db_name.table_name
select *
  from db_name.table_name a
       lateral view explode(split(bh, '_')) b as bh;
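For reference, lateral view explode(split(...)) fans a delimited column out into one row per element. A self-contained illustration with a hypothetical value:

-- 'A01_A02_A03' becomes three rows: A01, A02, A03
SELECT t.id, x.bh
  FROM (SELECT 1 AS id, 'A01_A02_A03' AS bh) t
 LATERAL VIEW explode(split(t.bh, '_')) x AS bh;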



More of the SQL:



INSERT INTO TABLE db_name.table_name
SELECT * FROM db_name.table_name;



truncate table db_name.table_name;

-- 2. Logic processing: translate dictionary values, merge the data, and insert it into a temporary table
DROP TABLE IF EXISTS TEMP_TABLE;
CREATE TEMPORARY TABLE TEMP_TABLE AS
SELECT ODS.bm AS BM,                                            -- concatenated primary key
       CAST(NULL AS STRING) AS GRSBH,
       FROM_UNIXTIME(UNIX_TIMESTAMP(), 'yyyy-MM-dd') AS EFFECT_ON_DATE
  FROM db_name.table_name ODS
  LEFT JOIN (SELECT *
               FROM db_name.table_name
              WHERE EFFECT_FLAG = '1') AS CDW
    ON ODS.BM = CDW.BM
  LEFT JOIN db_name.table_name DEATH
    ON ODS.bh = DEATH.BH
 WHERE ODS.DELETE_FLAG <> '1';


-- 3. Update CDW: expire records that were deleted upstream
UPDATE db_name.table_name
   SET DELETE_FLAG     = 'Y',
       EFFECT_OFF_DATE = FROM_UNIXTIME(UNIX_TIMESTAMP(), 'yyyy-MM-dd'),
       EFFECT_FLAG     = '0'
 WHERE BM IN (SELECT BM
                FROM db_name.table_name ODS
               WHERE DELETE_FLAG = 'Y')
   AND EFFECT_FLAG = '1';


-- 4. Update CDW: expire records that were uploaded again
UPDATE db_name.table_name
   SET EFFECT_OFF_DATE = FROM_UNIXTIME(UNIX_TIMESTAMP(), 'yyyy-MM-dd'),
       EFFECT_FLAG     = '0'
 WHERE BM IN (SELECT BM FROM db_name.table_name)
   AND EFFECT_FLAG = '1';




-- 6. Insert the data into the CDW table
INSERT INTO TABLE db_name.table_name
SELECT *
  FROM TEMP_TABLE;

-- 7. Push the new and modified data to the external table
-- Insert the rows in the temporary table into the Hive/ES external table
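-- The statement itself is omitted in the original; a minimal sketch, assuming the
-- ES external table defined above (db_name.table_name) and the columns it declares:
INSERT INTO TABLE db_name.table_name
SELECT id, bm, hbryndgx, effect_on_date, create_date, creator, sjly
  FROM TEMP_TABLE;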

 
 -- Release the temp table
 truncate table TEMP_TABLE;



ES query examples:

GET /_cat/indices?v

GET /ssqk_hddl/_mapping

GET /ssqk_hddl/_search
{
  "query": {
    "wildcard": {
      "xb": "女"
    }
  }
}

Java code:



<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>transport</artifactId>
    <version>7.3.2</version>
</dependency>

<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>7.3.2</version>
</dependency>

<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>



public RestHighLevelClient connectES() {
        // Initialize the ES client
        final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY,
                new UsernamePasswordCredentials(""));            // fill in "user:password" here
//                new UsernamePasswordCredentials("elastic", "123456"));  // ES credentials, test environment
//                new UsernamePasswordCredentials("root", "jkyjy2019"));  // ES credentials, production environment
        esClient = new RestHighLevelClient(
                RestClient.builder(
//                        new HttpHost(host, 9200)
                        new HttpHost(host, 9210)
                ).setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                    public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
                        httpClientBuilder.disableAuthCaching();
                        return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
                    }
                })
        );
        return esClient;
    }

    /**
     * Check whether an index exists
     *
     * @param index index name
     * @return true if the index exists
     */
    public boolean indexExists(String index) {
        GetIndexRequest request = new GetIndexRequest(index);
        try {
            return esClient.indices().exists(request, RequestOptions.DEFAULT);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return false;
    }

    /**
     * Delete an index
     *
     * @param index index name
     */
    public void deleteIndex(String index) {
        DeleteIndexRequest request = new DeleteIndexRequest(index);
        AcknowledgedResponse deleteIndexResponse = null;
        try {
            deleteIndexResponse = esClient.indices().delete(request, RequestOptions.DEFAULT);
        } catch (IOException e) {
            e.printStackTrace();
        }
        // Guard against a failed delete call before reading the acknowledgement flag
        boolean acknowledged = deleteIndexResponse != null && deleteIndexResponse.isAcknowledged();
    }

public void createMapping(String index, List<Map<String, String>> fieldList) {

        System.out.println( "索引名: "+index);
        try {
            CreateIndexRequest createIndexRequest = new CreateIndexRequest(index);
            esClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);

            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
//            builder.startObject(index);
            builder.startObject("properties");

            for (Map<String, String> entity :
                    fieldList) {
                String dataType = entity.get("data_type");
                String dev_mapping = entity.get("dev_mapping");
                String sksy = entity.get("sksy");
                builder.startObject(dev_mapping);
                if ("字符类型".equals(dataType)) {
                    builder.field("type", "keyword").endObject();
                } else if ("整型".equals(dataType)) {
                    builder.field("type", "integer").endObject();
                } else if ("浮点型".equals(dataType)) {
                    builder.field("type", "double").endObject();
                } else if ("日期型".equals(dataType)) {
                    builder.field("type", "date").field("format", sksy).endObject();
                } else {
                    builder.field("type", "keyword").endObject();
                }

            }
            builder.startObject("id");
            builder.field("type","keyword").endObject();
            builder.startObject("bm");
            builder.field("type","keyword").endObject();
            builder.startObject("effect_on_date");
            builder.field("type","keyword").endObject();
            builder.startObject("create_date");
            builder.field("type","keyword").endObject();
            builder.startObject("creator");
            builder.field("type","keyword").endObject();
            builder.startObject("sjly");
            builder.field("type","keyword").endObject();

            builder.endObject();
            builder.endObject();

            PutMappingRequest putMappingRequest = new PutMappingRequest(index);
            putMappingRequest.source(builder);
            esClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT);

        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("失败的索引名称:"+ index);
        }

        return;
    }

public Boolean existIndex(String index_name) {
        // Probe the index by fetching doc id "1"; warn if the index already exists
        GetRequest getRequest = new GetRequest();
        getRequest.id("1");
        getRequest.index(index_name);
        try {
            boolean exists = esClient.exists(getRequest, RequestOptions.DEFAULT);
            if (exists) {
                // The index already exists
                logger.error("Index already exists! " + index_name);
                return true;
            }

        } catch (IOException e) {
            e.printStackTrace();
            // Treat a failed check conservatively so the index is not re-created
            logger.error("Failed to check whether the index exists: " + index_name);
            return true;

        }
        return false;
    }

    public void initMapping2(MysqlUtils mysqlConn) {
        String table = mysqlConn.getTable();

        // Fetch the index name, field, and type columns from the data dictionary table
        List<MappingEntity> list = mysqlConn.getModelData("SELECT INDEX_NAME,DATA_TYPE,DEV_MAPPING,SKSY FROM  " + table +
                "  WHERE 表名 IS NOT NULL ORDER BY ID ");

        String index_name = null;
        List<Map<String, String>> fieldList = new ArrayList<Map<String, String>>();
        for (MappingEntity mappingEntity : list) {
            // Initialize the index name
            if (index_name == null) {
                index_name = mappingEntity.getIndex_name();
            }

            if (index_name.equals(mappingEntity.getIndex_name())) {

                Map<String, String> fields = new HashMap<String, String>();
                fields.put("data_type", mappingEntity.getDataType());
                fields.put("dev_mapping", mappingEntity.getDevMapping());
                fields.put("sksy", mappingEntity.getsKSY());
                fieldList.add(fields);
            } else {
                // Does the index already exist?
                if (!existIndex(index_name)) {
                    // Create the index and its mapping
                    createMapping(index_name, fieldList);
                }
                index_name = mappingEntity.getIndex_name();
                fieldList.clear();
                Map<String, String> fields = new HashMap<String, String>();
                fields.put("data_type", mappingEntity.getDataType());
                fields.put("dev_mapping", mappingEntity.getDevMapping());
                fields.put("sksy", mappingEntity.getsKSY());
                fieldList.add(fields);
            }
        }

        // Flush the final group: the loop above only creates an index when the name changes,
        // so the last accumulated fieldList would otherwise never be written
        if (index_name != null && !fieldList.isEmpty() && !existIndex(index_name)) {
            createMapping(index_name, fieldList);
        }
    }
}



MysqlUtils mysqlConn = new MysqlUtils("HDDL");
        mysqlConn.mysqlConnect();

        List<MappingEntity> resultList = mysqlConn.getModelData(SQL);
        List<Map<String, String>> fieldsList = new ArrayList<>();

        for (MappingEntity mappingEntity :
                resultList) {
            Map<String, String> fields = new HashMap<String, String>();
            fields.put("data_type", mappingEntity.getDataType());
            fields.put("dev_mapping", mappingEntity.getDevMapping());
            fields.put("sksy", mappingEntity.getsKSY());
            fieldsList.add(fields);
        }


        ESUtils esUtils = new ESUtils();
        esUtils.connectES();

        esUtils.createMapping(table, fieldsList);
        mysqlConn.mysqlClose();
        esUtils.closeES();



The main entry point:



// Run as `java -jar <jar> [hive|sqoop]` to generate the partitioned tables or the Sqoop scripts
        // The interim solution here is a MySQL-to-MySQL copy
        if (args.length < 1) {
            logger.info("Invalid input arguments");
            System.exit(-1);
        }
        try {
            long startTime = System.currentTimeMillis();           // start time
            // Dispatch on the sub-command and invoke the target class's main method via reflection
            if (Constants.MainParam.MYSQL.equals(args[0].toLowerCase())) {
                logger.info("enter sub command: mysql");
                Class<RunMySQLApp2> clazz = RunMySQLApp2.class;
                Method method = clazz.getDeclaredMethod("main", String[].class);
                method.setAccessible(true);
                method.invoke(clazz.newInstance(), new Object[]{args});
            }

            long endTime = System.currentTimeMillis();              // end time
            System.out.println((endTime - startTime) / 1000 + " seconds");
        }catch (Exception e) {
            logger.error(e.getMessage());
            e.printStackTrace();
        }



The DataBaseUtil utility class:



private static void initDatasource(Properties props) {
        try {
            dataSource = (DruidDataSource) DruidDataSourceFactory.createDataSource(props);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private static void initHiveDatasource(Properties props) {
        try {
            hiveDatasource = (DruidDataSource) DruidDataSourceFactory.createDataSource(props);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static DruidPooledConnection getConn(String url, String username, String password) {
        DruidPooledConnection connection = null;
        Properties props = new Properties();
        props.setProperty("url", url);
        props.setProperty("username", username);
        props.setProperty("password", password);
        initDatasource(props);
        try {
            connection = dataSource.getConnection();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return connection;
    }


    public static DruidPooledConnection getHiveConn(String url, String username, String password) {
        DruidPooledConnection connection = null;
        Properties props = new Properties();
        props.setProperty("url", url);
        props.setProperty("username", username);
        props.setProperty("password", password);
        initHiveDatasource(props);
        try {
            connection = hiveDatasource.getConnection();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return connection;
    }

    public static Statement getStmt(String url, String username, String password, DruidPooledConnection connection) {
        try {
            statement = connection.createStatement();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return statement;
    }

    public static Statement getHiveStmt(String url, String username, String password) {
        DruidPooledConnection connection = getHiveConn(url, username, password);
        try {
            // Create the statement on the Hive connection and return that same statement
            hiveStatement = connection.createStatement();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return hiveStatement;
    }

    public static boolean execSQL(String url, String username, String password, String sql, DruidPooledConnection conn) {
        boolean flag = false;
        Statement statement = getStmt(url, username, password, conn);
        try {
            flag = statement.execute(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return flag;
    }

    public static ResultSet execMySql(Statement getStmt, String sql) {
        ResultSet resultSet = null;
        Statement statement = getStmt;
        try {
            resultSet = statement.executeQuery(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return resultSet;
    }


    public static ResultSet execSQLForResult(String url, String username, String password, String sql, DruidPooledConnection connection) {
        ResultSet resultSet = null;
        Statement statement = getStmt(url, username, password, connection);
        try {
            resultSet = statement.executeQuery(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return resultSet;
    }

    public static boolean execSQL(Statement getStmt, String sql) {
        boolean flag = false;
        Statement statement = getStmt;
        try {
            flag = statement.execute(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return flag;
    }


    public static boolean execHiveSQL(String url, String username, String password, String sql) {
        boolean flag = false;
        Statement statement = getHiveStmt(url, username, password);
        try {
            flag = statement.execute(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return flag;
    }

    public static ResultSet execHiveSQLForResult(String url, String username, String password, String sql) {
        ResultSet resultSet = null;
        Statement statement = getHiveStmt(url, username, password);
        try {
            resultSet = statement.executeQuery(sql);
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return resultSet;
    }

    public static void recycleConn(Connection connection) {
        if (dataSource != null) {
            dataSource.discardConnection(connection);
        }
    }

    public static void recyclePgConn(Connection connection) {
        if (pgDatasource != null) {
            pgDatasource.discardConnection(connection);
        }
    }

    public static void recycleHiveConn(Connection connection) {
        if (hiveDatasource != null) {
            hiveDatasource.discardConnection(connection);
        }
    }

    public static Boolean existTable(String url, String username, String password, String databases, String table) {
        Boolean existTable = false;
        DruidPooledConnection connection = getConn(url, username, password);
        try {
            ResultSet rsTables = connection.getMetaData().getTables(databases, null, table,
                    new String[]{"TABLE"});
            while (rsTables.next()) {
                String tableName = rsTables.getString("TABLE_NAME");
                if (StringUtils.equals(tableName, table)) {
                    existTable = true;
                }
            }
            rsTables.close();
        } catch (SQLException throwables) {
            throwables.printStackTrace();
        } finally {
            if (null != connection) {
                recycleConn(connection);
            }

        }
        return existTable;
    }

    public static Map<String, String> getTableFieldType(String url, String username, String password, String databases, String table) {

        Map<String, String> map = new HashMap<>();

        String sql = "select * from " + table + " limit 1";
        PreparedStatement pStmt = null;    // statement that carries the SQL
        ResultSet rs = null;               // query result set
        DruidPooledConnection connection = getConn(url, username, password);

        try {
            pStmt = connection.prepareStatement(sql);
            rs = pStmt.executeQuery();

            ResultSetMetaData metaData = rs.getMetaData();
            int count = metaData.getColumnCount();
            while (rs.next()) {
                // Column indexes are 1-based and inclusive of the last column
                for (int i = 1; i <= count; i++) {
                    String columnType = metaData.getColumnTypeName(i);
                    map.put(metaData.getColumnName(i), columnType);
                }
            }

        } catch (SQLException throwables) {
            throwables.printStackTrace();
        } finally {
            try {
                if (pStmt != null) {
                    pStmt.close();
                }
                if (null != connection) {
                    connection.close();
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
        return map;
    }


    public static void close(Connection conn, ResultSet rs, PreparedStatement state) {
        if (conn != null) {
            recycleConn(conn);
        }
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
        if (state != null) {
            try {
                state.close();
            } catch (SQLException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }



DROP TABLE IF EXISTS JSSMQK_UPLOAD_CDW;
CREATE TABLE
    JSSMQK_UPLOAD_CDW
    (
    BM STRING COMMENT 'subject ID within the project',
    CREATOR STRING COMMENT 'creator',
    EFFECT_ON_DATE STRING COMMENT 'effective date',
    EFFECT_OFF_DATE STRING COMMENT 'expiry date',
    EFFECT_FLAG STRING COMMENT 'validity flag',
    SJLY STRING COMMENT 'data source'
) COMMENT 'table description'
CLUSTERED BY (BM) INTO 100 BUCKETS
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
LINES TERMINATED BY '\n'
STORED AS ORC
TBLPROPERTIES ('transactional'='true');
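The UPDATE statements in steps 3 and 4 only work because this CDW table is bucketed, stored as ORC, and transactional. For reference, a sketch of the session settings usually needed before running UPDATE/DELETE on such a table (standard Hive properties; whether they are already set depends on the cluster configuration):

-- Enable the transaction manager required for Hive ACID operations
SET hive.support.concurrency = true;
SET hive.txn.manager = org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
SET hive.enforce.bucketing = true;   -- Hive 1.x only; implied on Hive 2+
SET hive.exec.dynamic.partition.mode = nonstrict;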







Note: when wiring up the Hive-ES integration and sync, you need to track down the following jars.

1. commons-httpclient-3.1.jar (used for Hive-ES communication; in essence it works like curl, talking directly to the HTTP port ES exposes).

Missing this jar produces:

Caused by: java.lang.NoClassDefFoundError: org/apache/commons/httpclient/protocol/ProtocolSocketFactory at

2. The elasticsearch-hadoop-7.3.2 jar (just put it under Hive's lib directory, not the aux directory, otherwise you will hit jar conflicts; remember to restart Hive for it to take effect; with a CDH or Ambari install, locate the corresponding installation directory first).

3. Pay close attention to how many Hive Metastore instances your cluster runs. If there are two, the jars above must be installed on both servers, otherwise you will get errors.

4. Finally, all of the commands should be run as the hive user, not root.




curl -H "Content-Type: application/json" -XPUT xx.xx.xx.xx:9200/indexName(要修改的索引名)/_settings -d '{"index.max_result_window":"100000"}'


Conda notes:

1) conda list — see which packages are installed.

2) conda env list or conda info -e — see which virtual environments currently exist.

3) conda create -n new_env --clone old_env — clone an existing environment.

4) conda remove --name python36 --all — remove an environment.

py.test — a Python testing framework.

pytest -v -k <function_name> — run only the tests matching that function name.

For problems during the Anaconda installation, refer to the following:


Customer churn rate:


insert into ads.cus_m_cust_lost (customer_lost_num, customer_area, year, month, create_time)
SELECT count(*), customer_area, 2020, 11, now()
  FROM (SELECT customer_standard_id, customer_area, ST_SALE_CONTRACT_CODE, Z1.TIME
          FROM (SELECT customer_standard_id, customer_area, MAX(DT_STAMP) AS TIME
                  FROM (SELECT DISTINCT(A.customer_standard_id), A.customer_area,
                               C.DT_STAMP, C.ST_SALE_CONTRACT_CODE, C.ST_STATUS
                          FROM (SELECT customer_standard_id, customer_area
                                  FROM ads.`cus_d_ads_cust_info`
                                 GROUP BY customer_standard_id, customer_area) A
                          LEFT JOIN ads.`cus_d_ads_contract` C
                            ON A.customer_standard_id = C.ST_CUSTOMER_ID) ta
                 GROUP BY customer_standard_id, customer_area) Z1,
               ads.`cus_d_ads_contract` Z2
         WHERE Z1.TIME = Z2.DT_STAMP
           AND year("2020-11-30") - year(Z1.TIME) > 5
           AND Z2.ST_STATUS = 9) Z3
 GROUP BY customer_area


MySQL function that converts Chinese characters to their pinyin initials:


CREATE DEFINER=`root`@`%` FUNCTION `F_GETPY`(in_string VARCHAR(100)) RETURNS varchar(100) CHARSET utf8
BEGIN 
DECLARE tmp_str VARCHAR(100) CHARSET gbk DEFAULT '' ;
DECLARE tmp_char VARCHAR(100) CHARSET gbk DEFAULT '' ; 
DECLARE V_LEN INT;
DECLARE V_I INT;
DECLARE V_PY VARCHAR(100);
SET V_LEN=CHAR_LENGTH(in_string);
SET V_I=1;
SET V_PY='';
DROP TEMPORARY TABLE IF EXISTS TT_PYZD;
CREATE TEMPORARY TABLE TT_PYZD (chr char(2) ,letter char(2)) DEFAULT CHARSET gbk;
INSERT INTO TT_PYZD
SELECT '吖 ', 'A ' UNION ALL SELECT '八 ', 'B ' UNION ALL 
SELECT '嚓 ', 'C ' UNION ALL SELECT '咑 ', 'D ' UNION ALL 
SELECT '妸 ', 'E ' UNION ALL SELECT '发 ', 'F ' UNION ALL 
SELECT '旮 ', 'G ' UNION ALL SELECT '铪 ', 'H ' UNION ALL 
SELECT '丌 ', 'J ' UNION ALL SELECT '咔 ', 'K ' UNION ALL 
SELECT '垃 ', 'L ' UNION ALL SELECT '嘸 ', 'M ' UNION ALL 
SELECT '拏 ', 'N ' UNION ALL SELECT '噢 ', 'O ' UNION ALL 
SELECT '妑 ', 'P ' UNION ALL SELECT '七 ', 'Q ' UNION ALL 
SELECT '呥 ', 'R ' UNION ALL SELECT '仨 ', 'S ' UNION ALL 
SELECT '他 ', 'T ' UNION ALL SELECT '屲 ', 'W ' UNION ALL 
SELECT '夕 ', 'X ' UNION ALL SELECT '丫 ', 'Y ' UNION ALL 
SELECT '帀 ', 'Z ' ;
WHILE V_I<=V_LEN DO
  SET tmp_str = substring(in_string,V_I,1);
  IF ASCII(tmp_str)<127 THEN
    SET tmp_char=UPPER(tmp_str);
  ELSE
    SELECT letter INTO tmp_char FROM TT_PYZD WHERE chr<=tmp_str ORDER BY chr DESC LIMIT 1;
  END IF;
  SET V_I=V_I+1;
  SET V_PY=CONCAT(V_PY,tmp_char);
END WHILE;
DROP TEMPORARY TABLE IF EXISTS TT_PYZD;
RETURN V_PY;
END
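A quick usage check, assuming the function has been created in the current database (the input string is just an example):

SELECT F_GETPY('数据仓库');   -- expected to return the initials, e.g. 'SJCK' (possibly padded, since the letter column is CHAR(2))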