# With VisualDL visualization. First file: the ResNet backbone (imported below as the "resnet" module).

# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = ["ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}
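

# The "learning_strategy" block above describes a piecewise learning-rate
# decay. A hedged sketch of turning it into a fluid schedule (not called
# anywhere in this file; total_images assumes the ImageNet-1k training-set
# size):
def piecewise_lr_sketch(params=train_parameters, total_images=1281167):
    strategy = params["learning_strategy"]
    steps_per_epoch = int(total_images / strategy["batch_size"]) + 1
    boundaries = [steps_per_epoch * e for e in strategy["epochs"]]
    # len(values) must be len(boundaries) + 1: one rate per interval.
    return fluid.layers.piecewise_decay(
        boundaries=boundaries, values=strategy["steps"])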


class ResNet(object):
    def __init__(self, layers=50):
        self.params = train_parameters
        self.layers = layers

    def net(self, input, class_dim=1000):
        layers = self.layers
        supported_layers = [18, 34, 50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {}, but got {}".format(supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        conv = self.conv_bn_layer(
            input=input, num_filters=64, filter_size=7, stride=2, act='relu', name="conv1")
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')
        if layers >= 50:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.bottleneck_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1, name=conv_name)

            pool = fluid.layers.pool2d(
                input=conv, pool_size=7, pool_type='avg', global_pooling=True)
            stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
            out = fluid.layers.fc(input=pool,
                                  size=class_dim,
                                  param_attr=fluid.param_attr.ParamAttr(
                                      initializer=fluid.initializer.Uniform(-stdv, stdv)))
        else:
            for block in range(len(depth)):
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.basic_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        is_first=block == i == 0,
                        name=conv_name)

            pool = fluid.layers.pool2d(
                input=conv, pool_size=7, pool_type='avg', global_pooling=True)
            stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
            out = fluid.layers.fc(input=pool,
                                  size=class_dim,
                                  param_attr=fluid.param_attr.ParamAttr(
                                      initializer=fluid.initializer.Uniform(-stdv, stdv)))

        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            name=name + '.conv2d.output.1')

        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(input=conv,
                                       act=act,
                                       name=bn_name + '.output.1',
                                       param_attr=ParamAttr(name=bn_name + '_scale'),
                                       bias_attr=ParamAttr(name=bn_name + '_offset'),
                                       moving_mean_name=bn_name + '_mean',
                                       moving_variance_name=bn_name + '_variance')
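
    # Naming note: conv weights follow the Caffe-style ResNet convention
    # (e.g. "res2a_branch2a_weights"); the name[3:] slice above derives the
    # matching batch-norm names ("bn2a_branch2a_scale"/"bn2a_branch2a_offset").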

    def shortcut(self, input, ch_out, stride, is_first, name):
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1 or is_first:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, name):
        conv0 = self.conv_bn_layer(
            input=input, num_filters=num_filters, filter_size=1, act='relu', name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name + "_branch2c")

        short = self.shortcut(input, num_filters * 4, stride, is_first=False, name=name + "_branch1")

        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + ".add.output.5")

    def basic_block(self, input, num_filters, stride, is_first, name):
        conv0 = self.conv_bn_layer(input=input, num_filters=num_filters, filter_size=3, act='relu', stride=stride,
                                   name=name + "_branch2a")
        conv1 = self.conv_bn_layer(input=conv0, num_filters=num_filters, filter_size=3, act=None,
                                   name=name + "_branch2b")
        short = self.shortcut(input, num_filters, stride, is_first, name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')


def ResNet18():
    model = ResNet(layers=18)
    return model


def ResNet34():
    model = ResNet(layers=34)
    return model


def ResNet50():
    model = ResNet(layers=50)
    return model


def ResNet101():
    model = ResNet(layers=101)
    return model


def ResNet152():
    model = ResNet(layers=152)
    return model
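

# Hedged usage sketch (comments only, so importing this module adds nothing to
# the default program): build a ResNet-50 head for 10 classes.
#
#     image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
#     logits = ResNet50().net(input=image, class_dim=10)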

# ============================================================================
# Second file: the training script. It imports the ResNet definition above as
# the "resnet" module and logs the loss curves to VisualDL.
# -*- coding: utf-8 -*-
from __future__ import print_function

import os
import argparse
from PIL import Image
import numpy
import paddle
import paddle.fluid as fluid

from visualdl import LogWriter
# Create a LogWriter. The first argument is the directory where the log data
# is stored; the second sets how many write operations occur between each
# flush of the in-memory records to disk.
logw = LogWriter("./paddle_log", sync_cycle=10000)
with logw.mode('train') as logger:
    trainTag = logger.scalar("损失指标")  # training-loss tag ("损失指标" = "loss metric"); shown as "train" in the chart
with logw.mode('test') as logger:
    testTag = logger.scalar("损失指标")  # test-loss tag; shown as "test" in the chart
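
# To view the curves while training, start the VisualDL board and point it at
# the log directory (a hedged note: the flags follow the 2019-era VisualDL CLI):
#
#     visualdl --logdir ./paddle_log --port 8080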


import sys
class Logger(object):
    """Tee: mirror everything written to stdout into a log file."""
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()


def parse_args():
    parser = argparse.ArgumentParser("mnist")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    parser.add_argument(
        '--use_gpu',
        # argparse's type=bool treats any non-empty string (even "False") as
        # True, so parse the flag text explicitly.
        type=lambda v: str(v).lower() in ('true', 't', '1', 'yes'),
        default=True,
        help="Whether to use GPU or not.")
    parser.add_argument(
        '--num_epochs', type=int, default=100, help="number of epochs.")
    args = parser.parse_args()
    return args
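
# Hedged usage example (the script name is a placeholder):
#
#     python train_mnist.py --use_gpu true --num_epochs 5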


def loss_net(prediction, label):
    # Cross-entropy loss averaged over the batch, plus batch accuracy.
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc


def multilayer_perceptron(img, label):
    hidden = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden = fluid.layers.fc(input=hidden, size=200, act='tanh')
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    return loss_net(prediction, label)


def softmax_regression(img, label):
    prediction = fluid.layers.fc(input=img, size=10, act='softmax')
    return loss_net(prediction, label)


def resnet_pre_train_10class(img, label):
    import resnet
    prediction = resnet.ResNet18().net(input=img, class_dim=10)
    # The final fc layer in the ResNet definition has no softmax activation,
    # so apply softmax here before the cross-entropy loss.
    prediction_softmax = fluid.layers.softmax(input=prediction, use_cudnn=True)
    return loss_net(prediction_softmax, label)
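

# A hedged alternative (a sketch, not wired into train()): fluid also has a
# fused, numerically more stable op, softmax_with_cross_entropy, which takes
# the raw logits directly.
def resnet_fused_loss_sketch(img, label):
    import resnet
    logits = resnet.ResNet18().net(input=img, class_dim=10)
    loss = fluid.layers.softmax_with_cross_entropy(logits=logits, label=label)
    avg_loss = fluid.layers.mean(loss)
    prediction = fluid.layers.softmax(input=logits)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc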



# ------- Monitor training progress by calling handler functions -------
from paddle.utils.plot import Ploter
train_prompt = "Train cost"
test_prompt = "Test cost"
cost_ploter = Ploter(train_prompt, test_prompt)

# Plot the training/testing cost curves.
def event_handler_plot(ploter_title, step, cost):
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot("./plot/paddle_plot")

# Print intermediate results: pass (epoch) id, batch id, and cost.
def event_handler(pass_id, batch_id, cost):
    print("Pass %d, Batch %d, Cost %f" % (pass_id, batch_id, cost))


def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):

    # Bail out early if CUDA was requested but this Paddle build was not
    # compiled with CUDA support.
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # startup_program holds parameter creation, input/output setup, and the
    # initialization of the learnable parameters.
    startup_program = fluid.default_startup_program()
    # main_program holds the network: forward/backward computation and the
    # optimizer's updates to the learnable parameters.
    main_program = fluid.default_main_program()

    # With --enable_ce, fix the random seeds so the run is reproducible
    # (verify via the printed acc/avg_loss values).
    if args.enable_ce:
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
        startup_program.random_seed = 90
        main_program.random_seed = 90
    else:
        # Data pipeline: read, shuffle, then group into batches.
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)

    # Network inputs: image and label.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    # Choose the backbone network.
    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        net_conf = resnet_pre_train_10class

    # Assemble the network: inputs (image + label) + backbone + loss
    # (returning the prediction, the mean loss, and the accuracy).
    prediction, avg_loss, acc = net_conf(img, label)

    # Clone the main program for evaluation; the for_test clone strips
    # training-only ops such as gradients.
    test_program = main_program.clone(for_test=True)
    # Optimizer, minimizing avg_loss.
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)

    # Evaluation helper, defined up front and called during training.
    def train_test(train_test_program, train_test_feed, train_test_reader):
        # train_test_program is the for_test clone of main_program, keeping
        # evaluation separate from the gradient ops in the main program.
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # Mean loss and accuracy over the whole test set.
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        return avg_loss_val_mean, acc_val_mean

    # place selects where the program runs: GPU if requested, else CPU.
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # DataFeeder maps reader output onto the network's declared inputs.
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)

    # Initialize parameters before training; the loss, optimizer, and
    # executor must all be defined first. startup_program holds the
    # parameter-creation and initialization ops.
    exe.run(startup_program)

    # Number of training epochs.
    epochs = [epoch_id for epoch_id in range(PASS_NUM)]

    # Collect (epoch_id, avg_loss_val, acc_val) so the best epoch can be
    # found afterwards.
    lists = []
    step = 0
    for epoch_id in epochs:
        for step_id, data in enumerate(train_reader()):
            # feed maps the batch onto the network's input variables;
            # fetch_list pulls out the values we want back.
            metrics = exe.run(
                main_program,
                feed=feeder.feed(data),
                fetch_list=[avg_loss, acc])
            # Every 100 batches, extend the loss curve and print the cost.
            if step % 100 == 0:
                event_handler_plot(train_prompt, step, metrics[0])
                print("Step %d, Epoch %d, Cost %f" % (step, epoch_id, metrics[0]))
            step += 1

        # Log the epoch's final training loss to VisualDL.
        trainTag.add_record(step, metrics[0])

        # Evaluate on the test set once per epoch.
        avg_loss_val, acc_val = train_test(
            train_test_program=test_program,
            train_test_reader=test_reader,
            train_test_feed=feeder)
        # Print this epoch's test result.
        print("Test with Epoch %d, avg_cost: %s, acc: %s" % (epoch_id, avg_loss_val, acc_val))
        # Extend the test-loss curve.
        event_handler_plot(test_prompt, step, avg_loss_val)

        # Log the test loss to VisualDL.
        testTag.add_record(step, avg_loss_val)

        # Record this epoch's result so the best one can be found later.
        lists.append((epoch_id, avg_loss_val, acc_val))
        # If a path is given, save the inference model every epoch,
        # overwriting the previous save:
        #   ["img"]      -- the variables inference needs to feed
        #   [prediction] -- the Variables holding the inference results
        #   exe          -- the executor that saves the inference model
        if save_dirname is not None:
            fluid.io.save_inference_model(
                save_dirname, ["img"], [prediction],
                exe,
                model_filename=model_filename,
                params_filename=params_filename)

    # All epochs done. With --enable_ce, print the reproducibility KPIs:
    # the final training loss plus the test loss and accuracy.
    if args.enable_ce:
        print("kpis\ttrain_cost\t%f" % metrics[0])
        print("kpis\ttest_cost\t%s" % avg_loss_val)
        print("kpis\ttest_acc\t%s" % acc_val)

    # Find the pass with the lowest test loss and report it.
    best = sorted(lists, key=lambda x: float(x[1]))[0]
    print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))
    # Print the best classification accuracy.
    print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))


def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):

    # Nothing to do if no model was saved.
    if save_dirname is None:
        return

    # place selects where the program runs: CPU or GPU.
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    # Create the executor.
    exe = fluid.Executor(place)

    # Load an image with PIL, resize it to the network's input size, and
    # normalize it to N x C x H x W float32. These steps mirror the
    # preprocessing in paddle.dataset.mnist (pixels mapped into [-1, 1]),
    # so inference sees the same distribution as training.
    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    # Resolve paths relative to this script's directory and load the test image.
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    tensor_img = load_image(cur_dir + '/image/mnist_train_20.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).

        # Load the model. Returns (Program, feed_target_names, fetch_targets):
        #   - the inference Program,
        #   - a list of the variable names that must be fed, and
        #   - the Variables from which the inference results are fetched.
        [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.

        # This mirrors the exe.run(...) call from the training stage:
        #   metrics = exe.run(main_program, feed=feeder.feed(data),
        #                     fetch_list=[avg_loss, acc])
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        # results is a list holding one array of the 10 class probabilities.
        # argsort sorts ascending, so the most likely class index is last:
        # lab[0][0][-1].
        lab = numpy.argsort(results)
        print("Inference result of image/mnist_train_20.png is: %d" % lab[0][0][-1])


def main(use_cuda, nn_type):
    # Model file name; if None, it defaults to: __model__
    #model_filename = None
    model_filename = "paddle_model"
    # Parameter file name; if None, parameters are saved as separate files
    # (batch_norm_0.b_0, conv2d_0.b_0, and so on).
    #params_filename = None
    params_filename = "paddle_params"
    # Directory where the model and parameters are saved.
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"

    # Mirror all print output into a log file.
    sys.stdout = Logger('paddle_log.txt')

    # Train, then run inference with the saved model.
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    # Forward-pass test using the saved model.
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)


if __name__ == '__main__':
    # Parse the command-line arguments.
    args = parse_args()
    BATCH_SIZE = 64
    PASS_NUM = args.num_epochs
    use_cuda = args.use_gpu
    # Choose the backbone network; train() dispatches on this string.
    # predict = 'softmax_regression'     # uncomment for Softmax
    # predict = 'multilayer_perceptron'  # uncomment for MLP
    # Any other value falls through to the ResNet branch in train().
    predict = 'resnet_pre_train_10class'
    main(use_cuda=use_cuda, nn_type=predict)