Processing Data with a CNN

A record of the code I have been studying with recently. Its overall structure comes from the internet; I made some modifications of my own.
This is the CNN part.

import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from random import sample
# Create the computation-graph session
sess = tf.Session()
# Set model parameters

batch_size = 300  # number of images per training batch
initial_learning_rate = 0.003  # initial learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=30, decay_rate=0.9)
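
# Added aside (not in the original): with the default staircase=False,
# tf.train.exponential_decay yields the smooth schedule
#   lr(step) = initial_learning_rate * decay_rate ** (step / decay_steps)
# so lr(0) = 0.003, lr(30) = 0.003 * 0.9 = 0.0027, lr(60) = 0.003 * 0.81 = 0.00243.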

evaluation_size = 300  # number of test images per evaluation batch
image_width = 23  # image width and height
image_height = 300
target_size = 3  # three target classes (0, 1, 2)
num_channels = 1  # greyscale images, so a single colour channel
generations = 2000  # number of training iterations
evaluation_step = 10  # run an evaluation every ten training steps
conv1_features = 64  # number of filters per convolutional layer
conv2_features = 128
max_pool_size1 = 8  # pooling window sizes (note: my_conv_net below hard-codes 10 and 8)
max_pool_size2 = 8
fully_connected_size = 128  # number of neurons in the fully connected layer

# Declare placeholders
x_input_shape = [batch_size, image_height, image_width, num_channels]
x_input = tf.placeholder(tf.float32, shape=x_input_shape)
y_target = tf.placeholder(tf.int32, shape=[batch_size])

evaluation_input_shape = [evaluation_size, image_height, image_width, num_channels]
evaluation_input = tf.placeholder(tf.float32, shape=evaluation_input_shape)
evaluation_target = tf.placeholder(tf.int32, shape=[evaluation_size])

# Declare weights and biases for the convolutional layers
# Convolutional layer 1:
# 21x23 filters, 1 input channel, conv1_features output channels
conv1_weight = tf.Variable(tf.truncated_normal([21, 23, 1, conv1_features], stddev=0.1, dtype=tf.float32))
conv1_bias = tf.Variable(tf.truncated_normal([conv1_features], stddev=0.1, dtype=tf.float32))

# Convolutional layer 2:
# 21x1 filters, conv1_features input channels, conv2_features output channels
conv2_weight = tf.Variable(tf.truncated_normal([21, 1, conv1_features, conv2_features], stddev=0.1, dtype=tf.float32))
conv2_bias = tf.Variable(tf.truncated_normal([conv2_features], stddev=0.1, dtype=tf.float32))

# Declare weights and biases for the fully connected layers
# Image height and width after the convolution/pooling stack
conv_output_width = 1  # the 23-wide conv1 filter collapses the width to 1
conv_output_height = 55  # 300 -> 280 -> 136 -> 116 -> 55 (verified by the check below)

# Input size of the first fully connected layer
full1_input_size = conv_output_width * conv_output_height * conv2_features
full1_weight = tf.Variable(tf.truncated_normal([full1_input_size, fully_connected_size], stddev=0.1, dtype=tf.float32))
full1_bias = tf.Variable(tf.truncated_normal([fully_connected_size], stddev=0.1, dtype=tf.float32))
full2_weight = tf.Variable(tf.truncated_normal([fully_connected_size, target_size], stddev=0.1, dtype=tf.float32))
full2_bias = tf.Variable(tf.truncated_normal([target_size], stddev=0.1, dtype=tf.float32))
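
# Added sanity check (not in the original script; `valid_out` is a hypothetical
# helper) verifying the hard-coded conv_output_height/width above. With 'VALID'
# padding, each conv/pool layer maps a length to (length - window) // stride + 1.
def valid_out(size, window, stride=1):
    # output length of one 'VALID' convolution or pooling along a single dimension
    return (size - window) // stride + 1

# height: 300 -conv1(21)-> 280 -pool1(10,s2)-> 136 -conv2(21)-> 116 -pool2(8,s2)-> 55
assert valid_out(valid_out(valid_out(valid_out(300, 21), 10, 2), 21), 8, 2) == 55
# width: the 23-wide conv1 filter collapses it to 1, and it stays 1 thereafter
assert valid_out(23, 23) == 1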

def my_conv_net(input_data):
    # First layer: Conv-ReLU-MaxPool
    conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding='VALID')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))
    print(conv1)  # print tensor shapes for debugging
    max_pool1 = tf.nn.max_pool(relu1, ksize=[1, 10, 1, 1],
                               strides=[1, 2, 1, 1], padding='VALID')
    print(max_pool1)
    # Second layer: Conv-ReLU-MaxPool
    conv2 = tf.nn.conv2d(max_pool1, conv2_weight, strides=[1, 1, 1, 1], padding='VALID')
    print(conv2)
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
    max_pool2 = tf.nn.max_pool(relu2, ksize=[1, 8, 1, 1],
                               strides=[1, 2, 1, 1], padding='VALID')
    print(max_pool2)
    # Fully connected layers
    # First flatten the feature maps into a [batch_size, N] matrix
    conv_output_shape = max_pool2.get_shape().as_list()
    # Flattened size = height * width * channels
    fully_input_size = conv_output_shape[1] * conv_output_shape[2] * conv_output_shape[3]
    full1_input_data = tf.reshape(max_pool2,
                                  [conv_output_shape[0], fully_input_size])  # [batch_size, fully_input_size]
    # First fully connected layer
    fully_connected1 = tf.nn.relu(tf.add(tf.matmul(full1_input_data, full1_weight), full1_bias))
    # Output layer: raw logits, shape [batch_size, target_size]. No ReLU here --
    # sparse_softmax_cross_entropy_with_logits expects unclipped logits.
    model_output = tf.add(tf.matmul(fully_connected1, full2_weight), full2_bias)
    return model_output

model_output = my_conv_net(x_input)
test_model_output = my_conv_net(evaluation_input)  # same weights: both calls reuse the module-level Variables

# Loss: softmax cross-entropy plus L2 regularization on all weight matrices
reg = 0.0001
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=y_target)) \
       + tf.nn.l2_loss(conv1_weight) * reg + tf.nn.l2_loss(conv2_weight) * reg \
       + tf.nn.l2_loss(full1_weight) * reg + tf.nn.l2_loss(full2_weight) * reg
loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=test_model_output, labels=evaluation_target)) \
        + tf.nn.l2_loss(conv1_weight) * reg + tf.nn.l2_loss(conv2_weight) * reg \
        + tf.nn.l2_loss(full1_weight) * reg + tf.nn.l2_loss(full2_weight) * reg
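
# Added aside: the same regularized loss can be written more compactly with
# tf.add_n (an equivalent sketch, behavior unchanged):
#   l2_penalty = reg * tf.add_n([tf.nn.l2_loss(w) for w in
#                                [conv1_weight, conv2_weight, full1_weight, full2_weight]])
#   loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
#       logits=model_output, labels=y_target)) + l2_penalty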

# Prediction and evaluation
prediction = tf.nn.softmax(model_output)
test_prediction = tf.nn.softmax(test_model_output)

def get_accuracy(logits, targets):
    batch_predictions = np.argmax(logits, axis=1)  # index of the largest value in each row
    print(batch_predictions)  # debug: predicted vs. true labels
    print(targets)
    num_correct = np.sum(np.equal(batch_predictions, targets))
    return 100 * num_correct / batch_predictions.shape[0]


def get_ABC_num(logits, targets):
    # For each true class, count how its samples were classified
    judgeA, judgeB, judgeC = {0: 0, 1: 0, 2: 0}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 0, 2: 0}
    numA, numB, numC = 0, 0, 0
    batch_predictions = np.argmax(logits, axis=1)  # index of the largest value in each row
    for i in range(len(batch_predictions)):
        if targets[i] == 0:
            numA += 1
            judgeA[batch_predictions[i]] += 1
        elif targets[i] == 1:
            numB += 1
            judgeB[batch_predictions[i]] += 1
        else:
            numC += 1
            judgeC[batch_predictions[i]] += 1
    print('========== Per-class comparison ==========')
    print(judgeA, judgeB, judgeC)
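
# Added aside: get_ABC_num is effectively a 3x3 confusion matrix; a compact
# NumPy equivalent (a sketch, not the original code) would be:
#   confusion = np.zeros((3, 3), dtype=int)
#   for pred, true in zip(np.argmax(logits, axis=1), targets):
#       confusion[true, pred] += 1   # row = true class, column = predicted class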

# Create the optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_step = opt.minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)

def mainCnn(train_List,train_judge,test_List,test_judge):
    train_xdata = np.array([np.reshape(x, [300, 23]) for x in train_List])
    test_xdata = np.array([np.reshape(x, [300, 23]) for x in test_List])

    train_labels = np.array(train_judge)
    test_labels = np.array(test_judge)
    # Start training
    train_loss = []
    test_loss = []
    test_loss2 = []
    train_acc = []
    test_acc = []
    test_acc2 = []
    Learning_rate_vec = []
    for i in range(generations):
        # rand_index = sample(np.random.permutation(np.arange(len(train_xdata))).tolist(),batch_size)
        # print(rand_index)
        rand_index = np.random.choice(len(train_xdata), size=batch_size)
        rand_x = train_xdata[rand_index]
        rand_x = np.expand_dims(rand_x, 3)
        rand_y = train_labels[rand_index]
        num0, num1, num2 = 0, 0, 0
        for k in range(len(rand_y)):
            if (rand_y[k] == 0):
                num0 += 1
            if rand_y[k] == 1:
                num1 += 1
            if rand_y[k] == 2:
                num2 += 1
        print(num0, num1, num2)
        Learning_rate_vec.append(sess.run(learning_rate, feed_dict={global_step: i}))
        train_dict = {x_input: rand_x, y_target: rand_y}

        sess.run(train_step, feed_dict={x_input: rand_x, y_target: rand_y, global_step: i})
        temp_train_loss = sess.run(loss, feed_dict=train_dict)
        temp_train_prediction = sess.run(prediction, feed_dict=train_dict)
        temp_train_acc = get_accuracy(temp_train_prediction, rand_y)
        print('Step', i + 1, 'accuracy:', temp_train_acc, 'learning rate:', Learning_rate_vec[-1])
        # Evaluation on the test set
        if ((i + 1) % evaluation_step == 0) or (i == 0):
            eval_index = np.random.choice(len(test_xdata), size=evaluation_size)
            eval_x = test_xdata[eval_index]
            eval_x = np.expand_dims(eval_x, 3)
            eval_y = test_labels[eval_index]

            test_dict = {evaluation_input: eval_x, evaluation_target: eval_y}
            temp_test_preds,temp_test_loss = sess.run([test_prediction,loss1] ,feed_dict=test_dict)
            temp_test_acc = get_accuracy(temp_test_preds, eval_y)
            get_ABC_num(temp_test_preds, eval_y)
            print('Test accuracy:', temp_test_acc)
            test_acc.append(temp_test_acc)
            test_acc2.append(temp_test_acc)
            test_loss.append(temp_test_loss)
            test_loss2.append(temp_test_loss)
        else:
            test_acc2.append(0)
            test_loss.append(0)
        train_acc.append(temp_train_acc)
        train_loss.append(temp_train_loss)

    index = np.arange(start=0, stop=generations + 1, step=evaluation_step)
    # Plot the loss curves
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(train_loss, 'k-', label='Train loss')
    ax.plot(index, test_loss2, 'r--', label='Test loss')
    ax.set_xlabel('Generation')
    ax.set_ylabel('Softmax Loss')
    ax.legend(loc='upper right')
    fig.suptitle('Softmax Loss per Generation')
    # Write the accuracy and loss histories to files (plain text despite the .xls extension)
    result1 = open('trainData.xls', 'w', encoding='gbk')
    for i in range(0, len(train_acc)):
        result1.write(str(train_acc[i]))
        result1.write('\n')
    result1.close()
    result2 = open('testData.xls', 'w', encoding='gbk')
    for i in range(0, len(test_acc2)):
        result2.write(str(test_acc2[i]))
        result2.write('\n')
    result2.close()
    result3 = open('losstrainData.xls', 'w', encoding='gbk')
    for i in range(0, len(train_loss)):
        result3.write(str(train_loss[i]))
        result3.write('\n')
    result3.close()
    result4 = open('losstestData.xls', 'w', encoding='gbk')
    for i in range(0, len(test_loss)):
        result4.write(str(test_loss[i]))
        result4.write('\n')
    result4.close()


    # Plot the accuracy curves

    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    # Write train_acc to result.xls (duplicates the trainData.xls output above)
    result = open('result.xls', 'w', encoding='gbk')
    print(train_acc)
    for i in range(0, len(train_acc)):
        result.write(str(train_acc[i]))
        result.write('\n')
    result.close()
    ax2.plot(train_acc, 'k-', label='Train Set Accuracy')
    ax2.plot(index, test_acc, 'r--', label='Test Set Accuracy')
    ax2.set_xlabel('Generation')
    ax2.set_ylabel('Accuracy')
    ax2.legend(loc='lower right')
    fig2.suptitle('Train and Test Set Accuracy')
    ax2.set_ylim(bottom=0.)

    # Plot the learning-rate schedule
    fig4 = plt.figure()
    ax4 = fig4.add_subplot(111)
    ax4.plot(Learning_rate_vec, 'k-')
    ax4.set_xlabel('step')
    ax4.set_ylabel('Learning_rate')
    fig4.suptitle('Learning_rate')

    plt.show()

This is the data preprocessing part.

import pandas as pd
import numpy as np
from newCnnTensorflow import mainCnn

def splitFrame(data):
    # Return the [start, end] row-label pair of every contiguous run in data's index
    num = data.index.values[0]
    split1 = []
    for i in range(len(data.index.values)):
        if i != len(data.index.values) - 1:
            if 1 + data.index.values[i] != data.index.values[i + 1]:
                split1.append([num, data.index.values[i]])
                num = data.index.values[i + 1]
        else:
            split1.append([num, data.index.values[i]])
    return split1
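
# Added illustration (made-up indices): for a sub-frame whose index is
# [0, 1, 2, 7, 8], splitFrame returns [[0, 2], [7, 8]] -- the [start, end]
# pair of each contiguous run of row labels.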


# Take the rows from start to end (inclusive) and slide a 300-row window over
# them with stride 1, producing one flattened sample per window position
def dataDeal(data, start, end):
    print(data.loc[start, 'judge'])
    # Derive the class label from the 'judge' column (1xx -> 0, 2xx -> 1, 3xx -> 2)
    if data.loc[start, 'judge'] < 200:
        num = 0
    elif 200 < data.loc[start, 'judge'] < 300:
        num = 1
    else:
        num = 2
    length = end - start + 1
    x_data = data.drop(['judge'], axis=1)
    dataList, judge = [], []
    for i in range(1 + (length - 300) // 1):  # // 1 keeps the stride explicit
        a = x_data.loc[start + i:start + i + 300 - 1].values  # 300 rows x 23 columns
        judge.append(num)
        midList = []
        for j in range(len(a)):  # flatten the window row-major
            for k in range(23):
                midList.append(a[j][k])
        dataList.append(midList)
    return dataList, judge
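
# Added aside: the window loop above is equivalent to this NumPy sketch
# (assuming stride 1; each sample is one 300x23 window flattened row-major
# into 300 * 23 = 6900 values):
#   arr = x_data.loc[start:end].values                                  # (length, 23)
#   windows = [arr[i:i + 300].ravel().tolist() for i in range(length - 300 + 1)]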

def fileDataDeal(data):
    # judge == 101/201/301 marks training data for classes 0/1/2;
    # judge == 102/202/302 marks the corresponding test data
    a1 = data[data['judge'] == 101]
    b1 = data[data['judge'] == 201]
    c1 = data[data['judge'] == 301]
    a2 = data[data['judge'] == 102]
    b2 = data[data['judge'] == 202]
    c2 = data[data['judge'] == 302]
    # Build the training set
    dataSplit = []
    for i in [a1, b1, c1]:
        dataSplit.extend(splitFrame(i))
    trainList,trainJudge = [],[]
    for i in dataSplit:
        midList,midJudge = dataDeal(data,i[0],i[1])
        trainList.extend(midList)
        trainJudge.extend(midJudge)
    # Build the test set
    testSplit = []
    for i in [a2,b2,c2]:
        testSplit.extend(splitFrame(i))
    testList,testJudge = [],[]
    for i in testSplit:
        midList,midJudge = dataDeal(data,i[0],i[1])
        testList.extend(midList)
        testJudge.extend(midJudge)
    num0, num1, num2 = 0, 0, 0  # class distribution of the training labels
    for i in range(len(trainJudge)):
        if (trainJudge[i] == 0):
            num0 += 1
        if trainJudge[i] == 1:
            num1 += 1
        if trainJudge[i] == 2:
            num2 += 1
    print(num0,num1,num2)
    mainCnn(trainList,trainJudge,testList,testJudge)

if __name__ == '__main__':
    a = r'C:\Users\we2swing\Desktop\data.xls'
    b = r'C:\Users\we2swing\Desktop\new.xls'
    data = pd.read_excel(a)
    fileDataDeal(data)