TensorFlow model structure for regression from images

idfiyjo8, asked 8 months ago

I'm trying to build a TensorFlow model for analysing a board game, so I started with a simpler 2D data set. I generated 1,000 black-and-white images like these two examples:

I figured this would be a good exercise: trying to recover the angle of the black half. I labelled these two example images as 210.474° and 147.593°.
The results I'm getting are terrible. All of the predictions on the test data are roughly 180°, presumably close to the mean of the labels.
Does anyone have advice on how to improve my model architecture or otherwise improve my results? Do I need to normalise the input data if all of the pixels are boolean?
I create the model like this:

def build_and_compile_model():
    num_channels = 200
    kernel_size = 3
    image_height = 64
    image_width = 64
    regularizer = regularizers.l2(0.0001)

    model = keras.Sequential(
        [layers.Conv2D(num_channels,
                       kernel_size,
                       padding='same',
                       activation='relu',
                       input_shape=(image_height, image_width, 1),
                       activity_regularizer=regularizer),
         layers.Dense(64, activation='relu'),
         layers.Dense(64, activation='relu'),
         layers.Dense(1)])

    model.compile(loss='mean_absolute_error',
                  optimizer=tf.keras.optimizers.Adam(0.001))
    return model

When I try to fit the model, it improves for a few epochs and then plateaus at a high error.

Here's the complete example:

import math
import shutil
import typing
from datetime import datetime
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw
import tensorflow as tf
from space_tracer import LivePillowImage
from tensorflow import keras
from tensorflow.python.keras import layers, regularizers

def build_and_compile_model():
    num_channels = 200
    kernel_size = 3
    image_height = 64
    image_width = 64
    regularizer = regularizers.l2(0.0001)

    model = keras.Sequential(
        [layers.Conv2D(num_channels,
                       kernel_size,
                       padding='same',
                       activation='relu',
                       input_shape=(image_height, image_width, 1),
                       activity_regularizer=regularizer),
         layers.Dense(64, activation='relu'),
         layers.Dense(64, activation='relu'),
         layers.Dense(1)])

    model.compile(loss='mean_absolute_error',
                  optimizer=tf.keras.optimizers.Adam(0.001))
    return model

def main():
    image_folder = Path(__file__).parent / 'circle_images'
    num_images = 1000
    image_data, label_data = read_input_data(num_images, image_folder)

    # Make NumPy printouts easier to read.
    np.set_printoptions(precision=3, suppress=True)

    image_count = image_data.shape[0]
    image_data = image_data.reshape(image_data.shape + (1, ))

    train_size = math.floor(image_count * 0.8)
    train_dataset = image_data[:train_size, :, :]
    test_dataset = image_data[train_size:, :, :]
    train_labels = label_data[:train_size]
    test_labels = label_data[train_size:]

    test_results = {}

    dnn_model = build_and_compile_model()

    print('training dataset:', train_dataset.shape)
    print('training labels:', train_labels.shape)

    start = datetime.now()
    history = dnn_model.fit(
        train_dataset,
        train_labels,
        validation_split=0.2,
        verbose=0, epochs=25)
    print('Trained for', datetime.now() - start)

    test_results['dnn_model'] = dnn_model.evaluate(test_dataset, test_labels, verbose=0)
    print(pd.DataFrame(test_results, index=['Mean absolute error [game value]']).T)

    test_predictions = dnn_model.predict(test_dataset).flatten()
    print(test_labels[:10])
    print(test_predictions[:10])

    plot_loss(history)

def create_images(num_images: int, image_folder: Path) -> None:
    print(f'Creating {num_images} images.')
    image_folder.mkdir()
    start_angles = np.random.random(num_images)
    start_angles *= 360
    rng = np.random.default_rng()
    rng.shuffle(start_angles)
    for i, start_angle in enumerate(start_angles):
        image_path = image_folder / f'image{i}.png'
        image = create_image(start_angle)
        image.save(image_path)
    label_text = '\n'.join(str(start_angle) for start_angle in start_angles)
    (image_folder / 'labels.csv').write_text(label_text)

def create_image(start_angle: float) -> Image.Image:
    image = Image.new('1', (64, 64))  # B&W 64x64
    drawing = ImageDraw.Draw(image)
    drawing.rectangle((0, 0, 64, 64), fill='white')
    drawing.pieslice(((0, 0), (63, 63)),
                     -start_angle,
                     -start_angle+180,
                     fill='black')
    return image

def read_input_data(num_images: int, image_folder: Path) -> typing.Tuple[
        np.ndarray,
        np.ndarray]:
    """ Read input data from the image folder.

    :returns: (images, labels)
    """
    labels = []
    if image_folder.exists():
        with (image_folder / 'labels.csv').open() as f:
            for line in f:
                labels.append(float(line))
    image_count = len(labels)
    if image_count != num_images:
        # Size has changed, so recreate the input data.
        shutil.rmtree(image_folder, ignore_errors=True)
        create_images(num_images, image_folder)
        return read_input_data(num_images, image_folder)
    label_data = np.array(labels)
    images = np.zeros((image_count, 64, 64))
    for i, image_path in enumerate(sorted(image_folder.glob('*.png'))):
        image = Image.open(image_path)
        bits = np.array(image)
        images[i, :, :] = bits
    return images, label_data

def plot_loss(history):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(bottom=0)
    plt.xlabel('Epoch')
    plt.ylabel('Error [angle]')
    plt.legend()
    plt.grid(True)
    plt.show()

def demo():
    image = create_image(226.634)
    LivePillowImage(image).display()

if __name__ == '__main__':
    main()
elif __name__ == '__live_coding__':
    demo()

At the end, I see output like this:

Trained for 0:00:09.155005
           Mean absolute error [game value]
dnn_model                         92.051697
7/7 [==============================] - 0s 4ms/step
[210.474 147.593 327.796 120.112 163.402 178.04  333.604 342.488 119.694
 240.8  ]
[177.15  181.242 181.242 181.242 181.242 181.242 181.242 181.242 181.242
 181.242]

You can see that all the predictions are close to 180°.

xiozqbni 1#

The problem is in how you handle your data. In general, relying on some file ordering for an ML model is a very unsafe idea. Instead, store each input and its corresponding label together in one place, in some sort of database.

for i, image_path in enumerate(sorted(image_folder.glob('*.png'))):
    image = Image.open(image_path)
    bits = np.array(image)
    images[i, :, :] = bits

This particular loop is wrong, because sorting strings is not the same as sorting numbers. So if you sort the file names, then
image234.png < image3.png
because this is lexicographic (dictionary) ordering.
As a result, your whole data set has its labels completely scrambled with respect to the images, so your model can't learn anything beyond predicting the mean (which is exactly what you are seeing now). If you only generate 12 images, you end up with this:

Image1  -> Label1
Image10 -> Label2
Image11 -> Label3
Image12 -> Label4
Image2  -> Label5
Image3  -> Label6
Image4  -> Label7
Image5  -> Label8
Image6  -> Label9
Image7  -> Label10
Image8  -> Label11
Image9  -> Label12
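
This scrambling is easy to reproduce in plain Python. A small illustration of my own, using the image0.png … image11.png names that create_images actually produces:

names = [f'image{i}.png' for i in range(12)]
print(sorted(names))
# ['image0.png', 'image1.png', 'image10.png', 'image11.png',
#  'image2.png', 'image3.png', 'image4.png', 'image5.png',
#  'image6.png', 'image7.png', 'image8.png', 'image9.png']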

One way to fix it is to change the loop above to

for i in range(len(label_data)):
    image_path = image_folder / f"image{i}.png"  # some logic here to point to the correct file using i
    image = Image.open(image_path)
    bits = np.array(image)
    images[i, :, :] = bits
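
Alternatively, the glob-based loop can be kept if the files are sorted by the numeric part of the name instead of lexicographically. This is my own sketch, assuming the filenames always follow the imageN.png pattern that create_images produces:

def numeric_key(path: Path) -> int:
    # Extract the integer N from a filename like 'imageN.png'.
    return int(path.stem[len('image'):])

for i, image_path in enumerate(sorted(image_folder.glob('*.png'), key=numeric_key)):
    image = Image.open(image_path)
    bits = np.array(image)
    images[i, :, :] = bits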

After fixing this, you should be able to learn your mapping even with a tiny MLP; you don't even need convolutions.

model = keras.Sequential(
        [
            layers.Flatten(),
            layers.Dense(32),
            layers.Dense(1)])

Training for 300 epochs.
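
For completeness, here is a sketch of how that small model might be compiled and trained for those 300 epochs. The answer doesn't show these lines, so the loss and optimizer below simply mirror the question's build_and_compile_model, and train_dataset/train_labels are the arrays from the question's main():

model.compile(loss='mean_absolute_error',
              optimizer=tf.keras.optimizers.Adam(0.001))
history = model.fit(train_dataset,
                    train_labels,
                    validation_split=0.2,
                    verbose=0, epochs=300)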

hts6caw3 2#

Thanks to lejlot's patient answer, I found my stupid sorting mistake. However, my model still wasn't working.

model = keras.Sequential(
    [layers.Conv2D(num_channels,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   input_shape=(image_height, image_width, 1),
                   activity_regularizer=regularizer),
     layers.Dense(64, activation='relu'),
     layers.Dense(64, activation='relu'),
     layers.Dense(1)])

His model is much simpler, but I want to learn how to use Conv2D layers for more complicated problems. After reading another tutorial on using them for classification, I tried adding a Flatten layer before the Dense layers.

model = keras.Sequential(
    [layers.Conv2D(num_channels,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   input_shape=(image_height, image_width, 1),
                   activity_regularizer=regularizer),
     layers.Flatten(),
     layers.Dense(64, activation='relu'),
     layers.Dense(64, activation='relu'),
     layers.Dense(1)])

That worked really well, and it fits the data within 25 epochs.

Trained for 0:00:09.873203
           Mean absolute error [angle]
dnn_model                     4.833055
7/7 [==============================] - 0s 3ms/step
[210.474 147.593 327.796 120.112 163.402 178.04  333.604 342.488 119.694
 240.8  ]
[206.194 147.967 317.917 120.808 161.737 177.634 327.112 342.938 120.33
 231.055]
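
One way to see what the Flatten layer changes (my own check, not part of the original post) is to print the layer output shapes. Without Flatten, each Dense layer only acts on the last (channel) axis of the convolution output, so the model ends with one prediction per pixel instead of one per image:

model = build_and_compile_model()  # the original model from the question, without Flatten
model.summary()
# conv2d  -> (None, 64, 64, 200)
# dense   -> (None, 64, 64, 64)
# dense_1 -> (None, 64, 64, 64)
# dense_2 -> (None, 64, 64, 1)   one value per pixel, not (None, 1) per image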

The model lejlot recommended is much simpler:

model = keras.Sequential(
    [
        layers.Flatten(),
        layers.Dense(32),
        layers.Dense(1)])

But it didn't perform as well, even after 300 epochs.

Trained for 0:00:10.439293
           Mean absolute error [angle]
dnn_model                     21.22843
7/7 [==============================] - 0s 833us/step
[210.474 147.593 327.796 120.112 163.402 178.04  333.604 342.488 119.694
 240.8  ]
[225.25  156.989 325.547 123.832 111.674  89.614 308.756 331.706 122.509
 237.6  ]

kr98yfug 3#

Make a deeper model:

model = keras.Sequential(
    [layers.Conv2D(32,
                   3,
                   padding='same',
                   activation='relu',
                   input_shape=(image_height, image_width, 1),
                   activity_regularizer=regularizer),
     layers.Conv2D(64,
                   3,
                   padding='same',
                   activation='relu',
                   activity_regularizer=regularizer),
     layers.Flatten(),  # flatten before the Dense layers, as in the fix above
     layers.Dense(128, activation='relu'),
     layers.Dense(64, activation='relu'),
     layers.Dense(1)])
