web-dev-qa-db-ja.com

tensorflow ValueError: features は `Tensor` の辞書である必要があります。指定された型: <class 'tensorflow.python.framework.ops.Tensor'>

これが私のコードです。TensorFlow のバージョンは 1.6.0、Python のバージョンは 3.6.4 です。Dataset API で csv ファイルを直接読み込むように書いた場合は、エラーなくトレーニングできます。しかし、csv ファイルを tfrecords ファイルに変換して読み込むとエラーになります。インターネットで検索すると、多くの人が TensorFlow を更新する必要があると言っていますが、私の場合はそれでは解決しません。

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

feature_names = [
    'SepalLength',
    'SepalWidth',
    'PetalLength',
    'PetalWidth'
]


def my_input_fn(is_shuffle=False, repeat_count=1):
    dataset = tf.data.TFRecordDataset(['csv.tfrecords'])  # filename is a list

    def parser(record):
        keys_to_features = {
            'label': tf.FixedLenFeature((), dtype=tf.int64),
            'features': tf.FixedLenFeature(shape=(4,), dtype=tf.float32),
        }
        parsed = tf.parse_single_example(record, keys_to_features)
        return parsed['features'], parsed['label']

    dataset = dataset.map(parser)
    if is_shuffle:
        # Randomizes input using a window of 256 elements (read into memory)
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.batch(32)
    dataset = dataset.repeat(repeat_count)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    return features, labels

feature_columns = [tf.feature_column.numeric_column(k) for k in feature_names]

classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,  # The input features to our model
    hidden_units=[10, 10],  # Two layers, each with 10 neurons
    n_classes=3,
    model_dir='iris_model_2')  # Path to where checkpoints etc are stored

classifier.train(input_fn=lambda: my_input_fn(is_shuffle=True, repeat_count=100))

すると、次のエラーメッセージが返されます。

INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_model_dir': 'iris_model_2', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1163d9f28>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
INFO:tensorflow:Calling model_fn.
Traceback (most recent call last):
  File "/Users/huanghelin/Desktop/TFrecord/try2.py", line 45, in <module>
    classifier.train(input_fn=lambda: my_input_fn(is_shuffle=True, repeat_count=100))
  File "/Users/huanghelin/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 352, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "/Users/huanghelin/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 812, in _train_model
    features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
  File "/Users/huanghelin/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 793, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "/Users/huanghelin/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/canned/dnn.py", line 354, in _model_fn
    config=config)
  File "/Users/huanghelin/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/canned/dnn.py", line 161, in _dnn_model_fn
    'Given type: {}'.format(type(features)))
ValueError: features should be a dictionary of `Tensor`s. Given type: <class 'tensorflow.python.framework.ops.Tensor'>
7
LIN

Google開発者ブログTensorFlow機能列の紹介

この記事を読めば理解できます。`def parser(record)` の中に次の 3 行を追加するだけです: `my_features = {}` / `for idx, names in enumerate(feature_names):` / `my_features[names] = parsed['features'][idx]`

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

feature_names = [
    'SepalLength',
    'SepalWidth',
    'PetalLength',
    'PetalWidth'
]


def my_input_fn(is_shuffle=False, repeat_count=1):
    dataset = tf.data.TFRecordDataset(['csv.tfrecords'])  # filename is a list

    def parser(record):
        keys_to_features = {
            'label': tf.FixedLenFeature((), dtype=tf.int64),
            'features': tf.FixedLenFeature(shape=(4,), dtype=tf.float32),
        }
        parsed = tf.parse_single_example(record, keys_to_features)
        my_features = {}
        for idx, names in enumerate(feature_names):
            my_features[names] = parsed['features'][idx]
        return my_features, parsed['label']

    dataset = dataset.map(parser)
    if is_shuffle:
        # Randomizes input using a window of 256 elements (read into memory)
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.batch(32)
    dataset = dataset.repeat(repeat_count)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    return features, labels

feature_columns = [tf.feature_column.numeric_column(k) for k in feature_names]

classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,  # The input features to our model
    hidden_units=[10, 10],  # Two layers, each with 10 neurons
    n_classes=3,
    model_dir='lalalallal')  # Path to where checkpoints etc are stored

classifier.train(input_fn=lambda: my_input_fn(is_shuffle=True, repeat_count=100))
3
LIN

Tensorflowの例から

線形推定量へのリンク例

def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
  def input_function():
    ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))
    if shuffle:
      ds = ds.shuffle(1000)
    ds = ds.batch(batch_size).repeat(num_epochs)
    return ds
  return input_function
0
Abhishek kumar