# Toy training set: six image paths and their binary class labels.
images = tf.constant(["train/img1.png", "train/img2.png", "train/img3.png",
                      "train/img4.png", "train/img5.png", "train/img6.png"])
labels = tf.constant([0, 0, 0, 1, 1, 1])


def preprocess(image_path, label):
    """Load one PNG from disk and decode it to a 3-channel uint8 tensor.

    Args:
        image_path: scalar string tensor, path to the image file.
        label: scalar int tensor, the class label (passed through).

    Returns:
        (image, label) tuple.
    """
    image_data = tf.read_file(image_path)
    # NOTE(review): decode_image does not set a static shape; if downstream
    # layers need a known shape, add image.set_shape(...) — confirm the
    # expected image dimensions.
    image = tf.image.decode_image(image_data, channels=3)
    return image, label


def train_input_fn():
    """Input fn: shuffled, infinitely repeating batches of (image, label).

    Returns:
        The next-element op of a one-shot iterator, yielding batches of 2.
    """
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    # Shuffle BEFORE the expensive decode so the shuffle buffer holds cheap
    # (path, label) pairs instead of decoded images. preprocess is
    # deterministic per-element, so the output distribution is unchanged.
    dataset = dataset.shuffle(6)  # buffer covers the whole 6-element set
    dataset = dataset.map(preprocess)
    dataset = dataset.repeat()    # repeat forever; Estimator bounds by steps
    dataset = dataset.batch(2)
    # Renamed from `iter`, which shadowed the Python builtin.
    iterator = dataset.make_one_shot_iterator()
    return iterator.get_next()
def model_fn(features, labels, mode, params):
    """Estimator model_fn: one 256-unit hidden dense layer + linear classifier.

    Args:
        features: dict of input tensors; reads features["inputs"].
            NOTE(review): train_input_fn in this file returns a bare
            (image, label) pair, not a dict with an "inputs" key — confirm
            which input fn actually feeds this model.
        labels: int tensor of class ids (unused in PREDICT mode).
        mode: a tf.estimator.ModeKeys value.
        params: dict; reads params["num_classes"].

    Returns:
        An EstimatorSpec appropriate for the given mode.
    """
    hidden_activations = tf.layers.dense(features["inputs"], 256)
    logits = tf.layers.dense(hidden_activations, params["num_classes"])
    predicted_classes = tf.argmax(logits, 1)

    # For PREDICT, the predicted classes and probabilities are needed.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {"class_ids": predicted_classes[:, tf.newaxis],
                       "probabilities": tf.nn.softmax(logits),
                       "logits": logits}
        return EstimatorSpec(mode, predictions=predictions)

    # For TRAIN and EVAL, compute the loss.
    loss = sparse_softmax_cross_entropy(labels, logits)

    # For EVAL, compute the evaluation metrics.
    if mode == tf.estimator.ModeKeys.EVAL:
        accuracy = tf.metrics.accuracy(labels, predicted_classes)
        metrics = {"accuracy": accuracy}
        return EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)

    # For TRAIN, return also the optimizer.
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    # FIX: pass global_step so the Estimator's step counter advances.
    # Without it, step-bounded training, checkpoint naming, and summaries
    # all see a global step frozen at 0.
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return EstimatorSpec(mode, loss=loss, train_op=train_op)