Module keras.distribute.distribute_strategy_test

Tests for tf.keras models using tf.distribute.Strategy.

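All of the tests below exercise the same basic pattern: a Keras model is built and compiled inside `strategy.scope()` and then trained, evaluated, or used for prediction, usually on a `tf.data.Dataset` so the input can be distributed across replicas. A minimal sketch of that pattern (illustrative only, not taken from the test file itself) might look like:

    import numpy as np
    import tensorflow as tf

    # Build and compile the model under the strategy so its variables are mirrored.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
      model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
      model.compile(optimizer='sgd', loss='mse', metrics=['mae'])

    # Train, evaluate and predict on batched tf.data datasets.
    inputs = np.zeros((64, 3), dtype=np.float32)
    targets = np.zeros((64, 4), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).batch(8)
    model.fit(dataset, epochs=1)
    model.evaluate(dataset)
    model.predict(tf.data.Dataset.from_tensor_slices(inputs).batch(8))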
Source code
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using tf.distribute.Strategy."""

import tensorflow.compat.v2 as tf

import os

from absl.testing import parameterized
import numpy as np

import keras
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from keras import backend
from keras import testing_utils
from keras.distribute import distributed_training_utils
from keras.distribute import distributed_training_utils_v1
from keras.distribute import multi_worker_testing_utils
from keras.distribute import optimizer_combinations
from keras.distribute.strategy_combinations import all_strategies
from keras.distribute.strategy_combinations import multi_worker_mirrored_strategies
from keras.distribute.strategy_combinations import strategies_minus_default_minus_tpu
from keras.distribute.strategy_combinations import strategies_minus_tpu
from keras.distribute.strategy_combinations import tpu_strategies
from keras.engine import base_layer_utils
from keras.mixed_precision import policy
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from keras.utils import losses_utils
from keras.utils import np_utils

_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2

# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.

# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.


def simple_sequential_model():
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
  model.add(keras.layers.Dropout(0.1))
  model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
  return model


def simple_subclassed_model(num_labels=_NUM_CLASS):

  class _SimpleMLP(keras.Model):

    def __init__(self, num_labels):
      super(_SimpleMLP, self).__init__()
      self.dense = keras.layers.Dense(num_labels)

    def call(self, inputs):
      return self.dense(inputs)

  return _SimpleMLP(num_labels)


def simple_multi_inputs_multi_outputs_model():
  input_a = keras.layers.Input(shape=(16,), name='input_a')
  input_b = keras.layers.Input(shape=(16,), name='input_b')

  merged = keras.layers.concatenate([input_a, input_b], name='merge')
  output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
  output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
  model = keras.models.Model(
      inputs=[input_a, input_b], outputs=[output_c, output_d])
  return model


def get_multi_inputs_multi_outputs_data():
  (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=3,
      random_seed=_RANDOM_SEED)
  (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=2,
      random_seed=_RANDOM_SEED)
  (m_train, _), (m_test, _) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(8,),
      num_classes=2,
      random_seed=_RANDOM_SEED)

  c_train = np_utils.to_categorical(c_train)
  c_test = np_utils.to_categorical(c_test)
  d_train = np_utils.to_categorical(d_train)
  d_test = np_utils.to_categorical(d_test)

  train_data = {
      'input_a': a_train,
      'input_b': b_train,
      'input_m': m_train,
      'output_c': c_train,
      'output_d': d_train
  }
  test_data = {
      'input_a': a_test,
      'input_b': b_test,
      'input_m': m_test,
      'output_c': c_test,
      'output_d': d_test
  }

  return (train_data, test_data)


def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  if repeat:
    dataset = dataset.repeat(repeat)
  # TPUs currently require fully defined input shapes, drop_remainder ensures
  # the input will have fully defined shapes.
  if backend.is_tpu_strategy(distribution):
    return dataset.batch(batch_size, drop_remainder=True)
  else:
    return dataset.batch(batch_size)


def get_model():
  x = keras.layers.Input(shape=(3,), name='input')
  y = keras.layers.Dense(4, name='dense')(x)
  model = keras.Model(x, y)
  return model


def get_sample_weights_model():
  x = keras.layers.Input(shape=(1,), name='input')
  y = keras.layers.Dense(
      1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(
          x)
  model = keras.Model(x, y)
  return model


def get_dataset(distribution):
  inputs = np.zeros((10, 3), dtype=np.float32)
  targets = np.zeros((10, 4), dtype=np.float32)
  dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
  dataset = dataset.repeat(100)
  dataset = batch_wrapper(dataset, 10, distribution)
  return dataset


def get_predict_dataset(distribution):
  inputs = np.zeros((10, 3), dtype=np.float32)
  dataset = tf.data.Dataset.from_tensor_slices(inputs)
  dataset = dataset.repeat(100)
  dataset = batch_wrapper(dataset, 10, distribution)
  return dataset


def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None):
  if targets is not None:
    input_slices = (inputs, targets)
    dummy_op = (lambda inp, target: True)
  else:
    input_slices = inputs
    dummy_op = (lambda inp: True)

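  # Note: the no-op `filter` below prevents tf.data from statically inferring
  # the dataset size, so `tf.data.experimental.cardinality` reports
  # UNKNOWN_CARDINALITY for the returned dataset, which is what tests relying
  # on this helper expect.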
  original_dataset = (tf.data.Dataset.from_tensor_slices(input_slices))
  ds_with_unknown_cardinality = (
      original_dataset.filter(dummy_op).batch(10, drop_remainder=True))
  return ds_with_unknown_cardinality


def multi_input_output_model():
  a = keras.layers.Input(shape=(3,), name='input_a')
  b = keras.layers.Input(shape=(5,), name='input_b')
  # TODO(anjalisridhar): Change the output dimension of the second Dense layer
  # once the iterator output validation issue has been fixed.
  dense_1 = keras.layers.Dense(7, name='dense_1')
  dense_2 = keras.layers.Dense(7, name='dense_2')
  c = dense_1(a)
  d = dense_2(b)
  e = keras.layers.Dropout(0.5, name='dropout')(c)
  model = keras.models.Model([a, b], [d, e])
  return model


def strategy_minus_tpu_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=strategies_minus_tpu, mode=['graph', 'eager'])


def tpu_strategy_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies, mode=['graph', 'eager'])


def tpu_strategy_combinations_graph_only():
  return tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies, mode=['graph'])


def multi_worker_strategy_combinations_eager_only():
  return tf.__internal__.test.combinations.combine(
      distribution=multi_worker_mirrored_strategies, mode=['eager'])


def all_strategy_combinations():
  return strategy_minus_tpu_combinations() + tpu_strategy_combinations(
  ) + multi_worker_strategy_combinations_eager_only()


def all_strategy_minus_default_and_tpu_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=[
          tf.__internal__.distribute.combinations.one_device_strategy,
          tf.__internal__.distribute.combinations.one_device_strategy_gpu,
          tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
          tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
      ],
      mode=['graph', 'eager'])


def all_strategy_combinations_minus_default():
  return (all_strategy_minus_default_and_tpu_combinations() +
          tpu_strategy_combinations() +
          multi_worker_strategy_combinations_eager_only())


def strategy_and_optimizer_combinations():
  non_tpu_strategies = tf.__internal__.test.combinations.times(
      strategy_minus_tpu_combinations(),
      tf.__internal__.test.combinations.combine(
          optimizer=[
              optimizer_combinations.adagrad_optimizer_v1_fn,
              optimizer_combinations.adam_optimizer_v1_fn,
              optimizer_combinations.gradient_descent_optimizer_v1_fn,
              optimizer_combinations.rmsprop_optimizer_v1_fn,
              optimizer_combinations.adadelta_optimizer_keras_v2_fn,
              optimizer_combinations.adagrad_optimizer_keras_v2_fn,
              optimizer_combinations.adam_optimizer_keras_v2_fn,
              optimizer_combinations.adamax_optimizer_keras_v2_fn,
              optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
              optimizer_combinations.nadam_optimizer_keras_v2_fn,
              optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
              optimizer_combinations.ftrl_optimizer_keras_v2_fn
          ]))
  tpu_strategies_graph = tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies,
      mode=['graph'],
      optimizer=[
          optimizer_combinations.adagrad_optimizer_v1_fn,
          optimizer_combinations.adam_optimizer_v1_fn,
          optimizer_combinations.gradient_descent_optimizer_v1_fn,
          optimizer_combinations.rmsprop_optimizer_v1_fn,
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn
      ])
  tpu_strategies_eager = tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies,
      mode=['eager'],
      optimizer=[
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn
      ])
  multi_worker_eager = tf.__internal__.test.combinations.combine(
      distribution=multi_worker_mirrored_strategies,
      mode=['eager'],
      optimizer=[
          optimizer_combinations.adadelta_optimizer_keras_v2_fn,
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.adamax_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.nadam_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
          optimizer_combinations.ftrl_optimizer_keras_v2_fn
      ])
  return (non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph +
          multi_worker_eager)


class BatchCountingCB(keras.callbacks.Callback):

  def __init__(self):
    super(BatchCountingCB, self).__init__()
    self.train_begin_batches = []
    self.train_end_batches = []
    self.test_begin_batches = []
    self.test_end_batches = []
    self.predict_begin_batches = []
    self.predict_end_batches = []

  def on_train_batch_begin(self, batch, logs=None):
    self.train_begin_batches.append(batch)

  def on_train_batch_end(self, batch, logs=None):
    self.train_end_batches.append(batch)

  def on_test_batch_begin(self, batch, logs=None):
    self.test_begin_batches.append(batch)

  def on_test_batch_end(self, batch, logs=None):
    self.test_end_batches.append(batch)

  def on_predict_batch_begin(self, batch, logs=None):
    self.predict_begin_batches.append(batch)

  def on_predict_batch_end(self, batch, logs=None):
    self.predict_end_batches.append(batch)


class TestDistributionStrategyWithNumpyArrays(tf.test.TestCase,
                                              parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_no_batch_size(self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync
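    # For example, under a strategy that only supports per-replica batch sizes
    # and runs 2 replicas in sync, the default global batch size of 32 used
    # below corresponds to a per-replica batch size of 16.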

    with self.cached_session():
      # Default global batch size 32 for input with 64 samples run in 2 steps
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # Computed global batch size of 20 is lower than 32 when we pass fewer samples.
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 20, steps=None, batch_size=None)
      self.assertEqual(batch_size, 20 // replica_scale_factor)
      self.assertEqual(steps, 1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_no_batch_size(
      self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed global batch size is correct when 1 step is specified
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=1, batch_size=None)
      self.assertEqual(batch_size, 64 // replica_scale_factor)
      self.assertEqual(steps, 1)

      # Computed global batch size is correct when 2 steps are specified
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=2, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # All samples cannot be consumed in the specified number of steps
      with self.assertRaisesRegex(ValueError, 'not divisible by steps'):
        distributed_training_utils_v1.get_input_params(
            distribution, 63, steps=2, batch_size=None)

      # This case differs between strategies because the supported batch size
      # may be either global or per-replica.
      if replica_scale_factor == 1:
        # Computed global batch size is correct even if it is not shardable
        steps, batch_size = distributed_training_utils_v1.get_input_params(
            distribution, 63, steps=3, batch_size=None)
        self.assertEqual(batch_size, 21)
        self.assertEqual(steps, 3)
      else:
        # Computed global batch size cannot be sharded across replicas
        with self.assertRaisesRegex(
            ValueError, 'could not be sharded evenly '
            'across the sync replicas'):
          distributed_training_utils_v1.get_input_params(
              distribution, 63, steps=1, batch_size=None)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_with_batch_size(
      self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed number of steps is correct for the specified batch size
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=16)
      self.assertEqual(batch_size, 16)
      self.assertEqual(steps, 4 // replica_scale_factor)

      # Computed number of steps is correct for the specified batch size
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=32)
      self.assertEqual(batch_size, 32)
      self.assertEqual(steps, 2 // replica_scale_factor)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_with_batch_size(
      self, distribution):
    with self.cached_session():
      # No change to steps and batch size if both specified and feasible
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=5, batch_size=3)
      self.assertEqual(batch_size, 3)
      self.assertEqual(steps, 5)

      # Number of samples is less than global batch size * steps
      with self.assertRaisesRegex(ValueError, 'less than samples required'):
        distributed_training_utils_v1.get_input_params(
            distribution, 64, steps=10, batch_size=13)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_numpy_arrays(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

        inputs = np.zeros((64, 3), dtype=np.float32)
        targets = np.zeros((64, 4), dtype=np.float32)

        # Call fit with validation data
        model.fit(
            inputs,
            targets,
            epochs=1,
            batch_size=2,
            verbose=0,
            validation_data=(inputs, targets))

        # TODO(anjalisridhar): We need tests for when the batch size and steps
        # are smaller and result in a 0 batch_size and steps value.
        model.evaluate(inputs, targets)
        model.evaluate(inputs, targets, batch_size=8)

        model.predict(inputs)
        model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_mixed_precision(self, distribution):
    if isinstance(distribution,
                  (tf.compat.v1.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('b/152097775')
    if backend.is_tpu_strategy(distribution):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'
    with self.cached_session(), \
         distribution.scope(), \
         policy.policy_scope(policy_name):
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      x = keras.layers.Input(shape=(3,), name='input')
      y = keras.layers.Dense(4, name='dense')(x)
      y = keras.layers.Activation('softmax', dtype='float32')(y)
      model = keras.Model(x, y)
      loss = 'mse'
      metrics = ['mae']
      model.compile(
          optimizer,
          loss,
          metrics=metrics)

      # We need to pass float32 since TPUs do not support float64, even though
      # these arrays will immediately be cast to bfloat16 on TPUs. We also
      # cannot pass bfloat16, as Numpy does not support it.
      inputs = np.zeros((64, 3), dtype='float32')
      targets = np.zeros((64, 4), dtype='float32')

      model.fit(
          inputs,
          targets,
          epochs=1,
          batch_size=2,
          verbose=0,
          validation_data=(inputs, targets))

      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_operator_overload_mixed_precision(self, distribution):
    # Regression test checking that a previously fixed bug does not reoccur.
    # Adding an AutoCastVariable to a tensor on a TPU, where the variable was
    # the LHS of the '+' operator, used to cause the gradient w.r.t. the
    # variable to be None.
    if isinstance(distribution,
                  (tf.compat.v1.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('b/152097775')

    if backend.is_tpu_strategy(distribution):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v', ())
        self.v2 = self.add_weight('v', ())

      def call(self, inp):
        inp += self.v1
        return self.v2 + inp

    with self.cached_session(), distribution.scope():
      layer = MyLayer(dtype=policy_name)
      def run_fn():
        x = np.array([1.])
        with tf.GradientTape() as tape:
          y = layer(x)
        grad_v1, grad_v2 = tape.gradient(y, [layer.v1, layer.v2])
        return grad_v1, grad_v2
      if tf.executing_eagerly():
        run_fn = tf.function(run_fn)

      grad_v1, grad_v2 = distribution.run(run_fn)
      self.assertIsNotNone(grad_v1)
      self.assertIsNotNone(grad_v2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[tf.__internal__.distribute.combinations.one_device_strategy],
          mode=['graph', 'eager']))
  def test_optimizer_in_cross_replica_context_raises_error(self, distribution):

    with self.cached_session(), distribution.scope():
      model = keras.models.Sequential([keras.layers.Dense(1)])
      x = np.array([[1.]])
      with tf.GradientTape() as tape:
        y = model(x)
      gradients = tape.gradient(y, model.trainable_variables)
      optimizer = gradient_descent_keras.SGD()

      with self.assertRaisesRegex(RuntimeError,
                                  'cannot be called in cross-replica context'):
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_nested_numpy_arrays(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      targets = [output_d_np, output_e_np]

      # Call fit on the nested numpy inputs and targets
      model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)

      # TODO(anjalisridhar): We need tests for when the batch size and steps
      # are smaller and result in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=['graph', 'eager']) +
      tf.__internal__.test.combinations.combine(
          distribution=multi_worker_mirrored_strategies, mode=['eager']))
  def test_numpy_with_sample_weights(self, distribution):
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)

      result = model.evaluate(
          inputs,
          targets,
          batch_size=2,
          sample_weight=sample_weights,
          verbose=1)
      # The per-sample loss is multiplied by the corresponding sample weight.
      # The average of these weighted losses is the return value of the
      # `evaluate` call. For example, in the test above the average weighted
      # loss is calculated in the following manner:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # The first division is by the number of samples in each batch and the
      # second is by the number of steps/batches the loss is aggregated over.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
      # batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
      result = model.evaluate(inputs, targets, batch_size=2, verbose=1)
      self.assertAllClose(result, 13.5)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_flatten_predict_outputs(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = multi_input_output_model()
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # We take 6 input samples with each input having a dimension of 3 or 5.
      input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      outs = model.predict(inputs)
      # `predict` returns a list equal in length to the number of model outputs.
      # In this test our model has two outputs and each element of `outs`
      # corresponds to all the samples of one of the model outputs.
      self.assertLen(outs, 2)
      # Each of the output samples has a dimension of 7. We should process all
      # of the available input samples (6).
      self.assertAllEqual([6, 7], outs[0].shape)
      self.assertAllEqual([6, 7], outs[1].shape)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(),
                         tf.__internal__.test.combinations.combine(batch_size=[4, 6])))
  def test_evaluate_with_partial_batch(self, distribution, batch_size):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss, metrics=metrics)

      x = np.random.random((10, 3)).astype('float32')
      y = np.random.random((10, 4)).astype('float32')

      # As the sample size is 10, batching by 4 or 6 makes the last batch a
      # partial batch. Also, `evaluate()` on numpy inputs without a
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters for the ground truth.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      evaluate_ground_truth = cpu_model.evaluate(x, y)

      # We don't compare the loss because loss is currently not computed as a
      # metric in Keras; the loss value is inaccurate for the last partial
      # batch since its samples are weighted more heavily in the average.
      steps = np.ceil(10.0 / batch_size)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(
              x, y, batch_size=batch_size, steps=steps)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when a final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_with_partial_batch(self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss)

      inputs = np.random.random((10, 3)).astype(np.float32)

      # As the sample size is 10, we batch by 4 so that the last batch is a
      # partial batch. Also, `predict()` on numpy inputs without a
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters for the ground truth.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      predict_ground_truth = cpu_model.predict(inputs)
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4, steps=3),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when a final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only())
  def test_no_target_model(self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)

      class MyLayer(keras.layers.Layer):

        def call(self, inputs, training=None):
          self.add_loss(tf.reduce_sum(inputs), inputs=True)
          return inputs

      with distribution.scope():
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
        model.add(MyLayer())
        model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))

        model.compile(optimizer)
        inputs = np.zeros((20, 10), np.float32)

        model.fit(inputs, epochs=1, steps_per_epoch=2)
        model.predict(inputs, steps=1)
        model.evaluate(inputs, steps=1)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_multi_output_model_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = simple_multi_inputs_multi_outputs_model()
      cpu_model.compile(optimizer, loss)

      input_data, _ = get_multi_inputs_multi_outputs_data()
      input_dict = {
          'input_a': input_data['input_a'],
          'input_b': input_data['input_b'],
      }

      # As the sample size is 200, we batch by 18 so that the last batch is a
      # partial batch. Also, `predict()` on numpy inputs without a
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters for the ground truth.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      self.assertAllClose(
          model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12),
          cpu_model.predict(input_dict),
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_gradients_are_none(self, distribution):

    if not tf.executing_eagerly():
      self.skipTest('None gradients are not supported in graph mode')

    class DenseWithExtraWeight(keras.layers.Dense):

      def build(self, input_shape):
        # Gradients w.r.t. extra_weights are None
        self.extra_weight_1 = self.add_weight('extra_weight_1', shape=(),
                                              initializer='ones')
        super(DenseWithExtraWeight, self).build(input_shape)
        self.extra_weight_2 = self.add_weight('extra_weight_2', shape=(),
                                              initializer='ones')

    with distribution.scope():
      model = keras.Sequential([DenseWithExtraWeight(4, input_shape=(4,))])
      model.compile('adam', 'mse')

    inputs = np.random.normal(size=(64, 4))
    targets = np.random.normal(size=(64, 4))
    old_kernel = model.get_weights()[1]
    model.fit(inputs, targets)
    new_kernel = model.get_weights()[1]
    self.assertNotAllEqual(old_kernel, new_kernel)


class TestDistributionStrategyWithDatasets(tf.test.TestCase,
                                           parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_on_same_dataset(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      dataset = get_dataset(distribution)

      # Call fit with validation data
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          validation_data=dataset,
          validation_steps=2)
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          validation_data=dataset,
          validation_steps=2)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_model_interleaved_eval_same_as_direct_eval(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        user_controlled_model = get_model()
        user_controlled_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

        interleaved_model = get_model()
        interleaved_model.set_weights(user_controlled_model.get_weights())
        interleaved_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

      dataset = get_dataset(distribution)

      # Call fit with validation interleaved
      interleaved_output = interleaved_model.fit(
          dataset,
          epochs=2,
          steps_per_epoch=2,
          verbose=1,
          validation_data=dataset,
          validation_steps=2,
          shuffle=False)

      # Manually control the validation running after each epoch.
      user_controlled_output = []
      for _ in range(2):
        user_controlled_model.fit(
            dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
        user_controlled_output.append(
            user_controlled_model.evaluate(dataset, steps=2))

      self.assertEqual(interleaved_output.history['val_loss'],
                       [x[0] for x in user_controlled_output])
      val_mean_absolute_error = interleaved_output.history.get(
          'val_mean_absolute_error')
      if not val_mean_absolute_error:
        # The name of the metric changed in TF2.0
        val_mean_absolute_error = interleaved_output.history['val_mae']
      self.assertEqual(val_mean_absolute_error,
                       [x[1] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
                       [x[2] for x in user_controlled_output])

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      input_a_np = np.random.random((10, 3)).astype('float32')
      input_b_np = np.random.random((10, 5)).astype('float32')
      output_d_np = np.random.random((10, 7)).astype('float32')
      output_e_np = np.random.random((10, 7)).astype('float32')

      # Test with tuples
      dataset_tuple = tf.data.Dataset.from_tensor_slices(
          ((input_a_np, input_b_np), (output_d_np, output_e_np)))
      dataset_tuple = dataset_tuple.repeat(100)
      dataset_tuple = dataset_tuple.batch(10)

      model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)

      # Test with dict
      dataset_dict = tf.data.Dataset.from_tensor_slices(({
          'input_a': input_a_np,
          'input_b': input_b_np
      }, (output_d_np, output_e_np)))
      dataset_dict = dataset_dict.repeat(100)
      dataset_dict = dataset_dict.batch(10)

      model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_dictionary_in_the_dataset_b135161171(
      self, distribution):

    if backend.is_tpu_strategy(distribution):
      self.skipTest('b/142805125')

    def custom_loss(predict, label, weight):
      bce = keras.losses.binary_crossentropy(label, predict)
      return tf.reduce_mean(bce * weight)

    with self.cached_session():
      with distribution.scope():
        input_img = keras.layers.Input([64, 64, 3], name='img')
        input_lbl = keras.layers.Input([64, 64, 1], name='lbl')
        input_weight = keras.layers.Input([64, 64], name='weight')
        predict = keras.layers.Conv2D(2, [1, 1], padding='same')(input_img)
        loss_lambda = keras.layers.Lambda(
            lambda x: custom_loss(*x), name='my_loss')
        my_loss = loss_lambda([predict, input_lbl, input_weight])
        model = keras.models.Model(
            inputs=[input_img, input_lbl, input_weight],
            outputs=[predict, my_loss])
        model.add_loss(model.get_layer('my_loss').output)
        model.compile(
            optimizer='adam')

      if tf.executing_eagerly():

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return (inputs,)
      else:

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return inputs, {}

      fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32)
      fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32)
      fake_weights = np.ones([50, 64, 64], dtype=np.float32)

      data = tf.data.Dataset.from_tensor_slices(
          (fake_imgs, fake_lbls, fake_weights)).map(map_fn).batch(10)

      model.fit(data)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset_without_steps(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      inputs = np.zeros((1000, 3), dtype=np.float32)
      targets = np.zeros((1000, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      fit_with_numpy = model.fit(
          inputs, targets, epochs=1, batch_size=10).history
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.batch(10, drop_remainder=True)
      fit_with_ds = model.fit(dataset, epochs=1).history
      eval_with_ds = model.evaluate(dataset)
      predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.batch(10, drop_remainder=True)
      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_predict_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):

    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((20, 3), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      predict_with_numpy = model.predict(inputs, batch_size=10)

      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):
    # TODO(b/155867206): Investigate why this test occasionally segfaults on TPU
    # in eager mode.
    if mode == 'eager' and backend.is_tpu_strategy(distribution):
      self.skipTest('caused segfault with TPU in eager mode.')

    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      inputs = np.zeros((100, 3), dtype=np.float32)
      targets = np.zeros((100, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      fit_with_numpy = model.fit(
          inputs, targets, epochs=1, batch_size=10).history
      fit_with_numpy_multiple_epochs = model.fit(
          inputs, targets, epochs=2, batch_size=10).history
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs, targets)
      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)
      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      eval_with_ds = model.evaluate(dataset)
      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

      fit_with_ds = model.fit(dataset, epochs=1).history
      fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history
      self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          fit_with_numpy_multiple_epochs,
          fit_with_ds_multiple_epochs,
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only())
  def test_on_dataset_with_unknown_cardinality(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            tf.compat.v1.train.GradientDescentOptimizer(0.001),
            loss,
            metrics=metrics)

      inputs = np.zeros((1000, 3), dtype=np.float32)
      targets = np.zeros((1000, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs, targets)
      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)
      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      eval_with_ds = model.evaluate(dataset, steps=100)
      predict_with_ds = model.predict(predict_dataset, steps=100)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

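      # With unknown cardinality and no `steps_per_epoch` argument, `fit`
      # cannot infer how many steps make up an epoch under this strategy, so
      # it is expected to raise.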
      with self.assertRaisesRegex(ValueError,
                                  'Number of steps could not be inferred'):
        model.fit(dataset, epochs=1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      dataset = get_dataset(distribution)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(strategy_and_optimizer_combinations())
  def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
    with self.cached_session():

      with distribution.scope():

        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer(),
            loss)

      dataset = get_dataset(distribution)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.one_device_strategy
          ],
          mode=['graph', 'eager']))
  def test_dataset_wrong_input_shape(self, distribution, mode):
    if mode == 'graph':
      self.skipTest(
          'TODO(b/120943676, b/120957836): Re-enable for graph once the '
          'validation code is restored.')
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # Wrong input shape
      inputs = np.zeros((10, 5), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      with self.assertRaisesRegex(ValueError, 'is incompatible with'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu
          ],
          mode=['graph', 'eager']))
  def test_dataset_external_batch_input_validation(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # Batching is done outside tf.data's `batch`
      inputs = np.zeros((100, 10, 3), dtype=np.float32)
      targets = np.zeros((100, 10, 4), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['graph', 'eager']))
  def test_learning_phase_value(self, distribution):
    # TODO(anjalisridhar): Modify this test to use Lambdas so that we can
    # compare meaningful values. Currently we don't pass the learning phase if
    # the Lambda layer uses the learning phase.
    with self.cached_session():
      with distribution.scope():
        x = keras.layers.Input(shape=(1,), name='input')
        y = keras.layers.Dense(1, kernel_initializer='ones')(x)
        z = keras.layers.Dropout(0.9999)(y)
        model = keras.Model(x, z)
        initial_weights = model.get_weights()

        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.005)
        loss = 'mse'
        metrics = ['acc']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      batch_size = 8
      if isinstance(distribution, (tf.distribute.MirroredStrategy,
                                   tf.compat.v1.distribute.MirroredStrategy)):
        # MirroredStrategy uses global batch size.
        batch_size = 8 * distribution.num_replicas_in_sync

      inputs = np.ones((10, 1), dtype=np.float32)
      targets = np.ones((10, 1), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat().batch(batch_size)
      hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
      self.assertAlmostEqual(hist.history['acc'][0], 0, 0)

      with distribution.scope():
        model.set_weights(initial_weights)
      # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
      # evaluate_output = model.evaluate(dataset, steps=20)
      # self.assertAlmostEqual(evaluate_output[1], 1, 0)

      inputs = np.ones((10, 1), dtype=np.float32)
      predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)

      predict_dataset = predict_dataset.repeat().batch(batch_size)
      output = model.predict(predict_dataset, steps=10)
      # `predict` runs for 10 steps
      ref_output = np.ones((160, 1), dtype=np.float32)
      self.assertArrayNear(output, ref_output, 1e-1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def testOptimizerWithCallbacks(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = get_model()
        optimizer = gradient_descent_keras.SGD(0.01)
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      dataset = get_dataset(distribution)

      def schedule(_):
        return 0.001

      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
      self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr))

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(),
                         tf.__internal__.test.combinations.combine(batch_size=[4, 6])))
  def test_evaluate_with_dataset_with_partial_batch(self, distribution,
                                                    batch_size):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss, metrics=metrics)

      x = np.random.random((10, 3)).astype('float32')
      y = np.random.random((10, 4)).astype('float32')
      dataset = tf.data.Dataset.from_tensor_slices((x, y))

      # As sample size is 10, we make the last batch a partial batch.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      dataset_with_partial_batch = dataset.batch(batch_size)

      # We don't compare the loss because loss is currently not computed as a
      # metric in Keras; the loss value is inaccurate for the last partial
      # batch since its samples are weighted more heavily in the average.
      steps = np.ceil(10.0 / batch_size)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(
              dataset_with_partial_batch, steps=steps)[1:],
          cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:],
          atol=1e-5,
          rtol=1e-5)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:],
          cpu_model.evaluate(dataset_with_partial_batch)[1:],
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_with_dataset_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss)

      inputs = np.random.random((10, 3)).astype(np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs))

      # As sample size is 10, we batch by 4 so that the last batch is
      # a partial batch.
      dataset_with_partial_batch = dataset.batch(4)
      cpu_model.set_weights(model_with_ds_strategy.get_weights())

      self.assertAllClose(
          model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3),
          cpu_model.predict(dataset_with_partial_batch, steps=3),
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_multi_output_model_with_dataset_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = simple_multi_inputs_multi_outputs_model()
      cpu_model.compile(optimizer, loss)

      input_data, _ = get_multi_inputs_multi_outputs_data()
      input_dict = {
          'input_a': input_data['input_a'],
          'input_b': input_data['input_b'],
      }

      dataset = tf.data.Dataset.from_tensor_slices(input_dict)

      # As the sample size is 200 and we batch by 18 (12 steps cover the whole
      # dataset), the last batch is a partial batch.
      dataset_with_partial_batch = dataset.batch(18)
      cpu_model.set_weights(model_with_ds_strategy.get_weights())

      self.assertAllClose(
          model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12),
          cpu_model.predict(dataset_with_partial_batch, steps=12),
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations_minus_default())
  def test_match_model_input_matches_with_dataset_tensors(self, distribution):

    def _create_model_input_output_tensors():
      input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last')
      input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first')
      intermediate_a = keras.layers.Dense(10)(input_a)
      intermediate_b = keras.layers.Dense(10)(input_b)
      merged = keras.layers.Add()([intermediate_a, intermediate_b])
      output = keras.layers.Dense(2)(merged)
      return input_a, input_b, output

    input_dict = {
        'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32),
        'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32)
    }
    target = np.ones((32, 2), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((input_dict, target))
    dataset = dataset.batch(4, drop_remainder=True)

    with self.cached_session():
      with distribution.scope():
        input_a, input_b, output = _create_model_input_output_tensors()
        # `input_a`, whose input name comes last in alphanumeric order, is the
        # first of the model's input layers. If the tensors from `input_dict`
        # were blindly flattened and passed to the model inputs, the `input_a`
        # layer would be matched with the tensor `a_input_sorted_first`,
        # resulting in a shape mismatch.
        model_with_array_input = keras.models.Model(
            inputs=[input_a, input_b], outputs=output)
        model_with_array_input.compile('sgd', 'mse')
        model_weights = model_with_array_input.get_weights()
        model_with_array_input_fit = model_with_array_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history

        input_a, input_b, output = _create_model_input_output_tensors()
        model_with_dict_input = keras.models.Model(
            inputs={
                'z_input_sorted_last': input_a,
                'a_input_sorted_first': input_b,
            },
            outputs=output)
        model_with_dict_input.compile('sgd', 'mse')
        model_with_dict_input.set_weights(model_weights)
        model_with_dict_input_fit = model_with_dict_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history
        self.assertAllClose(
            model_with_dict_input_fit,
            model_with_array_input_fit,
            atol=1e-4,
            rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=['graph', 'eager']) +
      tf.__internal__.test.combinations.combine(
          distribution=multi_worker_mirrored_strategies, mode=['eager']))
  def test_dataset_with_sample_weights(self, distribution):
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
      ds = tf.data.Dataset.from_tensor_slices(
          (inputs, targets, sample_weights)).batch(2)
      result = model.evaluate(ds, verbose=1)
      # The per-sample loss is multiplied by the corresponding sample weight.
      # The average of these weighted losses is the return value of the
      # `evaluate` call. For example, in this test the average weighted loss
      # is calculated as follows:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2) * 0.75 + ((8-3)^2) * 1) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # The first division is by the number of samples in each batch and the
      # second is by the number of steps/batches over which the loss is
      # aggregated.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
      # batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
      ds = tf.data.Dataset.from_tensor_slices((inputs, targets)).batch(2)
      result = model.evaluate(ds, verbose=1)
      self.assertAllClose(result, 13.5)


class TestDistributionStrategyWithDatasetsFile(tf.test.TestCase,
                                               parameterized.TestCase):

  def setUp(self):
    super(TestDistributionStrategyWithDatasetsFile, self).setUp()
    self.input_file_name = os.path.join(self.get_temp_dir(), 'input.tfrecord')
    inputs = np.zeros((20, 3), dtype=np.float32)
    input_dataset = tf.data.Dataset.from_tensor_slices(inputs)
    input_dataset = input_dataset.map(tf.io.serialize_tensor)
    writer = tf.data.experimental.TFRecordWriter(self.input_file_name)
    writer.write(input_dataset)

  # TODO(wxinyi): add a multi-worker test for TPU
  @tf.__internal__.distribute.combinations.generate(multi_worker_strategy_combinations_eager_only())
  def test_predict_on_dataset_shard_options_file_multi_worker_mirrored(
      self, distribution, mode):
    # This test verifies that we successfully switch the auto_shard_policy of
    # an input dataset inside model.predict with MultiWorkerMirroredStrategy
    # to AutoShardPolicy.DATA. Since there is only one input file for multiple
    # workers, AutoShardPolicy.AUTO or AutoShardPolicy.FILE would lead to an
    # error; because we switch to AutoShardPolicy.DATA in model.predict, no
    # error is raised.
    del mode
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      model = get_model()
      loss = 'mse'
      model.compile(optimizer, loss)

    dataset = tf.data.TFRecordDataset(self.input_file_name)
    dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.float32))

    dummy_op = lambda inp: True

    dataset = dataset.filter(dummy_op).batch(8, drop_remainder=True)

    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = \
        tf.data.experimental.AutoShardPolicy.FILE
    dataset = dataset.with_options(options)

    model.predict(dataset, steps=1)


class TestRegularizerLoss(tf.test.TestCase, parameterized.TestCase):

  class IdentityRegularizer(keras.regularizers.Regularizer):

    def __call__(self, x):
      return tf.identity(x)

  class AddLayer(keras.layers.Layer):

    def build(self, _):
      self.v = self.add_weight(
          'v', (),
          initializer='ones',
          regularizer=TestRegularizerLoss.IdentityRegularizer())

    def call(self, inputs):
      return inputs + self.v

  @staticmethod
  def loss_fn(_, y_pred):
    return tf.reduce_mean(y_pred)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(all_strategy_combinations_minus_default()))
  def test_regularizer_loss(self, distribution):
    batch_size = 2
    if not distributed_training_utils.global_batch_size_supported(distribution):
      batch_size //= distribution.num_replicas_in_sync

    # Given an input x, which is always 1, and variable v, this model computes
    # Loss = x + v + regularizer_loss, where regularizer_loss = v and the
    # variable is initialized to 1. Therefore, this model computes
    # Loss = 1 + 2v, so the gradient dLoss/dv = 2. This gradient of 2 is
    # averaged over all examples in a batch and then multiplied by the
    # learning rate of 1. As a result, the model update for one batch should
    # subtract 2 from v, resulting in v being -1. If the regularizer loss is
    # not scaled correctly by the number of replicas, the variable value will
    # be incorrect when the number of replicas is > 1, e.g. it will be -2 with
    # 2 replicas.
    with distribution.scope():
      x = keras.layers.Input(shape=(1,), batch_size=batch_size)
      y = TestRegularizerLoss.AddLayer()(x)
      model = keras.models.Model(inputs=x, outputs=y)
      opt = gradient_descent_keras.SGD(1.)
      model.compile(
          opt,
          loss=TestRegularizerLoss.loss_fn)
      model.fit(
          x=np.array([[1.], [1.]], dtype=np.float32),
          y=np.array([[1.], [1.]], dtype=np.float32),
          batch_size=batch_size)
      v = model.get_weights()[0]
      self.assertEqual(-1.0, v)


@testing_utils.run_all_without_tensor_float_32(
    'Uses Dense layers, which call matmul')
class TestDistributionStrategyWithKerasModels(tf.test.TestCase,
                                              parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_sequential_model(
      self, distribution):
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = simple_sequential_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.zeros((20, 10), np.float32)
      targets = np.zeros((20, 2), np.float32)

    model.fit(inputs, targets, epochs=1, batch_size=10)
    model.predict(inputs, batch_size=10)
    model.evaluate(inputs, targets, batch_size=10)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_functional_model(
      self, distribution):
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = get_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)

    model.fit(inputs, targets, epochs=1)
    model.predict(inputs)
    model.evaluate(inputs, targets)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_distributed_dataset(self, distribution):
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      x, y = np.ones((100, 10)), np.ones((100, 1))
      ds = tf.data.Dataset.from_tensor_slices((x, y))
      ds = ds.batch(10).repeat(2)
      ds = distribution.experimental_distribute_dataset(ds)

      val_ds = tf.data.Dataset.from_tensor_slices((x, y))
      val_ds = val_ds.batch(20)
      val_ds = distribution.experimental_distribute_dataset(val_ds)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_distributed_datasets_from_function(self, distribution):
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      def make_dataset(_):
        x, y = np.ones((100, 10)), np.ones((100, 1))
        ds = tf.data.Dataset.from_tensor_slices((x, y))
        ds = ds.batch(5).repeat()
        return ds

      ds = distribution.distribute_datasets_from_function(make_dataset)
      val_ds = distribution.distribute_datasets_from_function(make_dataset)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input((10, 10, 3))
      x = keras.layers.Conv2D(3, kernel_size=3)(inputs)
      x = keras.layers.Flatten()(x)
      outputs = keras.layers.Dense(1)(x)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=10)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10, 10, 3)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.train_end_batches, [9, 19, 29, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.test_end_batches, [9, 19, 29, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.predict_end_batches, [9, 19, 29, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_last_partial_execution(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_dataset_unknown_size(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    ds = ds.filter(lambda *args, **kwargs: True)  # Makes the size UNKNOWN.
    bc = BatchCountingCB()

    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
      model.fit(ds, epochs=2, callbacks=[bc])

    train_ds = ds.repeat(2)
    model.fit(train_ds, steps_per_epoch=50, epochs=2, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49])

    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
      model.evaluate(ds, callbacks=[bc])

    test_ds = ds.repeat(2)
    model.evaluate(test_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    predict_ds = ds.repeat(2)
    model.predict(predict_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_truncate_to_epoch(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=500)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    bc = BatchCountingCB()
    model.fit(x, y, batch_size=2, epochs=2, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 0])
    self.assertEqual(bc.train_end_batches, [49, 49])

    x, y = np.ones((50, 10)), np.ones((50, 1))
    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0])
    self.assertEqual(bc.test_end_batches, [24])

    x = np.ones((50, 10))
    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0])
    self.assertEqual(bc.predict_end_batches, [24])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_gradient_clipping(self, distribution):

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = tf.Variable(1.)
        self.v2 = tf.Variable(1.)

      def call(self, x):
        return 3 * self.v1 - 3 * self.v2

    x, y = np.ones((10, 1)), np.ones((10, 1))

    with distribution.scope():
      layer = MyLayer()
      model = keras.Sequential([layer])
      optimizer = gradient_descent_keras.SGD(1., clipnorm=2., clipvalue=2.)
    model.compile(optimizer, 'mae')

    if isinstance(distribution,
                  (tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      with self.assertRaisesRegex(ValueError, 'not supported'):
        model.fit(x, y, batch_size=10, epochs=1)
    else:
      model.fit(x, y, batch_size=10, epochs=1)
      self.assertAllClose(self.evaluate(layer.v1), 3.)
      self.assertAllClose(self.evaluate(layer.v2), -1.)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_custom_gradient_transformation(self, distribution):
    if isinstance(distribution,
                  (tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('Not supported with `CentralStorageStrategy`')

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = tf.Variable(1.)
        self.v2 = tf.Variable(-1.)

      def call(self, x):
        return x + self.v1 + self.v2

    def custom_transform(grads_and_vars):
      # Always set gradients to 1.
      return [(tf.ones_like(g), v) for g, v in grads_and_vars]

    x, y = np.ones((10, 1)), np.ones((10, 1))

    with distribution.scope():
      layer = MyLayer()
      model = keras.Sequential([layer])
      optimizer = gradient_descent_keras.SGD(
          1., gradient_transformers=[custom_transform])
    model.compile(optimizer, 'mae')

    model.fit(x, y, batch_size=10, epochs=1)
    self.assertAllClose(self.evaluate(layer.v1), 0.)
    self.assertAllClose(self.evaluate(layer.v2), -2.)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default()))
  def test_distribution_strategy_one_dimensional(self, distribution):
    with distribution.scope():
      inp = keras.layers.Input(shape=(10,))
      out = keras.layers.Dense(3, activation='softmax')(inp)
      model = keras.Model(inputs=[inp], outputs=[out])
      model.compile(
          optimizer='rmsprop',
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])

      x = np.random.random((64, 10)).astype('float32')
      y = np.random.randint(3, size=64)

      model.fit(x, y, epochs=1, steps_per_epoch=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['graph', 'eager'],
          reduction=[
              losses_utils.ReductionV2.AUTO,
              losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
              losses_utils.ReductionV2.SUM
          ]))
  def test_distribution_strategy_with_loss_reduction_types(
      self, distribution, reduction):
    np.random.seed(_RANDOM_SEED)

    def _get_model():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.random.random((64, 10))
    y = np.random.random((64, 1))
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(32)

    model = _get_model()
    model.compile(
        'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction))
    history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False)

    with distribution.scope():
      ds_model = _get_model()
      ds_model.compile(
          'sgd',
          loss=keras.losses.MeanSquaredError(reduction=reduction))
      ds_history = ds_model.fit(
          dataset, steps_per_epoch=2, epochs=1, shuffle=False)
    self.assertArrayNear(history.history['loss'], ds_history.history['loss'],
                         1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default()))
  def test_distribution_strategy_with_symbolic_add_loss(
      self, mode, distribution):

    def _make_model_with_add_loss():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      model.add_loss(tf.reduce_mean(x1))
      model.add_loss(tf.reduce_mean(outputs))
      return model

    x = np.ones((64, 10)).astype('float32')

    model = _make_model_with_add_loss()
    model.compile('sgd')
    history = model.fit(x, epochs=1)

    with distribution.scope():
      ds_model = _make_model_with_add_loss()
      ds_model.compile(
          'sgd')
      ds_history = ds_model.fit(x, epochs=1)

    self.assertAllClose(history.history, ds_history.history)

  # TODO(omalleyt): Investigate flakiness and re-enable.
  @tf.__internal__.distribute.combinations.generate(all_strategy_minus_default_and_tpu_combinations())
  def DISABLED_test_distribution_strategy_with_callable_add_loss(
      self, distribution):

    def _make_model():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      d = keras.layers.Dense(1, kernel_initializer='zeros')
      outputs = d(x2)
      model = keras.Model(inputs, outputs)
      model.add_loss(lambda: 100. * tf.reduce_mean(d.kernel))
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model()
    self.assertLen(model.losses, 1)

    model.compile('sgd', 'mse')
    history = model.fit(x, y, steps_per_epoch=2, epochs=1)

    with distribution.scope():
      ds_model = _make_model()
      self.assertLen(ds_model.losses, 1)
      ds_model.compile('sgd', 'mse')
      ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_minus_default_and_tpu_combinations()))
  def test_distribution_strategy_with_add_metric_in_call(
      self, distribution):

    class Bias(keras.layers.Layer):

      def build(self, input_shape):
        self.bias = self.add_weight(name='bias', initializer='zeros', shape=())

      def call(self, inputs):
        self.add_metric(
            tf.reduce_mean(inputs), name='bias', aggregation='mean')
        return inputs + self.bias

    def _make_model_with_add_metric():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = Bias()(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.one_device_strategy,
              tf.__internal__.distribute.combinations.one_device_strategy_gpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['eager']))
  def test_distribution_strategy_with_add_metric_object(
      self, distribution):

    class Bias(keras.layers.Layer):

      def build(self, input_shape):
        self.bias = self.add_weight(name='bias', initializer='zeros', shape=())
        self.mean = keras.metrics.Mean(name='mean')

      def call(self, inputs):
        self.add_metric(self.mean(inputs))
        return inputs + self.bias

    def _make_model_with_add_metric_object():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = Bias()(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric_object()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric_object()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      # TODO(phillypham): Why does validation_steps > 1 not work on TPUs?
      tf.__internal__.test.combinations.times(
          all_strategy_minus_default_and_tpu_combinations()))
  def test_distribution_strategy_with_add_metric_outside_call(
      self, distribution):

    def _make_model_with_add_metric():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1)
      model = keras.Model(inputs, outputs)
      model.add_metric(
          tf.reduce_mean(x1), name='mid_mean', aggregation='mean')
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu + multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_sparse_tensor_outputs(self, distribution):

    class ToSparse(keras.layers.Layer):
      """Create a sparse tensor based on a given dense tensor."""

      def call(self, inputs):
        indices = tf.where(tf.not_equal(inputs, 0))
        values = tf.gather_nd(inputs, indices)
        shape = tf.shape(inputs, out_type='int64')
        return tf.SparseTensor(indices, values, dense_shape=shape)

    model = keras.Sequential([ToSparse()])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)

    expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
    expected_values = np.array([1, 2, 3])
    expected_dense_shape = np.array([2, 3])

    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu + multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_ragged_tensor_outputs(self, distribution):

    class ToRagged(keras.layers.Layer):
      """Create a ragged tensor based on a given dense tensor."""

      def __init__(self, padding, ragged_rank=1, **kwargs):
        super(ToRagged, self).__init__(**kwargs)
        self._padding = padding
        self._ragged_rank = ragged_rank

      def call(self, inputs):
        return tf.RaggedTensor.from_tensor(
            inputs, padding=self._padding, ragged_rank=self._ragged_rank)

    model = keras.Sequential([ToRagged(padding=0)])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)

    expected_values = [[1], [2, 3]]
    self.assertAllEqual(expected_values, output)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_default_minus_tpu + tpu_strategies +
          multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_correctness_of_add_loss_with_merge_call(self, distribution):
    batch_size = 32

    def _get_model():
      inputs = keras.layers.Input(shape=(1,))
      labels = keras.layers.Input(shape=(1,))
      x = keras.layers.Dense(10, activation='relu')(inputs)
      y = keras.layers.Dense(1)(x)
      model = keras.models.Model([inputs, labels], y)
      model.add_loss(keras.losses.mean_squared_error(labels, y))
      return model

    def _get_data():
      x_train = np.random.rand(64, 1)
      y_train = 3 * x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')
      dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
      dataset = dataset.batch(batch_size)
      return dataset

    with distribution.scope():
      model = _get_model()
      optimizer = gradient_descent_keras.SGD(0.2)

      @tf.function
      def train_step(dist_inputs):

        def step_fn(inputs):
          with tf.GradientTape() as tape:
            logits = model(inputs)

            # Invoke a merge_call()
            tf.distribute.get_replica_context().merge_call(
                lambda d: None)

            # Verify that there is only one loss on the model.
            assert len(model.losses) == 1
            loss_from_model = tf.reduce_sum(
                model.losses) * 1.0 / batch_size

            # Compute loss in this loop.
            loss = keras.losses.mean_squared_error(inputs[1], logits)
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)

            # Verify that the loss computed in this loop is equivalent to the
            # loss from the model that was added via add_loss.
            tf.compat.v1.assert_equal(loss, loss_from_model)

          grads = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(zip(grads, model.trainable_variables))
          return loss

        per_replica_losses = distribution.run(step_fn, args=(dist_inputs,))
        return distribution.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

      dataset = distribution.experimental_distribute_dataset(_get_data())
      for _ in range(2):
        for x in dataset:
          train_step(x)

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['graph', 'eager']))
  def test_unimplemented_parameter_server_strategy(self):
    cluster_spec = multi_worker_testing_utils.create_in_process_cluster(
        num_workers=3, num_ps=2)
    cluster_resolver = SimpleClusterResolver(
        cluster_spec=tf.train.ClusterSpec(cluster_spec),
        task_type='worker',
        task_id=1,
        num_accelerators={'GPU': 0})
    distribution = tf.compat.v1.distribute.experimental.ParameterServerStrategy(
        cluster_resolver)

    self.assertIsInstance(distribution,
                          tf.compat.v1.distribute.experimental.ParameterServerStrategy)

    with self.assertRaisesRegex(NotImplementedError,
                                'ParameterServerStrategy*'):
      with distribution.scope():
        model = simple_sequential_model()
        optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        model.compile(optimizer, loss)


# Models to exercise inserting ancillary layers with add_loss and add_metric.
def _functional_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
  inputs = keras.Input(input_shape, name='images')
  x = keras.layers.Conv2D(32, kernel_size=5, activation='relu')(inputs)
  x = keras.layers.MaxPooling2D(pool_size=2)(x)
  x = keras.layers.Conv2D(64, kernel_size=5, activation='relu')(x)
  x = keras.layers.MaxPooling2D(pool_size=2)(x)
  # Apply L2 regularization to embedding. Use a mix of TensorFlow ops and layers
  # to exercise all code paths.
  x = keras.layers.Flatten(name='embedding')(x)
  l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(x), -1))
  # Apply L1 regularization to next layer.
  x = keras.layers.Dense(1024, activation='relu', name='sparse_embedding')(x)
  l1_loss = keras.layers.Lambda(
      lambda x: tf.reduce_mean(tf.reduce_sum(x, -1)),
      name='l1_loss')(
          x)
  outputs = keras.layers.Dense(num_classes, name='logits')(x)
  model = keras.Model(inputs=inputs, outputs=outputs)
  # Weight regularization terms.
  model.add_loss(keras.layers.Lambda(lambda x: x * l2)(l2_loss))
  model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
  model.add_loss(l1_loss * l1)
  model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
  return model


def _sequential_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
  model = keras.Sequential([
      keras.layers.Conv2D(
          32, kernel_size=5, activation='relu', input_shape=input_shape),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Conv2D(64, kernel_size=5, activation='relu'),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Flatten(name='embedding'),
      keras.layers.Dense(1024, activation='relu', name='sparse_embedding'),
      keras.layers.Dense(num_classes, name='logits'),
  ])
  # Extract layer outputs, add regularization terms, and rescale the metric.
  # Use a mix of TensorFlow ops and layers to exercise all code paths.
  x = model.get_layer('sparse_embedding').get_output_at(-1)
  l1_loss = l1 * tf.reduce_mean(tf.reduce_sum(x, -1))
  model.add_loss(l1_loss)
  model.add_metric(
      keras.layers.Lambda(lambda x: tf.divide(x, l1))(l1_loss),
      aggregation='mean',
      name='l1_loss')
  x = model.get_layer('embedding').get_output_at(-1)
  l2_loss = keras.layers.Lambda(
      lambda x: l2 * tf.reduce_mean(tf.reduce_sum(x * x, -1)),
      name='l2_loss')(
          x)
  model.add_loss(l2_loss)
  model.add_metric(l2_loss / l2, aggregation='mean', name='l2_loss')
  return model


def _functional_with_layer_reuse(input_shape, num_classes, l1, l2):
  base_model = keras.Sequential([
      keras.layers.Conv2D(
          32, kernel_size=5, activation='relu', input_shape=input_shape),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Conv2D(64, kernel_size=5, activation='relu'),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Flatten(),
      keras.layers.Dense(1024, activation='relu'),
      keras.layers.Dense(num_classes, name='logits'),
  ])
  inputs = keras.Input(input_shape, name='images')
  logits = base_model(inputs)
  model = keras.Model(inputs=inputs, outputs=logits)
  # Reuse sequential layer and create new nodes.
  zero_logits = base_model(tf.zeros_like(inputs))
  one_logits = base_model(tf.ones_like(inputs))
  # L2 loss.
  l2_loss = tf.reduce_mean(
      tf.reduce_sum(tf.square(logits - zero_logits), -1))
  model.add_loss(l2_loss * l2)
  model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
  # L1 loss.
  l1_loss = tf.reduce_mean(
      tf.reduce_sum(tf.abs(logits - one_logits), -1))
  model.add_loss(l1_loss * l1)
  model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
  return model


class TestDistributionStrategyWithMultipleAddLossAndMetricCalls(
    tf.test.TestCase, parameterized.TestCase):
  """Tests complex models with multiple add loss and metric calls."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default(),
          tf.__internal__.test.combinations.combine(
              model_fn=[
                  _functional_with_add_loss_and_metric,
                  _sequential_with_add_loss_and_metric,
                  _functional_with_layer_reuse,
              ],
              l1=[0.01],
              l2=[0.1])))
  def test_fit_and_evaluate(self, distribution, model_fn, l1, l2):
    # Make fake MNIST-like image data.
    np.random.seed(_RANDOM_SEED)
    dataset = tf.data.Dataset.from_tensor_slices(
        (np.random.uniform(size=(64, 28, 28, 1)).astype(np.float32),
         np.random.randint(0, 10, size=(64,))))
    dataset = dataset.shuffle(64).batch(
        8 * distribution.num_replicas_in_sync, drop_remainder=True)
    # Make model with distribution strategy and initialize with dataset shape.
    input_shape = tf.data.experimental.get_structure(dataset)[0].shape[1:]
    with distribution.scope():
      model = model_fn(input_shape, 10, l1, l2)
      model.compile(
          optimizer=keras.optimizers.adam_v2.Adam(1e-4),
          loss=keras.losses.SparseCategoricalCrossentropy(
              from_logits=True,
              reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE),
          metrics=[
              keras.metrics.SparseCategoricalAccuracy(),
              keras.metrics.SparseCategoricalCrossentropy(from_logits=True),
          ])
    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)
    results = dict(zip(model.metrics_names, model.evaluate(dataset)))
    # Sanity checks.
    self.assertBetween(results['sparse_categorical_accuracy'], 0.02, 1.)
    self.assertGreater(results['l2_loss'], 0.)
    self.assertGreater(results['l1_loss'], 0.)
    # Assert correctness of the loss calculation and updating of metrics.
    self.assertNear(
        results['l1_loss'] * l1 + results['l2_loss'] * l2 +
        results['sparse_categorical_crossentropy'], results['loss'], 1e-6)


class DeterministicModel(keras.Model):
  """Deterministic Model that always outputs the same initial result.

  It verifies that the `call` method is run inside the same distribution
  strategy that was initially passed to the model.
  """

  def __init__(self, strategy):
    super(DeterministicModel, self).__init__()
    self.x = None
    self.strategy = strategy

  def build(self, input_shape):
    self.x = tf.Variable(tf.ones(shape=()))

  def call(self, inputs, training=None, mask=None):
    active_strategy = tf.distribute.get_strategy()
    if active_strategy is not self.strategy:
      raise ValueError('Model must execute call w/ the original strategy')
    return self.x * inputs


class TestModelCapturesStrategy(tf.test.TestCase, parameterized.TestCase):
  """Tests that model creation captures the strategy."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_fit_and_evaluate(self, distribution):
    dataset = tf.data.Dataset.from_tensor_slices(
        (tf.ones(shape=(64,)), tf.ones(shape=(64,))))
    dataset = dataset.batch(8 * distribution.num_replicas_in_sync)
    # Make model with distribution strategy
    with distribution.scope():
      model = DeterministicModel(distribution)
      optimizer = keras.optimizers.adam_v2.Adam(1e-4)

    # Compile & evaluate the model outside of the distribution strategy scope
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=['binary_accuracy'])

    # Call `optimizer.iterations` out of strategy scope.
    self.assertEqual(model.optimizer.iterations.numpy(), 0)

    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)

    results = model.evaluate(dataset)
    results = dict(zip(model.metrics_names, results))

    # Check that the metrics have a result we expect
    self.assertEqual(results['binary_accuracy'], 1.0)
    self.assertAllClose(results['loss'], 0.0)

    # Assert that all metric/optimizer/model variables were made in the
    # distribution strategy (Test that compile uses the captured
    # distribution strategy)
    metric_vars = tf.nest.flatten(
        [metric.variables for metric in model.metrics])
    for var in metric_vars:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.optimizer._weights:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.variables:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))

    # Metrics must be created in the same scope as the model. This metric is
    # created inside the strategy scope, so compiling with it shouldn't raise
    # any validation errors.
    with distribution.scope():
      metric = keras.metrics.BinaryAccuracy()
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[metric])

    # This should raise an error because the metric is constructed
    # outside of the scope, and not by compile
    if tf.distribute.has_strategy():
      with self.assertRaisesRegex(ValueError, 'All metrics must be created in'):
        model.compile(
            optimizer=keras.optimizers.adam_v2.Adam(1e-4),
            loss=keras.losses.MeanSquaredError(),
            metrics=[keras.metrics.BinaryAccuracy()])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
          mode=['eager']))
  def test_optimizer(self, distribution):
    temp_dir = os.path.join(self.get_temp_dir(), 'ckpt')

    def create_model():
      model = keras.models.Sequential([
          keras.layers.Dense(1),
      ])
      model.compile(optimizer='adam', loss='mse')
      model.build([None, 1])  # create weights.
      self.assertEmpty(model.optimizer.weights)
      return model

    model = create_model()
    x = y = tf.ones(shape=(1, 1))
    model.fit(x=x, y=y, batch_size=1)
    model.save_weights(temp_dir)

    with distribution.scope():
      model = create_model()
      model.load_weights(temp_dir)
      self.assertNotEmpty(model.optimizer.weights)
      self.assertTrue(
          distributed_training_utils.is_distributed_variable(
              model.optimizer.weights[0]))

    with distribution.scope():
      model = create_model()
    # Creating/restoring slot variables outside of the scope is fine.
    model.load_weights(temp_dir)
    self.assertNotEmpty(model.optimizer.weights)
    self.assertTrue(
        distributed_training_utils.is_distributed_variable(
            model.optimizer.weights[0]))


if __name__ == '__main__':
  base_layer_utils.enable_v2_dtype_behavior()
  tf.__internal__.distribute.multi_process_runner.test_main()

Functions

def all_strategy_combinations()
def all_strategy_combinations():
  return strategy_minus_tpu_combinations() + tpu_strategy_combinations(
  ) + multi_worker_strategy_combinations_eager_only()
def all_strategy_combinations_minus_default()
def all_strategy_combinations_minus_default():
  return (all_strategy_minus_default_and_tpu_combinations() +
          tpu_strategy_combinations() +
          multi_worker_strategy_combinations_eager_only())
def all_strategy_minus_default_and_tpu_combinations()
def all_strategy_minus_default_and_tpu_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=[
          tf.__internal__.distribute.combinations.one_device_strategy,
          tf.__internal__.distribute.combinations.one_device_strategy_gpu,
          tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
          tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
      ],
      mode=['graph', 'eager'])
def batch_wrapper(dataset, batch_size, distribution, repeat=None)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  if repeat:
    dataset = dataset.repeat(repeat)
  # TPUs currently require fully defined input shapes, drop_remainder ensures
  # the input will have fully defined shapes.
  if backend.is_tpu_strategy(distribution):
    return dataset.batch(batch_size, drop_remainder=True)
  else:
    return dataset.batch(batch_size)
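For illustration only (this snippet is not part of the test module): under a non-TPU strategy the helper simply batches, while a TPU strategy gets `drop_remainder=True` so every batch has a fully defined static shape.

```python
import numpy as np
import tensorflow.compat.v2 as tf

# Hypothetical usage; `tf.distribute.get_strategy()` stands in for whatever
# strategy a test combination would supply.
ds = tf.data.Dataset.from_tensor_slices(np.zeros((10, 3), dtype=np.float32))
ds = batch_wrapper(ds, 4, tf.distribute.get_strategy())
# Off-TPU this yields batches of 4, 4 and a partial batch of 2.
```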
def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None)
def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None):
  if targets is not None:
    input_slices = (inputs, targets)
    dummy_op = (lambda inp, target: True)
  else:
    input_slices = inputs
    dummy_op = (lambda inp: True)

  original_dataset = (tf.data.Dataset.from_tensor_slices(input_slices))
  ds_with_unknown_cardinality = (
      original_dataset.filter(dummy_op).batch(10, drop_remainder=True))
  return ds_with_unknown_cardinality
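A small illustrative check (not part of the test module) of what "unknown cardinality" means here: the `filter` call hides the dataset size from tf.data, so the cardinality of the returned dataset is UNKNOWN, which is what the partial-batch and unknown-size tests rely on.

```python
import numpy as np
import tensorflow.compat.v2 as tf

# Run eagerly: the helper wraps a filter() around the data, so tf.data can no
# longer infer the number of batches statically.
ds = convert_numpy_to_dataset_with_unknown_cardinality(
    np.zeros((20, 3), dtype=np.float32))
assert (tf.data.experimental.cardinality(ds).numpy() ==
        tf.data.experimental.UNKNOWN_CARDINALITY)
```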
def get_dataset(distribution)
def get_dataset(distribution):
  inputs = np.zeros((10, 3), dtype=np.float32)
  targets = np.zeros((10, 4), dtype=np.float32)
  dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
  dataset = dataset.repeat(100)
  dataset = batch_wrapper(dataset, 10, distribution)
  return dataset
def get_model()
def get_model():
  x = keras.layers.Input(shape=(3,), name='input')
  y = keras.layers.Dense(4, name='dense')(x)
  model = keras.Model(x, y)
  return model
def get_multi_inputs_multi_outputs_data()
def get_multi_inputs_multi_outputs_data():
  (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=3,
      random_seed=_RANDOM_SEED)
  (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=2,
      random_seed=_RANDOM_SEED)
  (m_train, _), (m_test, _) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(8,),
      num_classes=2,
      random_seed=_RANDOM_SEED)

  c_train = np_utils.to_categorical(c_train)
  c_test = np_utils.to_categorical(c_test)
  d_train = np_utils.to_categorical(d_train)
  d_test = np_utils.to_categorical(d_test)

  train_data = {
      'input_a': a_train,
      'input_b': b_train,
      'input_m': m_train,
      'output_c': c_train,
      'output_d': d_train
  }
  test_data = {
      'input_a': a_test,
      'input_b': b_test,
      'input_m': m_test,
      'output_c': c_test,
      'output_d': d_test
  }

  return (train_data, test_data)
def get_predict_dataset(distribution)
def get_predict_dataset(distribution):
  inputs = np.zeros((10, 3), dtype=np.float32)
  dataset = tf.data.Dataset.from_tensor_slices(inputs)
  dataset = dataset.repeat(100)
  dataset = batch_wrapper(dataset, 10, distribution)
  return dataset
def get_sample_weights_model()
def get_sample_weights_model():
  x = keras.layers.Input(shape=(1,), name='input')
  y = keras.layers.Dense(
      1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(
          x)
  model = keras.Model(x, y)
  return model
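As a sanity check on the arithmetic quoted in `test_dataset_with_sample_weights`, the expected weighted loss can be reproduced with plain NumPy. This snippet is illustrative only; it assumes the 'ones'/'zeros' initialization above, so the model's predictions equal its inputs.

```python
import numpy as np

inputs = np.array([[0.], [1.], [2.], [3.]], np.float32)
targets = np.array([[2.], [4.], [6.], [8.]], np.float32)
weights = np.array([0.25, 0.5, 0.75, 1.], np.float32)

# Per-sample squared error, scaled by the sample weight.
weighted_se = ((targets - inputs) ** 2).squeeze() * weights
# Average within each batch of 2, then average the two batch losses.
batch_means = [weighted_se[:2].mean(), weighted_se[2:].mean()]
print(sum(batch_means) / 2)  # -> 10.625, the value asserted in the test.
```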
def multi_input_output_model()
def multi_input_output_model():
  a = keras.layers.Input(shape=(3,), name='input_a')
  b = keras.layers.Input(shape=(5,), name='input_b')
  # TODO(anjalisridhar): Change the output dimension of the second Dense layer
  # once the iterator output validation issue has been fixed.
  dense_1 = keras.layers.Dense(7, name='dense_1')
  dense_2 = keras.layers.Dense(7, name='dense_2')
  c = dense_1(a)
  d = dense_2(b)
  e = keras.layers.Dropout(0.5, name='dropout')(c)
  model = keras.models.Model([a, b], [d, e])
  return model
def multi_worker_strategy_combinations_eager_only()
def multi_worker_strategy_combinations_eager_only():
  return tf.__internal__.test.combinations.combine(
      distribution=multi_worker_mirrored_strategies, mode=['eager'])
def simple_multi_inputs_multi_outputs_model()
def simple_multi_inputs_multi_outputs_model():
  input_a = keras.layers.Input(shape=(16,), name='input_a')
  input_b = keras.layers.Input(shape=(16,), name='input_b')

  merged = keras.layers.concatenate([input_a, input_b], name='merge')
  output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
  output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
  model = keras.models.Model(
      inputs=[input_a, input_b], outputs=[output_c, output_d])
  return model
def simple_sequential_model()
def simple_sequential_model():
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
  model.add(keras.layers.Dropout(0.1))
  model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
  return model
def simple_subclassed_model(num_labels=2)
def simple_subclassed_model(num_labels=_NUM_CLASS):

  class _SimpleMLP(keras.Model):

    def __init__(self, num_labels):
      super(_SimpleMLP, self).__init__()
      self.dense = keras.layers.Dense(num_labels)

    def call(self, inputs):
      return self.dense(inputs)

  return _SimpleMLP(num_labels)
def strategy_and_optimizer_combinations()
def strategy_and_optimizer_combinations():
  non_tpu_strategies = tf.__internal__.test.combinations.times(
      strategy_minus_tpu_combinations(),
      tf.__internal__.test.combinations.combine(
          optimizer=[
              optimizer_combinations.adagrad_optimizer_v1_fn,
              optimizer_combinations.adam_optimizer_v1_fn,
              optimizer_combinations.gradient_descent_optimizer_v1_fn,
              optimizer_combinations.rmsprop_optimizer_v1_fn,
              optimizer_combinations.adadelta_optimizer_keras_v2_fn,
              optimizer_combinations.adagrad_optimizer_keras_v2_fn,
              optimizer_combinations.adam_optimizer_keras_v2_fn,
              optimizer_combinations.adamax_optimizer_keras_v2_fn,
              optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
              optimizer_combinations.nadam_optimizer_keras_v2_fn,
              optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
              optimizer_combinations.ftrl_optimizer_keras_v2_fn
          ]))
  tpu_strategies_graph = tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies,
      mode=['graph'],
      optimizer=[
          optimizer_combinations.adagrad_optimizer_v1_fn,
          optimizer_combinations.adam_optimizer_v1_fn,
          optimizer_combinations.gradient_descent_optimizer_v1_fn,
          optimizer_combinations.rmsprop_optimizer_v1_fn,
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn
      ])
  tpu_strategies_eager = tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies,
      mode=['eager'],
      optimizer=[
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn
      ])
  multi_worker_eager = tf.__internal__.test.combinations.combine(
      distribution=multi_worker_mirrored_strategies,
      mode=['eager'],
      optimizer=[
          optimizer_combinations.adadelta_optimizer_keras_v2_fn,
          optimizer_combinations.adagrad_optimizer_keras_v2_fn,
          optimizer_combinations.adam_optimizer_keras_v2_fn,
          optimizer_combinations.adamax_optimizer_keras_v2_fn,
          optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
          optimizer_combinations.nadam_optimizer_keras_v2_fn,
          optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
          optimizer_combinations.ftrl_optimizer_keras_v2_fn
      ])
  return (non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph +
          multi_worker_eager)
def strategy_minus_tpu_combinations()
Expand source code
def strategy_minus_tpu_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=strategies_minus_tpu, mode=['graph', 'eager'])
def tpu_strategy_combinations()
Expand source code
def tpu_strategy_combinations():
  return tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies, mode=['graph', 'eager'])
def tpu_strategy_combinations_graph_only()
Expand source code
def tpu_strategy_combinations_graph_only():
  return tf.__internal__.test.combinations.combine(
      distribution=tpu_strategies, mode=['graph'])

Classes

class BatchCountingCB

Abstract base class used to build new callbacks.

Callbacks can be passed to keras methods such as fit, evaluate, and predict in order to hook into the various stages of the model training and inference lifecycle.

To create a custom callback, subclass Callback and override the method associated with the stage of interest. See https://www.tensorflow.org/guide/keras/custom_callback for more information.

Example:

>>> training_finished = False
>>> class MyCallback(tf.keras.callbacks.Callback):
...   def on_train_end(self, logs=None):
...     global training_finished
...     training_finished = True
>>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
>>> model.compile(loss='mean_squared_error')
>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
...           callbacks=[MyCallback()])
>>> assert training_finished == True

If you want to use Callback objects in a custom training loop:

  1. You should pack all your callbacks into a single callbacks.CallbackList so they can all be called together.
  2. You will need to manually call all the on_* methods at the appropriate locations in your loop, like this:

```
callbacks = tf.keras.callbacks.CallbackList([…])
callbacks.append(…)

callbacks.on_train_begin(…)
for epoch in range(EPOCHS):
  callbacks.on_epoch_begin(epoch)
  for i, data in dataset.enumerate():
    callbacks.on_train_batch_begin(i)
    batch_logs = model.train_step(data)
    callbacks.on_train_batch_end(i, batch_logs)
  epoch_logs = …
  callbacks.on_epoch_end(epoch, epoch_logs)
final_logs = …
callbacks.on_train_end(final_logs)
```

Attributes

params
Dict. Training parameters (e.g. verbosity, batch size, number of epochs…).
model
Instance of keras.models.Model. Reference of the model being trained.

The logs dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings).
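For instance, a minimal sketch of a callback that reads self.params and the per-epoch logs (the LossLogger class, the tiny model, and the 'loss' key below are illustrative assumptions, not part of this module):

```python
import tensorflow as tf


class LossLogger(tf.keras.callbacks.Callback):
  """Hypothetical callback that prints training params and epoch logs."""

  def on_train_begin(self, logs=None):
    # `self.params` holds the configuration that `fit` was called with.
    print('epochs configured:', self.params.get('epochs'))

  def on_epoch_end(self, epoch, logs=None):
    # `logs` holds metric results for the epoch that just finished.
    print('epoch', epoch, 'loss:', (logs or {}).get('loss'))


model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(loss='mean_squared_error')
model.fit(tf.ones((8, 1)), tf.ones((8, 1)), epochs=2, verbose=0,
          callbacks=[LossLogger()])
```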

Expand source code
class BatchCountingCB(keras.callbacks.Callback):

  def __init__(self):
    super(BatchCountingCB, self).__init__()
    self.train_begin_batches = []
    self.train_end_batches = []
    self.test_begin_batches = []
    self.test_end_batches = []
    self.predict_begin_batches = []
    self.predict_end_batches = []

  def on_train_batch_begin(self, batch, logs=None):
    self.train_begin_batches.append(batch)

  def on_train_batch_end(self, batch, logs=None):
    self.train_end_batches.append(batch)

  def on_test_batch_begin(self, batch, logs=None):
    self.test_begin_batches.append(batch)

  def on_test_batch_end(self, batch, logs=None):
    self.test_end_batches.append(batch)

  def on_predict_batch_begin(self, batch, logs=None):
    self.predict_begin_batches.append(batch)

  def on_predict_batch_end(self, batch, logs=None):
    self.predict_end_batches.append(batch)

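A minimal usage sketch of BatchCountingCB (the model and data below are placeholders, not taken from the tests): with 16 samples and a batch size of 4, the callback records batch indices 0 through 3 for each stage it observes.

```python
import numpy as np
import keras

# Assumes BatchCountingCB (defined above) is in scope.
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile('sgd', 'mse')

cb = BatchCountingCB()
x = np.ones((16, 4), dtype=np.float32)
y = np.ones((16, 1), dtype=np.float32)
model.fit(x, y, batch_size=4, epochs=1, callbacks=[cb], verbose=0)
model.predict(x, batch_size=4, callbacks=[cb], verbose=0)

print(cb.train_begin_batches)   # [0, 1, 2, 3]
print(cb.predict_end_batches)   # [0, 1, 2, 3]
```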
Ancestors

Inherited members

class DeterministicModel (strategy)

Deterministic Model that always outputs the same initial result.

It verifies the call method is run inside the same distribution strategy that the model was initially passed.

Expand source code
class DeterministicModel(keras.Model):
  """Deterministic Model that always outputs the same initial result.

  It verifies the `call` method is run inside the same distribution
  strategy that the model was initially passed.
  """

  def __init__(self, strategy):
    super(DeterministicModel, self).__init__()
    self.x = None
    self.strategy = strategy

  def build(self, input_shape):
    self.x = tf.Variable(tf.ones(shape=()))

  def call(self, inputs, training=None, mask=None):
    active_strategy = tf.distribute.get_strategy()
    if active_strategy is not self.strategy:
      raise ValueError('Model must execute call w/ the original strategy')
    return self.x * inputs

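A minimal usage sketch of DeterministicModel (assuming eager execution; the strategies and input below are illustrative): calling the model under the strategy it was constructed with returns the input scaled by the ones-initialized variable, while running call under a different strategy raises the ValueError shown above.

```python
import tensorflow as tf

# Assumes DeterministicModel (defined above) is in scope.
strategy = tf.distribute.get_strategy()  # default strategy, for illustration
model = DeterministicModel(strategy)

inputs = tf.constant([[1.0], [2.0]])
print(model(inputs))  # equals `inputs`, since the weight is initialized to ones

other = tf.distribute.OneDeviceStrategy('/cpu:0')
with other.scope():
  try:
    model(inputs)  # `call` executes under `other`, not `strategy`
  except ValueError as err:
    print(err)
```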
Ancestors

Inherited members

class TestDistributionStrategyWithDatasets (methodName='runTest')

Base class for tests that need to test TensorFlow.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestDistributionStrategyWithDatasets(tf.test.TestCase,
                                           parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_on_same_dataset(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      dataset = get_dataset(distribution)

      # Call fit with validation data
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          validation_data=dataset,
          validation_steps=2)
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          validation_data=dataset,
          validation_steps=2)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_model_interleaved_eval_same_as_direct_eval(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        user_controlled_model = get_model()
        user_controlled_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

        interleaved_model = get_model()
        interleaved_model.set_weights(user_controlled_model.get_weights())
        interleaved_model.compile(
            optimizer_fn(0.001),
            loss='mse',
            metrics=['mae', keras.metrics.CategoricalAccuracy()])

      dataset = get_dataset(distribution)

      # Call fit with validation interleaved
      interleaved_output = interleaved_model.fit(
          dataset,
          epochs=2,
          steps_per_epoch=2,
          verbose=1,
          validation_data=dataset,
          validation_steps=2,
          shuffle=False)

      # Manually control the validation running after each epoch.
      user_controlled_output = []
      for _ in range(2):
        user_controlled_model.fit(
            dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
        user_controlled_output.append(
            user_controlled_model.evaluate(dataset, steps=2))

      self.assertEqual(interleaved_output.history['val_loss'],
                       [x[0] for x in user_controlled_output])
      val_mean_absolute_error = interleaved_output.history.get(
          'val_mean_absolute_error')
      if not val_mean_absolute_error:
        # The name of the metric changed in TF2.0
        val_mean_absolute_error = interleaved_output.history['val_mae']
      self.assertEqual(val_mean_absolute_error,
                       [x[1] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
                       [x[2] for x in user_controlled_output])

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      input_a_np = np.random.random((10, 3)).astype('float32')
      input_b_np = np.random.random((10, 5)).astype('float32')
      output_d_np = np.random.random((10, 7)).astype('float32')
      output_e_np = np.random.random((10, 7)).astype('float32')

      # Test with tuples
      dataset_tuple = tf.data.Dataset.from_tensor_slices(
          ((input_a_np, input_b_np), (output_d_np, output_e_np)))
      dataset_tuple = dataset_tuple.repeat(100)
      dataset_tuple = dataset_tuple.batch(10)

      model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)

      # Test with dict
      dataset_dict = tf.data.Dataset.from_tensor_slices(({
          'input_a': input_a_np,
          'input_b': input_b_np
      }, (output_d_np, output_e_np)))
      dataset_dict = dataset_dict.repeat(100)
      dataset_dict = dataset_dict.batch(10)

      model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_with_dictionary_in_the_dataset_b135161171(
      self, distribution):

    if backend.is_tpu_strategy(distribution):
      self.skipTest('b/142805125')

    def custom_loss(predict, label, weight):
      bce = keras.losses.binary_crossentropy(label, predict)
      return tf.reduce_mean(bce * weight)

    with self.cached_session():
      with distribution.scope():
        input_img = keras.layers.Input([64, 64, 3], name='img')
        input_lbl = keras.layers.Input([64, 64, 1], name='lbl')
        input_weight = keras.layers.Input([64, 64], name='weight')
        predict = keras.layers.Conv2D(2, [1, 1], padding='same')(input_img)
        loss_lambda = keras.layers.Lambda(
            lambda x: custom_loss(*x), name='my_loss')
        my_loss = loss_lambda([predict, input_lbl, input_weight])
        model = keras.models.Model(
            inputs=[input_img, input_lbl, input_weight],
            outputs=[predict, my_loss])
        model.add_loss(model.get_layer('my_loss').output)
        model.compile(
            optimizer='adam')

      if tf.executing_eagerly():

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return (inputs,)
      else:

        def map_fn(img, lbl, weight):
          inputs = {'img': img, 'lbl': lbl, 'weight': weight}
          return inputs, {}

      fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32)
      fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32)
      fake_weights = np.ones([50, 64, 64], dtype=np.float32)

      data = tf.data.Dataset.from_tensor_slices(
          (fake_imgs, fake_lbls, fake_weights)).map(map_fn).batch(10)

      model.fit(data)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset_without_steps(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      inputs = np.zeros((1000, 3), dtype=np.float32)
      targets = np.zeros((1000, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      fit_with_numpy = model.fit(
          inputs, targets, epochs=1, batch_size=10).history
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.batch(10, drop_remainder=True)
      fit_with_ds = model.fit(dataset, epochs=1).history
      eval_with_ds = model.evaluate(dataset)
      predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.batch(10, drop_remainder=True)
      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_predict_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):

    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((20, 3), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      predict_with_numpy = model.predict(inputs, batch_size=10)

      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_on_dataset_with_unknown_cardinality_without_steps(
      self, distribution, mode):
    # TODO(b/155867206): Investigate why this test occasionally segfaults on TPU
    # in eager mode.
    if mode == 'eager' and backend.is_tpu_strategy(distribution):
      self.skipTest('caused segfault with TPU in eager mode.')

    if mode == 'graph' and backend.is_tpu_strategy(distribution):
      self.skipTest('partial batch not supported with TPU in graph mode.')

    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      inputs = np.zeros((100, 3), dtype=np.float32)
      targets = np.zeros((100, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      fit_with_numpy = model.fit(
          inputs, targets, epochs=1, batch_size=10).history
      fit_with_numpy_multiple_epochs = model.fit(
          inputs, targets, epochs=2, batch_size=10).history
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs, targets)
      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)
      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      eval_with_ds = model.evaluate(dataset)
      predict_with_ds = model.predict(predict_dataset)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

      fit_with_ds = model.fit(dataset, epochs=1).history
      fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history
      self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          fit_with_numpy_multiple_epochs,
          fit_with_ds_multiple_epochs,
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only())
  def test_on_dataset_with_unknown_cardinality(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            tf.compat.v1.train.GradientDescentOptimizer(0.001),
            loss,
            metrics=metrics)

      inputs = np.zeros((1000, 3), dtype=np.float32)
      targets = np.zeros((1000, 4), dtype=np.float32)
      # steps/steps_per_epoch are calculated when using numpy arrays as
      # input data.
      eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
      predict_with_numpy = model.predict(inputs, batch_size=10)

      dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs, targets)
      predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
          inputs)

      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)
      self.assertEqual(
          keras.backend.get_value(tf.data.experimental.cardinality(predict_dataset)),
          tf.data.experimental.UNKNOWN_CARDINALITY)

      eval_with_ds = model.evaluate(dataset, steps=100)
      predict_with_ds = model.predict(predict_dataset, steps=100)
      self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
      self.assertAllClose(
          predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

      with self.assertRaisesRegex(ValueError,
                                  'Number of steps could not be inferred'):
        model.fit(dataset, epochs=1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae', keras.metrics.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      dataset = get_dataset(distribution)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(strategy_and_optimizer_combinations())
  def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
    with self.cached_session():

      with distribution.scope():

        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer(),
            loss)

      dataset = get_dataset(distribution)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.one_device_strategy
          ],
          mode=['graph', 'eager']))
  def test_dataset_wrong_input_shape(self, distribution, mode):
    if mode == 'graph':
      self.skipTest(
          'TODO(b/120943676, b/120957836): Re-enable for graph once the '
          'validation code is restored.')
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # Wrong input shape
      inputs = np.zeros((10, 5), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      with self.assertRaisesRegex(ValueError, 'is incompatible with'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu
          ],
          mode=['graph', 'eager']))
  def test_dataset_external_batch_input_validation(
      self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = get_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # Batching is done outside tf.data's `batch`
      inputs = np.zeros((100, 10, 3), dtype=np.float32)
      targets = np.zeros((100, 10, 4), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['graph', 'eager']))
  def test_learning_phase_value(self, distribution):
    # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
    # meaningful values. Currently we don't pass the learning phase if the
    # Lambda layer uses the learning phase.
    with self.cached_session():
      with distribution.scope():
        x = keras.layers.Input(shape=(1,), name='input')
        y = keras.layers.Dense(1, kernel_initializer='ones')(x)
        z = keras.layers.Dropout(0.9999)(y)
        model = keras.Model(x, z)
        initial_weights = model.get_weights()

        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.005)
        loss = 'mse'
        metrics = ['acc']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

      batch_size = 8
      if isinstance(distribution, (tf.distribute.MirroredStrategy,
                                   tf.compat.v1.distribute.MirroredStrategy)):
        # MirroredStrategy uses global batch size.
        batch_size = 8 * distribution.num_replicas_in_sync

      inputs = np.ones((10, 1), dtype=np.float32)
      targets = np.ones((10, 1), dtype=np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat().batch(batch_size)
      hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
      self.assertAlmostEqual(hist.history['acc'][0], 0, 0)

      with distribution.scope():
        model.set_weights(initial_weights)
      # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
      # evaluate_output = model.evaluate(dataset, steps=20)
      # self.assertAlmostEqual(evaluate_output[1], 1, 0)

      inputs = np.ones((10, 1), dtype=np.float32)
      predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)

      predict_dataset = predict_dataset.repeat().batch(batch_size)
      output = model.predict(predict_dataset, steps=10)
      # `predict` runs for 10 steps
      ref_output = np.ones((160, 1), dtype=np.float32)
      self.assertArrayNear(output, ref_output, 1e-1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def testOptimizerWithCallbacks(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = get_model()
        optimizer = gradient_descent_keras.SGD(0.01)
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      dataset = get_dataset(distribution)

      def schedule(_):
        return 0.001

      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
      self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr))

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(),
                         tf.__internal__.test.combinations.combine(batch_size=[4, 6])))
  def test_evaluate_with_dataset_with_partial_batch(self, distribution,
                                                    batch_size):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss, metrics=metrics)

      x = np.random.random((10, 3)).astype('float32')
      y = np.random.random((10, 4)).astype('float32')
      dataset = tf.data.Dataset.from_tensor_slices((x, y))

      # As sample size is 10, we make the last batch a partial batch.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      dataset_with_partial_batch = dataset.batch(batch_size)

      # We don't compare the loss as loss is currently not computed as metric
      # in Keras, the loss value is inaccurate for last partial batch due to
      # more weights for the last batch samples.
      steps = np.ceil(10.0 / batch_size)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(
              dataset_with_partial_batch, steps=steps)[1:],
          cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:],
          atol=1e-5,
          rtol=1e-5)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:],
          cpu_model.evaluate(dataset_with_partial_batch)[1:],
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_with_dataset_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss)

      inputs = np.random.random((10, 3)).astype(np.float32)
      dataset = tf.data.Dataset.from_tensor_slices((inputs))

      # As sample size is 10, we batch by 4 so that the last batch is
      # a partial batch.
      dataset_with_partial_batch = dataset.batch(4)
      cpu_model.set_weights(model_with_ds_strategy.get_weights())

      self.assertAllClose(
          model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3),
          cpu_model.predict(dataset_with_partial_batch, steps=3),
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_multi_output_model_with_dataset_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = simple_multi_inputs_multi_outputs_model()
      cpu_model.compile(optimizer, loss)

      input_data, _ = get_multi_inputs_multi_outputs_data()
      input_dict = {
          'input_a': input_data['input_a'],
          'input_b': input_data['input_b'],
      }

      dataset = tf.data.Dataset.from_tensor_slices(input_dict)

      # As sample size is 200, we batch by 18 using 12 steps per epoch so
      # that the last batch is a partial batch.
      dataset_with_partial_batch = dataset.batch(18)
      cpu_model.set_weights(model_with_ds_strategy.get_weights())

      self.assertAllClose(
          model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12),
          cpu_model.predict(dataset_with_partial_batch, steps=12),
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations_minus_default())
  def test_match_model_input_matches_with_dataset_tensors(self, distribution):

    def _create_model_input_output_tensors():
      input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last')
      input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first')
      intermediate_a = keras.layers.Dense(10)(input_a)
      intermediate_b = keras.layers.Dense(10)(input_b)
      merged = keras.layers.Add()([intermediate_a, intermediate_b])
      output = keras.layers.Dense(2)(merged)
      return input_a, input_b, output

    input_dict = {
        'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32),
        'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32)
    }
    target = np.ones((32, 2), dtype=np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((input_dict, target))
    dataset = dataset.batch(4, drop_remainder=True)

    with self.cached_session():
      with distribution.scope():
        input_a, input_b, output = _create_model_input_output_tensors()
        # `input_a`, which has input name that comes last in alphanumeric
        # order, is the first input of the model input layers. If tensors
        # from `input_dict` is blindly flattened and passed to model
        # inputs incorrectly, this would result in `input_a` input layer
        # matching with tensor `a_input_sorted_first` and would result in
        # shape mismatch.
        model_with_array_input = keras.models.Model(
            inputs=[input_a, input_b], outputs=output)
        model_with_array_input.compile('sgd', 'mse')
        model_weights = model_with_array_input.get_weights()
        model_with_array_input_fit = model_with_array_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history

        input_a, input_b, output = _create_model_input_output_tensors()
        model_with_dict_input = keras.models.Model(
            inputs={
                'z_input_sorted_last': input_a,
                'a_input_sorted_first': input_b,
            },
            outputs=output)
        model_with_dict_input.compile('sgd', 'mse')
        model_with_dict_input.set_weights(model_weights)
        model_with_dict_input_fit = model_with_dict_input.fit(
            dataset, steps_per_epoch=1, epochs=1).history
        self.assertAllClose(
            model_with_dict_input_fit,
            model_with_array_input_fit,
            atol=1e-4,
            rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=['graph', 'eager']) +
      tf.__internal__.test.combinations.combine(
          distribution=multi_worker_mirrored_strategies, mode=['eager']))
  def test_dataset_with_sample_weights(self, distribution):
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
      ds = tf.data.Dataset.from_tensor_slices(
          (inputs, targets, sample_weights)).batch(2)
      result = model.evaluate(ds, verbose=1)
      # The per-sample loss is multiplied by the corresponding sample weight. The
      # average of these weighted losses is the return value of the `evaluate`
      # call. For example, in the test above the average weighted loss is
      # calculated in the following manner:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # The first time we divide by number of input samples and the second time
      # we divide by number of steps/batches that the loss is aggregated over.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5
      # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 =  27 / 2 = 13.5
      ds = tf.data.Dataset.from_tensor_slices((inputs, targets)).batch(2)
      result = model.evaluate(ds, verbose=1)
      self.assertAllClose(result, 13.5)

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Methods

def testOptimizerWithCallbacks_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def testOptimizerWithCallbacks_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def testOptimizerWithCallbacks_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

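
The distribution suffixes in these generated names (Default, OneDeviceCPU, OneDeviceGPU, MirroredCPUAndGPU, Mirrored2GPUs, CentralStorageCPUAndGPU, MultiWorkerMirrored2x1CPU, TPU, and so on) are the names of NamedDistribution objects that keras.distribute.strategy_combinations groups into lists such as all_strategies and strategies_minus_tpu. A rough sketch of that correspondence follows; the attribute names are taken from the tf.__internal__.distribute.combinations namespace and may vary between TensorFlow versions.

import tensorflow.compat.v2 as tf

ds_combinations = tf.__internal__.distribute.combinations

# Each entry is a NamedDistribution: its name becomes the suffix in the
# generated test id, and the underlying tf.distribute.Strategy is only
# created lazily when the test actually runs.
named_strategies = [
    ds_combinations.default_strategy,                    # "Default"
    ds_combinations.one_device_strategy,                 # "OneDeviceCPU"
    ds_combinations.mirrored_strategy_with_gpu_and_cpu,  # "MirroredCPUAndGPU"
    ds_combinations.mirrored_strategy_with_two_gpus,     # "Mirrored2GPUs"
]

for named in named_strategies:
  print(named)  # repr is expected to be the name used in the test id
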
def test_calling_model_on_same_dataset_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_on_same_dataset_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_external_batch_input_validation_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_external_batch_input_validation_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_dataset_with_sample_weights_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_with_sample_weights_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_with_sample_weights_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_wrong_input_shape_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_wrong_input_shape_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_wrong_input_shape_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_dataset_wrong_input_shape_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_evaluate_with_dataset_with_partial_batch_test_distribution_TPU_mode_graph_batchsize_4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_evaluate_with_dataset_with_partial_batch_test_distribution_TPU_mode_graph_batchsize_6(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper shown above.
def test_fit_eval_and_predict_methods_on_dataset_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
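
Every generated test method in this listing carries the same wrapper; only the keyword arguments baked into the method name (distribution, mode, and, further down, optimizer) differ. As a minimal, self-contained sketch of the pattern the wrapper implements (filter the combination's kwargs down to the parameters the test body actually declares, then run the body inside the combination's context managers via contextlib.ExitStack), the following is illustrative only: run_with_kwargs and _FakeTest are hypothetical names, and this does not use the TensorFlow combinations framework.

import contextlib
import inspect


def run_with_kwargs(test_method, self_obj, kwargs, context_managers=()):
  """Call test_method, passing only the keyword arguments it declares."""
  requested = inspect.getfullargspec(test_method).args
  missing = set(requested) - (set(kwargs) | {"self"})
  if missing:
    raise ValueError(
        "The test requires arguments that were not passed: {}".format(
            sorted(missing)))
  filtered = {name: kwargs[name] for name in requested if name != "self"}
  # Enter every context manager before running the body, mirroring the
  # ExitStack branch of the wrapper above.
  with contextlib.ExitStack() as stack:
    for manager in context_managers:
      stack.enter_context(manager)
    return test_method(self_obj, **filtered)


class _FakeTest(object):

  def test_body(self, distribution, mode):
    print("running with distribution=%s in %s mode" % (distribution, mode))


run_with_kwargs(
    _FakeTest.test_body, _FakeTest(),
    {"distribution": "OneDeviceCPU", "mode": "graph"})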
def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_methods_on_dataset_without_steps_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
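
The test_fit_eval_and_predict_with_optimizer variants, beginning just above, encode three combined axes in their names (distribution, mode, optimizer), so the entries that follow are effectively the cross product of those axes. As a rough, self-contained illustration of how a cross product of parameter axes expands into suffixed test names, here is a hypothetical sketch; combine and name_for are illustrative helpers, not the TensorFlow combinations API, which builds its real test ids internally.

import itertools


def combine(**axes):
  """Yield one kwargs dict per element of the cross product of the axes."""
  keys = sorted(axes)
  for values in itertools.product(*(axes[key] for key in keys)):
    yield dict(zip(keys, values))


def name_for(base, kwargs):
  # Append "_<key>_<value>" for each combined parameter, in sorted key order.
  return base + "".join(
      "_{}_{}".format(key, value) for key, value in sorted(kwargs.items()))


for kwargs in combine(
    distribution=["CentralStorageCPUAndGPU"],
    mode=["eager", "graph"],
    optimizer=["AdadeltaKerasV2", "AdagradKerasV2"]):
  print(name_for("test_fit_eval_and_predict_with_optimizer_test", kwargs))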
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown in full above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_CentralStorageCPUAndGPU_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
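
Because contextlib.nested only exists on Python 2, the Python 3 branch above enters the collected context_managers through a single contextlib.ExitStack, which unwinds them in reverse order when the test finishes. A standalone illustration of that pattern follows; the tag context manager is purely illustrative (a stand-in for a strategy scope, cluster setup, or graph/eager mode switch), not something defined in this module.

import contextlib


@contextlib.contextmanager
def tag(name):
  # Illustrative context manager standing in for a strategy scope, etc.
  print("enter", name)
  try:
    yield
  finally:
    print("exit", name)


managers = [tag("cluster"), tag("strategy_scope"), tag("graph_mode")]

# Enter every manager in order and exit them in reverse on the way out --
# the same behaviour the wrapper relies on when it installs `context_managers`.
with contextlib.ExitStack() as stack:
  for manager in managers:
    stack.enter_context(manager)
  print("test body runs here")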
def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Default_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_Mirrored2GPUs_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
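
The omitted_arguments / missing_arguments checks in the wrapper above enforce an exact match between the parameters declared by the underlying test method and the keyword arguments produced by the combination, with `self` always allowed. A small standalone illustration of that set arithmetic, using made-up parameter names:

# Illustration only; 'distribution', 'optimizer' and 'mode' are placeholder names.
requested_parameters = ['self', 'distribution', 'optimizer']    # from the test signature
kwargs = {'distribution': 'MirroredCPUAndGPU', 'mode': 'eager'}  # from the combination

omitted = set(requested_parameters) - (set(kwargs) | {'self'})
unexpected = (set(kwargs) | {'self'}) - set(requested_parameters)

print(omitted)     # {'optimizer'}: required by the test but not supplied -> first ValueError
print(unexpected)  # {'mode'}: supplied but not accepted by the test -> second ValueError
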
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
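
On Python 3 the wrapper uses contextlib.ExitStack to enter however many context managers the combinations supply; the contextlib.nested branch is effectively dead code on modern interpreters, since nested() existed only in Python 2. A generic sketch of the same pattern, with placeholder managers:

# Generic sketch of the ExitStack pattern: enter a runtime-determined number of
# context managers, then run the body; all managers exit (in reverse order)
# when the with-block ends, normally or via an exception.
import contextlib


def run_under(context_managers, body):
  with contextlib.ExitStack() as stack:
    for manager in context_managers:
      stack.enter_context(manager)
    body()


run_under([contextlib.nullcontext(), contextlib.nullcontext()],
          lambda: print('test body runs with all managers active'))
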
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
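
The wrapper above applies each combination's parameter modifiers to the incoming keyword arguments, verifies that the resulting set matches exactly the parameters the underlying test method declares, and then runs the test inside every context manager contributed by the combinations. A minimal, self-contained sketch of that argument-filtering mechanism (hypothetical helper name, standard library only, without the TensorFlow-specific pieces):

import contextlib
import inspect


def run_with_requested_args(test_method, self_obj, context_managers, **kwargs):
  """Hypothetical helper: forward only the kwargs that test_method declares."""
  requested = inspect.getfullargspec(test_method).args

  # Parameters the test declares but that were never supplied.
  omitted = set(requested) - set(kwargs) - {'self'}
  if omitted:
    raise ValueError('The test requires parameters that were not passed: '
                     '{}'.format(sorted(omitted)))

  # Arguments supplied that the test does not declare.
  unexpected = set(kwargs) - set(requested)
  if unexpected:
    raise ValueError('The test does not take parameters that were passed: '
                     '{}'.format(sorted(unexpected)))

  kwargs_to_pass = {name: self_obj if name == 'self' else kwargs[name]
                    for name in requested}

  # contextlib.ExitStack is the Python 3 replacement for the Python 2-only
  # contextlib.nested used in the legacy branch of the wrapper above.
  with contextlib.ExitStack() as stack:
    for manager in context_managers:
      stack.enter_context(manager)
    test_method(**kwargs_to_pass)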
def test_fit_eval_and_predict_with_optimizer_test_distribution_MirroredCPUAndGPU_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

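The entries above exercise the MultiWorkerMirrored 2x2-GPU / eager grid; the entries that follow repeat the same pattern for the OneDeviceCPU strategy, first in eager and then in graph mode, over the same mix of V1 and Keras-V2 optimizers. In the test source this grid is declared once and expanded into individual methods by the combinations framework referenced by the wrapper above. As a rough analogue only — using absl's `parameterized` rather than that framework, with invented class and argument names — the expansion into per-combination test methods looks roughly like this:

from absl.testing import parameterized


class FitEvalPredictExample(parameterized.TestCase):

  # Each tuple's first element becomes the generated method's name suffix,
  # e.g. test_fit_distribution_OneDeviceCPU_mode_eager_optimizer_AdamKerasV2.
  @parameterized.named_parameters(
      ('_distribution_OneDeviceCPU_mode_eager_optimizer_AdamKerasV2',
       'OneDeviceCPU', 'eager', 'AdamKerasV2'),
      ('_distribution_OneDeviceCPU_mode_graph_optimizer_AdagradV1',
       'OneDeviceCPU', 'graph', 'AdagradV1'),
  )
  def test_fit(self, distribution_name, mode, optimizer_name):
    del distribution_name, mode, optimizer_name  # Illustration only.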
def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceCPU_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_eager_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdadeltaKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_AdamaxKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_FtrlKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_NadamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_OneDeviceGPU_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_eager_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_eager_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_eager_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_eager_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_AdagradKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_AdagradV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_AdamKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_AdamV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_GradientDescentKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_GradientDescentV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_RmsPropKerasV2(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_RmsPropV1(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
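
The generated method names in this module encode the combination that each instance runs: the base test name followed by the combination's parameter names and values (test_distribution, mode and, where relevant, optimizer). A rough sketch of assembling such a suffix; build_combination_name is a hypothetical helper used only for illustration, not the framework's actual ID generator.

import collections


def build_combination_name(base, params):
  # params is an ordered mapping of combination parameter name -> value.
  suffix = "_".join(
      "{}_{}".format(name, value) for name, value in params.items())
  return "{}_{}".format(base, suffix)


print(build_combination_name(
    "test_fit_eval_and_predict_with_optimizer",
    collections.OrderedDict([("test_distribution", "TPU"),
                             ("mode", "graph"),
                             ("optimizer", "AdagradV1")])))
# test_fit_eval_and_predict_with_optimizer_test_distribution_TPU_mode_graph_optimizer_AdagradV1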
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code identical to the decorated wrapper listed above.
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_dictionary_in_the_dataset_b135161171_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
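The suffix of each generated test name encodes one point in the cross product of the parameters the test was declared with, here a distribution (OneDeviceCPU, OneDeviceGPU, TPU, and so on) and an execution mode (graph or eager). A minimal combine-style helper that produces such a cross product is sketched below; it is illustrative only and stands in for the combinations machinery assumed to generate these variants.

import itertools

def combine(**parameters):
  # Yield one kwargs dict per element of the cross product of the
  # parameter value lists, e.g. one dict per (distribution, mode) pair.
  names = sorted(parameters)
  for values in itertools.product(*(parameters[name] for name in names)):
    yield dict(zip(names, values))

# combine(distribution=['OneDeviceCPU', 'TPU'], mode=['graph', 'eager'])
# yields four dicts, matching suffixes such as _test_distribution_TPU_mode_eager.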
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
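The kwargs that survive the filtering above (typically the distribution object) are what the underlying test method receives. As a rough illustration only, and not the body of any test in this module, a Keras fit test driven by a distribution argument usually builds and compiles its model under the strategy's scope:

import numpy as np
import tensorflow as tf

def _example_fit_under_strategy(distribution):
  # Build and compile under the strategy so variables are created on the
  # strategy's devices, then run a short fit.
  with distribution.scope():
    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(1, input_shape=(10,))])
    model.compile(optimizer='sgd', loss='mse')
  x = np.ones((20, 10), dtype=np.float32)
  y = np.zeros((20, 1), dtype=np.float32)
  model.fit(x, y, epochs=1, batch_size=10)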
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
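At its core, the wrapper above filters the incoming keyword arguments down to the parameters the underlying test method actually declares, validates that nothing is missing or unexpected, and runs the call inside the combination's context managers. A stripped-down, self-contained sketch of that mechanism (illustrative only, not the TensorFlow implementation; `tf_inspect.getfullargspec` in the real code plays the role of `inspect.getfullargspec` here):

import contextlib
import inspect


def run_with_requested_args(test_method, test_case, kwargs, context_managers):
  """Call `test_method` with only the arguments it declares."""
  requested = inspect.getfullargspec(test_method).args
  omitted = set(requested) - (set(kwargs) | {'self'})
  if omitted:
    raise ValueError('Arguments required but not passed: {}'.format(omitted))
  unexpected = set(kwargs) - set(requested)
  if unexpected:
    raise ValueError('Arguments passed but not taken: {}'.format(unexpected))
  call_kwargs = {name: kwargs[name] for name in requested if name != 'self'}
  with contextlib.ExitStack() as stack:  # Python 3 path of the wrapper above.
    for manager in context_managers:
      stack.enter_context(manager)
    test_method(test_case, **call_kwargs)

The `contextlib.nested` branch in the listed source exists only for legacy Python 2 runs; on Python 3 the `ExitStack` branch is the one that executes.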
def test_fit_with_tuple_and_dict_dataset_inputs_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_learning_phase_value_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_learning_phase_value_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_learning_phase_value_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_learning_phase_value_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_match_model_input_matches_with_dataset_tensors_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_model_interleaved_eval_same_as_direct_eval_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
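
The test_on_dataset_with_unknown_cardinality* entries refer to tf.data datasets whose element count cannot be determined statically. A minimal sketch of such a dataset, assuming eager execution (the TF2 default); applying filter() is one operation that makes the cardinality unknown:

import numpy as np
import tensorflow.compat.v2 as tf

features = np.random.random((100, 10)).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices(features).filter(
    lambda x: tf.reduce_sum(x) >= 0.).batch(10)

# filter() hides the element count, so cardinality is reported as
# UNKNOWN_CARDINALITY (-2) and Keras cannot infer how many steps to run.
print(tf.data.experimental.cardinality(dataset).numpy())

The "_without_steps" variants below cover the case where no explicit steps argument is supplied for such a dataset.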

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
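
The many variants listed on this page are not hand-written tests: a single test body is expanded over a combinations matrix (for example `test_distribution` crossed with `mode`), and each combination is installed as its own method whose name records the chosen values. Every generated variant wraps the same `decorated` function shown above, which skips combinations destined for a different test environment, applies parameter modifiers, and forwards only the arguments the underlying test actually requests. Below is a minimal, self-contained sketch of that cross-product expansion; the helper `expand_combinations` is hypothetical and not the TensorFlow implementation.

import itertools

def expand_combinations(test_name, **axes):
  """Yield (generated_name, kwargs) pairs for the cross product of the axes."""
  axis_names = list(axes)  # keyword order determines the suffix order
  for values in itertools.product(*(axes[name] for name in axis_names)):
    chosen = dict(zip(axis_names, values))
    suffix = "_".join("{}_{}".format(k, v) for k, v in chosen.items())
    yield "{}_{}".format(test_name, suffix), chosen

for name, chosen in expand_combinations(
    "test_on_dataset_with_unknown_cardinality_without_steps",
    test_distribution=["Default", "Mirrored2GPUs", "OneDeviceCPU"],
    mode=["graph", "eager"]):
  print(name)
# e.g. test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Default_mode_graph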
def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_on_dataset_with_unknown_cardinality_without_steps_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_multi_output_model_with_dataset_with_partial_batch_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

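The `test_predict_multi_output_model_with_dataset_with_partial_batch` variant above covers predicting with a multi-output model over a batched dataset whose final batch is smaller than the batch size. A minimal sketch of that scenario with a hypothetical two-headed model and synthetic data (not the model the test itself builds):

import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
head_a = tf.keras.layers.Dense(3, name='head_a')(inputs)
head_b = tf.keras.layers.Dense(2, name='head_b')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=[head_a, head_b])

features = np.random.random((10, 8)).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices(features).batch(4)  # batches of 4, 4 and 2

preds_a, preds_b = model.predict(dataset)  # the partial final batch is still consumed
assert preds_a.shape == (10, 3) and preds_b.shape == (10, 2)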
def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

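The `test_predict_on_dataset_with_unknown_cardinality_without_steps` variants exercise prediction over a `tf.data.Dataset` whose size cannot be determined statically and with no `steps` argument supplied, so Keras keeps iterating until the dataset is exhausted. A minimal sketch of that situation, assuming a toy single-layer model and synthetic inputs (none of this is taken from the test body itself):

import numpy as np
import tensorflow as tf

features = np.random.random((20, 10)).astype(np.float32)
dataset = (tf.data.Dataset.from_tensor_slices(features)
           .filter(lambda x: tf.constant(True))  # filter() makes the cardinality unknown
           .batch(4))
assert (tf.data.experimental.cardinality(dataset) ==
        tf.data.experimental.UNKNOWN_CARDINALITY)

strategy = tf.distribute.get_strategy()  # default strategy here; the tests run under a specific one
with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(10,))])

predictions = model.predict(dataset)  # no `steps`: runs until end-of-sequence
assert predictions.shape == (20, 2)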
def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_with_unknown_cardinality_without_steps_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_with_dataset_with_partial_batch_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

class TestDistributionStrategyWithDatasetsFile (methodName='runTest')

Base class for tests that need to test TensorFlow.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestDistributionStrategyWithDatasetsFile(tf.test.TestCase,
                                               parameterized.TestCase):

  def setUp(self):
    super(TestDistributionStrategyWithDatasetsFile, self).setUp()
    self.input_file_name = os.path.join(self.get_temp_dir(), 'input.tfrecord')
    inputs = np.zeros((20, 3), dtype=np.float32)
    input_dataset = tf.data.Dataset.from_tensor_slices(inputs)
    input_dataset = input_dataset.map(tf.io.serialize_tensor)
    writer = tf.data.experimental.TFRecordWriter(self.input_file_name)
    writer.write(input_dataset)

  # TODO(wxinyi): add a multi-worker test for TPU
  @tf.__internal__.distribute.combinations.generate(multi_worker_strategy_combinations_eager_only())
  def test_predict_on_dataset_shard_options_file_multi_worker_mirrored(
      self, distribution, mode):
    # This test verifies that we successfully switch the auto_shard_policy of
    # an input dataset to AutoShardPolicy.DATA inside model.predict with
    # MultiWorkerMirroredStrategy. Since there is only one input file for
    # multiple workers, AutoShardPolicy.AUTO or AutoShardPolicy.FILE would lead
    # to an error. However, because model.predict switches to
    # AutoShardPolicy.DATA, no error is raised (a standalone sketch of this
    # option setting follows the class listing).
    del mode
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      model = get_model()
      loss = 'mse'
      model.compile(optimizer, loss)

    dataset = tf.data.TFRecordDataset(self.input_file_name)
    dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.float32))

    dummy_op = lambda inp: True

    dataset = dataset.filter(dummy_op).batch(8, drop_remainder=True)

    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = \
        tf.data.experimental.AutoShardPolicy.FILE
    dataset = dataset.with_options(options)

    model.predict(dataset, steps=1)
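
For reference, the internal policy switch described in the comment inside test_predict_on_dataset_shard_options_file_multi_worker_mirrored corresponds to setting the sharding option explicitly on the dataset. A minimal standalone sketch (the file path and array shapes are illustrative only):

import numpy as np
import tensorflow.compat.v2 as tf

# Write a single-file TFRecord input, mirroring setUp above.
file_name = '/tmp/input.tfrecord'  # illustrative path
records = tf.data.Dataset.from_tensor_slices(np.zeros((20, 3), np.float32))
records = records.map(tf.io.serialize_tensor)
tf.data.experimental.TFRecordWriter(file_name).write(records)

dataset = tf.data.TFRecordDataset(file_name)
dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.float32))
dataset = dataset.batch(8, drop_remainder=True)

# With one input file shared by several workers, AUTO or FILE sharding cannot
# give each worker its own file and fails; DATA sharding splits the dataset's
# elements across workers instead.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.DATA)
dataset = dataset.with_options(options)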

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Methods

def setUp(self)

Hook method for setting up the test fixture before exercising it.

Expand source code
def setUp(self):
  super(TestDistributionStrategyWithDatasetsFile, self).setUp()
  self.input_file_name = os.path.join(self.get_temp_dir(), 'input.tfrecord')
  inputs = np.zeros((20, 3), dtype=np.float32)
  input_dataset = tf.data.Dataset.from_tensor_slices(inputs)
  input_dataset = input_dataset.map(tf.io.serialize_tensor)
  writer = tf.data.experimental.TFRecordWriter(self.input_file_name)
  writer.write(input_dataset)
def test_predict_on_dataset_shard_options_file_multi_worker_mirrored_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_shard_options_file_multi_worker_mirrored_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_on_dataset_shard_options_file_multi_worker_mirrored_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

class TestDistributionStrategyWithKerasModels (methodName='runTest')

Base class for tests that need to test TensorFlow.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestDistributionStrategyWithKerasModels(tf.test.TestCase,
                                              parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_sequential_model(
      self, distribution):
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = simple_sequential_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.zeros((20, 10), np.float32)
      targets = np.zeros((20, 2), np.float32)

    model.fit(inputs, targets, epochs=1, batch_size=10)
    model.predict(inputs, batch_size=10)
    model.evaluate(inputs, targets, batch_size=10)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_distribution_strategy_on_functional_model(
      self, distribution):
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = get_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)

    model.fit(inputs, targets, epochs=1)
    model.predict(inputs)
    model.evaluate(inputs, targets)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_distributed_dataset(self, distribution):
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      x, y = np.ones((100, 10)), np.ones((100, 1))
      ds = tf.data.Dataset.from_tensor_slices((x, y))
      ds = ds.batch(10).repeat(2)
      ds = distribution.experimental_distribute_dataset(ds)

      val_ds = tf.data.Dataset.from_tensor_slices((x, y))
      val_ds = val_ds.batch(20)
      val_ds = distribution.experimental_distribute_dataset(val_ds)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

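      # 10 `steps_per_epoch` * 2 epochs = 20 train batches; 5 `validation_steps`
      # * 2 epochs = 10 test batches.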
      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_distributed_datasets_from_function(self, distribution):
    with distribution.scope():

      class CBCounter(keras.callbacks.Callback):

        def __init__(self):
          self.epochs = 0
          self.train_batches = 0
          self.test_batches = 0

        def on_epoch_end(self, batch, logs=None):
          self.epochs += 1

        def on_train_batch_end(self, batch, logs=None):
          self.train_batches += 1

        def on_test_batch_end(self, batch, logs=None):
          self.test_batches += 1

      model = keras.Sequential([keras.layers.Dense(1)])
      model.compile('sgd', 'mse')
      cb_counter = CBCounter()

      def make_dataset(_):
        x, y = np.ones((100, 10)), np.ones((100, 1))
        ds = tf.data.Dataset.from_tensor_slices((x, y))
        ds = ds.batch(5).repeat()
        return ds

      ds = distribution.distribute_datasets_from_function(make_dataset)
      val_ds = distribution.distribute_datasets_from_function(make_dataset)

      model.fit(
          ds,
          steps_per_epoch=10,
          validation_data=val_ds,
          validation_steps=5,
          epochs=2,
          callbacks=[cb_counter])

      self.assertEqual(cb_counter.train_batches, 20)
      self.assertEqual(cb_counter.test_batches, 10)
      self.assertEqual(cb_counter.epochs, 2)

      # Check for `steps_per_epoch`.
      if distribution.num_replicas_in_sync > 1:
        with self.assertRaisesRegex(ValueError,
                                    'distributed dataset, you must specify'):
          model.fit(ds, epochs=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input((10, 10, 3))
      x = keras.layers.Conv2D(3, kernel_size=3)(inputs)
      x = keras.layers.Flatten()(x)
      outputs = keras.layers.Dense(1)(x)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=10)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10, 10, 3)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
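    # 100 samples / batch_size 2 = 50 batches per epoch; with
    # `steps_per_execution=10`, batch callbacks fire once per 10-batch
    # execution (begins at 0, 10, ..., 40; ends at 9, 19, ..., 49).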
    self.assertEqual(bc.train_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.train_end_batches, [9, 19, 29, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.test_end_batches, [9, 19, 29, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.predict_end_batches, [9, 19, 29, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_last_partial_execution(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10)), np.ones((100, 1))
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_dataset_unknown_size(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=20)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    ds = ds.filter(lambda *args, **kwargs: True)  # Makes the size UNKNOWN.
    bc = BatchCountingCB()

    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
      model.fit(ds, epochs=2, callbacks=[bc])

    train_ds = ds.repeat(2)
    model.fit(train_ds, steps_per_epoch=50, epochs=2, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49])

    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
      model.evaluate(ds, callbacks=[bc])

    test_ds = ds.repeat(2)
    model.evaluate(test_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    predict_ds = ds.repeat(2)
    model.predict(predict_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_host_training_loop_truncate_to_epoch(self, distribution):
    if isinstance(distribution,
                  tf.distribute.MultiWorkerMirroredStrategy):
      self.skipTest('b/172032817')
    with distribution.scope():
      inputs = keras.Input(10)
      outputs = keras.layers.Dense(1)(inputs)
      model = keras.Model(inputs, outputs)

    model.compile('sgd', 'mse', steps_per_execution=500)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    bc = BatchCountingCB()
    model.fit(x, y, batch_size=2, epochs=2, callbacks=[bc])
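    # Each epoch only has 50 batches, so steps_per_execution=500 is truncated
    # to a single 50-batch execution per epoch.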
    self.assertEqual(bc.train_begin_batches, [0, 0])
    self.assertEqual(bc.train_end_batches, [49, 49])

    x, y = np.ones((50, 10)), np.ones((50, 1))
    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0])
    self.assertEqual(bc.test_end_batches, [24])

    x = np.ones((50, 10))
    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0])
    self.assertEqual(bc.predict_end_batches, [24])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_gradient_clipping(self, distribution):

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = tf.Variable(1.)
        self.v2 = tf.Variable(1.)

      def call(self, x):
        return 3 * self.v1 - 3 * self.v2

    x, y = np.ones((10, 1)), np.ones((10, 1))

    with distribution.scope():
      layer = MyLayer()
      model = keras.Sequential([layer])
      optimizer = gradient_descent_keras.SGD(1., clipnorm=2., clipvalue=2.)
    model.compile(optimizer, 'mae')

    if isinstance(distribution,
                  (tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      with self.assertRaisesRegex(ValueError, 'not supported'):
        model.fit(x, y, batch_size=10, epochs=1)
    else:
      model.fit(x, y, batch_size=10, epochs=1)
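      # The initial prediction is 0 and the target is 1, so the gradients are
      # -3 for v1 and +3 for v2; clipping limits them to +/-2, and one SGD step
      # with lr=1 moves v1 from 1. to 3. and v2 from 1. to -1.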
      self.assertAllClose(self.evaluate(layer.v1), 3.)
      self.assertAllClose(self.evaluate(layer.v2), -1.)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_custom_gradient_transformation(self, distribution):
    if isinstance(distribution,
                  (tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('Not supported with `CentralStorageStrategy`')

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = tf.Variable(1.)
        self.v2 = tf.Variable(-1.)

      def call(self, x):
        return x + self.v1 + self.v2

    def custom_transform(grads_and_vars):
      # Always set gradients to 1.
      return [(tf.ones_like(g), v) for g, v in grads_and_vars]

    x, y = np.ones((10, 1)), np.ones((10, 1))

    with distribution.scope():
      layer = MyLayer()
      model = keras.Sequential([layer])
      optimizer = gradient_descent_keras.SGD(
          1., gradient_transformers=[custom_transform])
    model.compile(optimizer, 'mae')

    model.fit(x, y, batch_size=10, epochs=1)
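    # custom_transform replaces every gradient with 1, so one SGD step with
    # lr=1 lowers each variable by exactly 1: v1 goes 1. -> 0., v2 -1. -> -2.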
    self.assertAllClose(self.evaluate(layer.v1), 0.)
    self.assertAllClose(self.evaluate(layer.v2), -2.)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default()))
  def test_distribution_strategy_one_dimensional(self, distribution):
    with distribution.scope():
      inp = keras.layers.Input(shape=(10,))
      out = keras.layers.Dense(3, activation='softmax')(inp)
      model = keras.Model(inputs=[inp], outputs=[out])
      model.compile(
          optimizer='rmsprop',
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])

      x = np.random.random((64, 10)).astype('float32')
      y = np.random.randint(3, size=64)

      model.fit(x, y, epochs=1, steps_per_epoch=2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['graph', 'eager'],
          reduction=[
              losses_utils.ReductionV2.AUTO,
              losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
              losses_utils.ReductionV2.SUM
          ]))
  def test_distribution_strategy_with_loss_reduction_types(
      self, distribution, reduction):
    np.random.seed(_RANDOM_SEED)

    def _get_model():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.random.random((64, 10))
    y = np.random.random((64, 1))
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(32)

    model = _get_model()
    model.compile(
        'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction))
    history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False)
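    # Now re-run the identical setup under the distribution strategy below; the
    # reported per-epoch losses should agree with this run to within 1e-5.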

    with distribution.scope():
      ds_model = _get_model()
      ds_model.compile(
          'sgd',
          loss=keras.losses.MeanSquaredError(reduction=reduction))
      ds_history = ds_model.fit(
          dataset, steps_per_epoch=2, epochs=1, shuffle=False)
    self.assertArrayNear(history.history['loss'], ds_history.history['loss'],
                         1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default()))
  def test_distribution_strategy_with_symbolic_add_loss(
      self, mode, distribution):

    def _make_model_with_add_loss():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      model.add_loss(tf.reduce_mean(x1))
      model.add_loss(tf.reduce_mean(outputs))
      return model

    x = np.ones((64, 10)).astype('float32')

    model = _make_model_with_add_loss()
    model.compile('sgd')
    history = model.fit(x, epochs=1)

    with distribution.scope():
      ds_model = _make_model_with_add_loss()
      ds_model.compile(
          'sgd')
      ds_history = ds_model.fit(x, epochs=1)

    self.assertAllClose(history.history, ds_history.history)

  # TODO(omalleyt): Investigate flakiness and re-enable.
  @tf.__internal__.distribute.combinations.generate(all_strategy_minus_default_and_tpu_combinations())
  def DISABLED_test_distribution_strategy_with_callable_add_loss(
      self, distribution):

    def _make_model():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
      d = keras.layers.Dense(1, kernel_initializer='zeros')
      outputs = d(x2)
      model = keras.Model(inputs, outputs)
      model.add_loss(lambda: 100. * tf.reduce_mean(d.kernel))
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model()
    self.assertLen(model.losses, 1)

    model.compile('sgd', 'mse')
    history = model.fit(x, y, steps_per_epoch=2, epochs=1)

    with distribution.scope():
      ds_model = _make_model()
      self.assertLen(ds_model.losses, 1)
      ds_model.compile('sgd', 'mse')
      ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_minus_default_and_tpu_combinations()))
  def test_distribution_strategy_with_add_metric_in_call(
      self, distribution):

    class Bias(keras.layers.Layer):

      def build(self, input_shape):
        self.bias = self.add_weight(name='bias', initializer='zeros', shape=())

      def call(self, inputs):
        self.add_metric(
            tf.reduce_mean(inputs), name='bias', aggregation='mean')
        return inputs + self.bias

    def _make_model_with_add_metric():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = Bias()(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.one_device_strategy,
              tf.__internal__.distribute.combinations.one_device_strategy_gpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
              tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus
          ],
          mode=['eager']))
  def test_distribution_strategy_with_add_metric_object(
      self, distribution):

    class Bias(keras.layers.Layer):

      def build(self, input_shape):
        self.bias = self.add_weight(name='bias', initializer='zeros', shape=())
        self.mean = keras.metrics.Mean(name='mean')

      def call(self, inputs):
        self.add_metric(self.mean(inputs))
        return inputs + self.bias

    def _make_model_with_add_metric_object():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      x2 = Bias()(x1)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
      model = keras.Model(inputs, outputs)
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric_object()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric_object()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      # TODO(phillypham): Why does validation_steps > 1 not work on TPUs?
      tf.__internal__.test.combinations.times(
          all_strategy_minus_default_and_tpu_combinations()))
  def test_distribution_strategy_with_add_metric_outside_call(
      self, distribution):

    def _make_model_with_add_metric():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1)
      model = keras.Model(inputs, outputs)
      model.add_metric(
          tf.reduce_mean(x1), name='mid_mean', aggregation='mean')
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')

    model = _make_model_with_add_metric()
    self.assertLen(model.metrics, 1)

    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)

    with distribution.scope():
      ds_model = _make_model_with_add_metric()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile(
          'sgd',
          'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if tf.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)

    self.assertAllClose(history.history, ds_history.history)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu + multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_sparse_tensor_outputs(self, distribution):

    class ToSparse(keras.layers.Layer):
      """Create a sparse tensor based on a given dense tensor."""

      def call(self, inputs):
        indices = tf.where(tf.not_equal(inputs, 0))
        values = tf.gather_nd(inputs, indices)
        shape = tf.shape(inputs, out_type='int64')
        return tf.SparseTensor(indices, values, dense_shape=shape)

    model = keras.Sequential([ToSparse()])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)
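    # Only the non-zero entries of the 2x3 input are kept; they live at
    # indices (0, 0), (1, 0) and (1, 1).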

    expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
    expected_values = np.array([1, 2, 3])
    expected_dense_shape = np.array([2, 3])

    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu + multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_ragged_tensor_outputs(self, distribution):

    class ToRagged(keras.layers.Layer):
      """Create a ragged tensor based on a given dense tensor."""

      def __init__(self, padding, ragged_rank=1, **kwargs):
        super(ToRagged, self).__init__(**kwargs)
        self._padding = padding
        self._ragged_rank = ragged_rank

      def call(self, inputs):
        return tf.RaggedTensor.from_tensor(
            inputs, padding=self._padding, ragged_rank=self._ragged_rank)

    model = keras.Sequential([ToRagged(padding=0)])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)
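    # The zero padding is stripped, leaving ragged rows [1] and [2, 3].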

    expected_values = [[1], [2, 3]]
    self.assertAllEqual(expected_values, output)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_default_minus_tpu + tpu_strategies +
          multi_worker_mirrored_strategies,
          mode=['eager']))
  def test_correctness_of_add_loss_with_merge_call(self, distribution):
    batch_size = 32

    def _get_model():
      inputs = keras.layers.Input(shape=(1,))
      labels = keras.layers.Input(shape=(1,))
      x = keras.layers.Dense(10, activation='relu')(inputs)
      y = keras.layers.Dense(1)(x)
      model = keras.models.Model([inputs, labels], y)
      model.add_loss(keras.losses.mean_squared_error(labels, y))
      return model

    def _get_data():
      x_train = np.random.rand(64, 1)
      y_train = 3 * x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')
      dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
      dataset = dataset.batch(batch_size)
      return dataset

    with distribution.scope():
      model = _get_model()
      optimizer = gradient_descent_keras.SGD(0.2)

      @tf.function
      def train_step(dist_inputs):

        def step_fn(inputs):
          with tf.GradientTape() as tape:
            logits = model(inputs)

            # Invoke a merge_call()
            tf.distribute.get_replica_context().merge_call(
                lambda d: None)

            # Verify that there is only one loss on the model.
            assert len(model.losses) == 1
            loss_from_model = tf.reduce_sum(
                model.losses) * 1.0 / batch_size

            # Compute loss in this loop.
            loss = keras.losses.mean_squared_error(inputs[1], logits)
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)

            # Verify that the loss computed in this loop is equivalent to the
            # loss from the model that was added via add_loss.
            tf.compat.v1.assert_equal(loss, loss_from_model)

          grads = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(zip(grads, model.trainable_variables))
          return loss

        per_replica_losses = distribution.run(step_fn, args=(dist_inputs,))
        return distribution.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

      dataset = distribution.experimental_distribute_dataset(_get_data())
      for _ in range(2):
        for x in dataset:
          train_step(x)

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['graph', 'eager']))
  def test_unimplemented_parameter_server_strategy(self):
    cluster_spec = multi_worker_testing_utils.create_in_process_cluster(
        num_workers=3, num_ps=2)
    cluster_resolver = SimpleClusterResolver(
        cluster_spec=tf.train.ClusterSpec(cluster_spec),
        task_type='worker',
        task_id=1,
        num_accelerators={'GPU': 0})
    distribution = tf.compat.v1.distribute.experimental.ParameterServerStrategy(
        cluster_resolver)

    self.assertIsInstance(distribution,
                          tf.compat.v1.distribute.experimental.ParameterServerStrategy)
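    # Compiling a Keras model under the v1 ParameterServerStrategy is not
    # implemented, so this should raise NotImplementedError.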

    with self.assertRaisesRegex(NotImplementedError,
                                'ParameterServerStrategy*'):
      with distribution.scope():
        model = simple_sequential_model()
        optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        model.compile(optimizer, loss)

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Class variables

var DISABLED_test_distribution_strategy_with_callable_add_loss

Methods

def test_correctness_of_add_loss_with_merge_call_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_correctness_of_add_loss_with_merge_call_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_correctness_of_add_loss_with_merge_call_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_custom_gradient_transformation_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_custom_gradient_transformation_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_custom_gradient_transformation_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_custom_gradient_transformation_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_custom_gradient_transformation_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_custom_gradient_transformation_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_custom_gradient_transformation_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
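
The test_custom_gradient_transformation variants above exercise optimizer gradient transformers under each strategy. The test body itself is not reproduced on this page; the sketch below only illustrates the feature being covered, assuming the gradient_transformers argument of the Keras OptimizerV2 API, with scale_grads as a hypothetical transformer:

import numpy as np
import tensorflow.compat.v2 as tf
import keras
from keras.optimizer_v2 import gradient_descent as gradient_descent_keras


def scale_grads(grads_and_vars):
  # Hypothetical transformer: halve every gradient before it is applied.
  return [(0.5 * g, v) for g, v in grads_and_vars]


strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
  optimizer = gradient_descent_keras.SGD(
      learning_rate=0.1, gradient_transformers=[scale_grads])
  model.compile(optimizer=optimizer, loss='mse')

model.fit(np.ones((8, 4)), np.zeros((8, 1)), batch_size=4, epochs=1, verbose=0)
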
def test_distributed_dataset_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_dataset_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
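
The test_distributed_dataset variants above cover passing an already-distributed dataset directly to Model.fit. A minimal sketch of that usage, not the test body itself; steps_per_epoch is given explicitly because Keras may not be able to infer the cardinality of a distributed dataset:

import numpy as np
import tensorflow.compat.v2 as tf
import keras

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
  model.compile(optimizer='sgd', loss='mse')

dataset = tf.data.Dataset.from_tensor_slices(
    (np.ones((16, 4), np.float32), np.zeros((16, 1), np.float32))).batch(8)
# Distribute the dataset explicitly and hand the DistributedDataset to fit.
dist_dataset = strategy.experimental_distribute_dataset(dataset)
model.fit(dist_dataset, epochs=1, steps_per_epoch=2, verbose=0)
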
def test_distributed_datasets_from_function_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distributed_datasets_from_function_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
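
The test_distributed_datasets_from_function variants above cover the companion input path, where each replica builds its own dataset from a tf.distribute.InputContext. A sketch under the same assumptions (dataset_fn and its sharding choices are illustrative; in older TF releases the method was named experimental_distribute_datasets_from_function):

import numpy as np
import tensorflow.compat.v2 as tf
import keras

strategy = tf.distribute.MirroredStrategy()


def dataset_fn(input_context):
  # Each input pipeline builds its own shard with a per-replica batch size.
  batch_size = input_context.get_per_replica_batch_size(8)
  dataset = tf.data.Dataset.from_tensor_slices(
      (np.ones((16, 4), np.float32), np.zeros((16, 1), np.float32)))
  dataset = dataset.shard(input_context.num_input_pipelines,
                          input_context.input_pipeline_id)
  return dataset.batch(batch_size)


with strategy.scope():
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
  model.compile(optimizer='sgd', loss='mse')

dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
model.fit(dist_dataset, epochs=1, steps_per_epoch=2, verbose=0)
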
def test_distribution_strategy_on_functional_model_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distribution_strategy_on_functional_model_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

(Source code identical to the decorated wrapper shown above.)
def test_distribution_strategy_on_functional_model_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
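Every entry below repeats this same decorated wrapper; only the (distribution, mode) combination baked into the generated method name changes. As a rough, hypothetical sketch of the mechanism (the combine and generate helpers below are assumptions for illustration, not TensorFlow's combinations implementation), a class decorator can expand a parameter grid into one test method per combination and forward only the arguments each test requests:

# Hypothetical sketch only; combine/generate are not TensorFlow's API.
import contextlib
import inspect
import itertools
import unittest


def combine(**grid):
  # Expand keyword lists into every combination, e.g.
  # combine(distribution=['Default'], mode=['graph', 'eager']).
  keys = sorted(grid)
  return [dict(zip(keys, values))
          for values in itertools.product(*(grid[key] for key in keys))]


def generate(combinations):
  # Replace each test_* method with one named variant per combination.
  def decorator(cls):
    for name, method in list(vars(cls).items()):
      if not name.startswith('test') or not callable(method):
        continue
      delattr(cls, name)
      for kwargs in combinations:
        suffix = '_'.join('%s_%s' % item for item in sorted(kwargs.items()))

        def make_case(method=method, kwargs=dict(kwargs)):
          def case(self):
            requested = inspect.getfullargspec(method).args
            with contextlib.ExitStack():  # the real wrapper installs scopes here
              method(self, **{k: v for k, v in kwargs.items() if k in requested})
          return case

        setattr(cls, '%s_test_%s' % (name, suffix), make_case())
    return cls
  return decorator


@generate(combine(distribution=['Default', 'OneDeviceCPU'],
                  mode=['graph', 'eager']))
class ExampleTest(unittest.TestCase):

  def test_distribution_strategy_on_functional_model(self, distribution, mode):
    self.assertIn(mode, ('graph', 'eager'))

Under these assumptions the class ends up with methods such as test_distribution_strategy_on_functional_model_test_distribution_Default_mode_eager, matching the naming pattern of the generated entries on this page.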
def test_distribution_strategy_on_functional_model_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_functional_model_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Source code: identical to the decorated wrapper shown above.
def test_distribution_strategy_on_sequential_model_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
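Every variant below is produced by the distribute-strategy combinations framework: a single parameterized test is expanded over the cross product of `distribution` and `mode` values, the chosen values are encoded into the generated method name (e.g. `..._test_distribution_MirroredCPUAndGPU_mode_eager`), and each generated method is wrapped by the same `decorated` function shown above. The remaining variants therefore differ only in the strategy and execution mode they receive. The following is a minimal sketch of how such a parameterized test is typically declared; the `tf.__internal__` aliases and the toy test body are illustrative assumptions, not the exact code of this module.

import tensorflow.compat.v2 as tf
from absl.testing import parameterized

import keras
from keras.distribute.strategy_combinations import all_strategies

# Assumed aliases for the TF 2.x combinations helpers; the actual test file
# may import or spell these differently.
ds_combinations = tf.__internal__.distribute.combinations
test_combinations = tf.__internal__.test.combinations


class SequentialModelSketch(tf.test.TestCase, parameterized.TestCase):

  @ds_combinations.generate(
      test_combinations.combine(
          distribution=all_strategies, mode=['graph', 'eager']))
  def test_distribution_strategy_on_sequential_model(self, distribution):
    # One method is generated per (distribution, mode) pair, e.g.
    # `test_distribution_strategy_on_sequential_model_test_distribution_TPU_mode_graph`,
    # and each runs this body through the `decorated` wrapper shown above.
    with distribution.scope():
      model = keras.Sequential(
          [keras.layers.Dense(2, activation='softmax', input_shape=(10,))])
      model.compile('sgd', 'sparse_categorical_crossentropy')

The combination values also feed the `should_execute_combination` and `context_managers` hooks in `decorated`, which is how TPU and multi-worker variants get skipped or wrapped with the appropriate runtime setup.
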
def test_distribution_strategy_on_sequential_model_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_on_sequential_model_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_one_dimensional_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
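
The `..._with_add_metric_in_call_...` combinations exercise a model whose layer registers a metric from inside `call` while training under a tf.distribute strategy. A hedged sketch of that pattern (illustrative names, not the test's literal body):

# Hedged sketch of the add_metric-in-call pattern: a layer records a metric
# from inside `call`, and the model is built and fit under a strategy scope
# so the metric value must aggregate correctly across replicas.
import numpy as np
import tensorflow.compat.v2 as tf
import keras


class DenseWithMeanMetric(keras.layers.Layer):

  def __init__(self):
    super().__init__()
    self.dense = keras.layers.Dense(2)

  def call(self, inputs):
    outputs = self.dense(inputs)
    # Register a metric from inside `call`; 'mean' aggregation averages the
    # per-batch (and per-replica) values.
    self.add_metric(
        tf.reduce_mean(outputs), name='mean_output', aggregation='mean')
    return outputs


def build_and_fit(strategy):
  with strategy.scope():
    inputs = keras.Input(shape=(10,))
    model = keras.Model(inputs, DenseWithMeanMetric()(inputs))
    model.compile(optimizer='sgd', loss='mse')
  x = np.ones((8, 10), dtype=np.float32)
  y = np.ones((8, 2), dtype=np.float32)
  history = model.fit(x, y, epochs=1, batch_size=4, verbose=0)
  return history.history['mean_output']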

def test_distribution_strategy_with_add_metric_in_call_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_in_call_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_object_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
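
The `..._with_add_metric_object_...` combinations (listed only in eager mode) exercise the variant where the layer owns a keras.metrics.Metric instance and feeds its result to `add_metric` from `call`. A hedged sketch of that pattern (illustrative names, not the test's literal body):

# Hedged sketch: the layer owns a `keras.metrics.Sum` object and registers
# its result via `add_metric`; the metric object keeps its own running state.
import tensorflow.compat.v2 as tf
import keras


class LayerWithMetricObject(keras.layers.Layer):

  def __init__(self):
    super().__init__()
    self.sum_metric = keras.metrics.Sum(name='metric_1')

  def call(self, inputs):
    # The metric object tracks its own total; `add_metric` just registers it
    # with the enclosing model.
    self.add_metric(self.sum_metric(inputs))
    return inputs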

def test_distribution_strategy_with_add_metric_object_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_object_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_object_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
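
The `..._with_add_metric_outside_call_...` combinations exercise the case where the metric is attached with `model.add_metric` on a functional model built under the strategy scope, rather than from inside a layer's `call`. A hedged sketch of that pattern (illustrative names, not the test's literal body):

# Hedged sketch: attach a metric on an intermediate tensor of a functional
# model, outside any layer's `call`, and fit under a strategy scope.
import numpy as np
import tensorflow.compat.v2 as tf
import keras


def build_and_fit(strategy):
  with strategy.scope():
    inputs = keras.Input(shape=(10,))
    hidden = keras.layers.Dense(4, activation='relu')(inputs)
    outputs = keras.layers.Dense(1)(hidden)
    model = keras.Model(inputs, outputs)
    # Metric registered on the model itself, not from within a layer.
    model.add_metric(
        tf.reduce_mean(hidden), name='mean_hidden', aggregation='mean')
    model.compile(optimizer='sgd', loss='mse')
  x = np.ones((8, 10), dtype=np.float32)
  y = np.ones((8, 1), dtype=np.float32)
  history = model.fit(x, y, epochs=1, batch_size=4, verbose=0)
  return history.history['mean_hidden']


# Example usage with a single-device strategy:
# build_and_fit(tf.distribute.OneDeviceStrategy('/cpu:0'))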

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_add_metric_outside_call_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_eager_reduction_auto(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
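
In the method names of this group, reduction_auto, reduction_sum, and reduction_sumoverbatchsize refer to the reduction applied by the compiled loss, i.e. tf.keras.losses.Reduction.AUTO, SUM, and SUM_OVER_BATCH_SIZE. A minimal sketch of compiling the same model with each reduction under a strategy scope follows; the model, data, and optimizer are illustrative assumptions.

# Minimal sketch of the scenario behind the reduction_* variants: the same
# model compiled with different tf.keras.losses.Reduction settings under a
# distribution strategy. Shapes, data, and optimizer are illustrative only.
import numpy as np
import tensorflow.compat.v2 as tf

strategy = tf.distribute.MirroredStrategy()
for reduction in [
    tf.keras.losses.Reduction.AUTO,
    tf.keras.losses.Reduction.SUM,
    tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
]:
  with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
    model.compile(
        optimizer='sgd',
        loss=tf.keras.losses.MeanSquaredError(reduction=reduction))
  model.fit(np.ones((20, 10)), np.ones((20, 1)), batch_size=10, epochs=1)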

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_eager_reduction_sum(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_eager_reduction_sumoverbatchsize(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_graph_reduction_auto(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_graph_reduction_sum(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_Mirrored2GPUs_mode_graph_reduction_sumoverbatchsize(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_eager_reduction_auto(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_eager_reduction_sum(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_eager_reduction_sumoverbatchsize(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_graph_reduction_auto(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_graph_reduction_sum(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_loss_reduction_types_test_distribution_MirroredCPUAndGPU_mode_graph_reduction_sumoverbatchsize(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
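
The symbolic add_loss tests in this group build a model under a strategy scope and register an extra loss term through Model.add_loss, where the loss is a symbolic tensor derived from the model's own tensors. A rough sketch of that pattern follows; the strategy, shapes, and data are illustrative assumptions.

# Rough sketch of the symbolic add_loss pattern these tests exercise: a loss
# tensor computed from symbolic model tensors is registered via
# Model.add_loss() inside a strategy scope. Shapes and data are illustrative.
import numpy as np
import tensorflow.compat.v2 as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  inputs = tf.keras.Input(shape=(10,))
  outputs = tf.keras.layers.Dense(1)(inputs)
  model = tf.keras.Model(inputs, outputs)
  # Activity-regularization-style loss on the model's symbolic output.
  model.add_loss(tf.reduce_mean(outputs))
  model.compile(optimizer='sgd', loss='mse')
model.fit(np.ones((20, 10)), np.ones((20, 1)), batch_size=10, epochs=1)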

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_distribution_strategy_with_symbolic_add_loss_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
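
For orientation, the test_gradient_clipping family covers training with an optimizer configured for gradient clipping under a distribution strategy. A hedged sketch of that pattern, with an invented helper name and illustrative settings, is:

import numpy as np
import tensorflow.compat.v2 as tf
import keras


def run_gradient_clipping_example(distribution):
  """Trains a small model with a clipping optimizer inside a strategy scope."""
  with distribution.scope():
    model = keras.Sequential([keras.layers.Dense(1, input_shape=(10,))])
    # clipvalue (or clipnorm) bounds each gradient before the update is applied.
    optimizer = keras.optimizers.SGD(learning_rate=0.1, clipvalue=1.0)
    model.compile(optimizer=optimizer, loss='mse')
  x = np.random.random((32, 10)).astype(np.float32)
  y = np.random.random((32, 1)).astype(np.float32)
  model.fit(x, y, epochs=1, batch_size=16)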

def test_gradient_clipping_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradient_clipping_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
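
For orientation, the test_host_training_loop_dataset_unknown_size family concerns fitting on a tf.data pipeline whose cardinality cannot be determined statically while several train steps are packed into each host-to-device dispatch. A hedged sketch of that pattern follows; the steps_per_execution compile argument and the helper name are assumptions for illustration.

import numpy as np
import tensorflow.compat.v2 as tf
import keras


def run_unknown_size_dataset_example(distribution):
  """Fits on a dataset with UNKNOWN cardinality using multi-step execution."""
  with distribution.scope():
    model = keras.Sequential([keras.layers.Dense(1, input_shape=(10,))])
    # steps_per_execution > 1 runs several steps per host dispatch (assumed API name).
    model.compile(optimizer='sgd', loss='mse', steps_per_execution=10)
  x = np.ones((100, 10), dtype=np.float32)
  y = np.ones((100, 1), dtype=np.float32)
  dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
  # filter() leaves the dataset with UNKNOWN cardinality, so an explicit
  # steps_per_epoch is needed for fit() to know the epoch length.
  dataset = dataset.filter(lambda features, labels: True)
  model.fit(dataset, epochs=1, steps_per_epoch=10)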

def test_host_training_loop_dataset_unknown_size_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_dataset_unknown_size_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_last_partial_execution_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_host_training_loop_truncate_to_epoch_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
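For context, a hedged sketch of the behaviour the `test_host_training_loop_truncate_to_epoch_*` variants target (the actual test body is not reproduced in this listing): when `steps_per_execution` is larger than the number of batches in an epoch, Keras truncates it so execution still stops on epoch boundaries.

import numpy as np
import tensorflow.compat.v2 as tf

import keras

strategy = tf.distribute.OneDeviceStrategy('/cpu:0')
with strategy.scope():
  inputs = keras.layers.Input(shape=(1,))
  outputs = keras.layers.Dense(1)(inputs)
  model = keras.Model(inputs, outputs)
  # Ask for 500 steps per host loop; with only 10 batches per epoch below,
  # the loop is effectively truncated to 10 steps per execution.
  model.compile('sgd', 'mse', steps_per_execution=500)

x = np.ones((100, 1), dtype=np.float32)
y = np.ones((100, 1), dtype=np.float32)
model.fit(x, y, batch_size=10, epochs=2, verbose=0)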

def test_ragged_tensor_outputs_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
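For context, a hedged sketch of what the `test_ragged_tensor_outputs_*` variants exercise (the real test body is not reproduced in this listing): a model whose layer emits `tf.RaggedTensor` outputs, built and evaluated under a distribution strategy.

import numpy as np
import tensorflow.compat.v2 as tf

import keras


class ToRagged(keras.layers.Layer):
  """Strips a padding value and returns a tf.RaggedTensor."""

  def call(self, inputs):
    return tf.RaggedTensor.from_tensor(inputs, padding=0)


strategy = tf.distribute.OneDeviceStrategy('/cpu:0')
with strategy.scope():
  model = keras.Sequential([ToRagged()])

outputs = model.predict(np.array([[1, 0], [2, 3]], dtype=np.int64))
# `outputs` is a tf.RaggedTensor: [[1], [2, 3]].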

def test_ragged_tensor_outputs_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_ragged_tensor_outputs_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
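Similarly, a hedged sketch of what the `test_sparse_tensor_outputs_*` variants exercise (the real test body is not reproduced in this listing): a layer emitting `tf.SparseTensor` outputs under a distribution strategy.

import numpy as np
import tensorflow.compat.v2 as tf

import keras


class ToSparse(keras.layers.Layer):
  """Returns the non-zero entries of a dense tensor as a tf.SparseTensor."""

  def call(self, inputs):
    indices = tf.where(tf.not_equal(inputs, 0))
    values = tf.gather_nd(inputs, indices)
    shape = tf.shape(inputs, out_type=tf.int64)
    return tf.SparseTensor(indices, values, dense_shape=shape)


strategy = tf.distribute.OneDeviceStrategy('/cpu:0')
with strategy.scope():
  model = keras.Sequential([ToSparse()])

outputs = model.predict(np.array([[1, 0], [2, 3]], dtype=np.int64))
# `outputs` is a tf.SparseTensor with values [1, 2, 3].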

def test_sparse_tensor_outputs_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
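
Each of the wrapped methods listed here is produced from a single parameterized test declared with the combinations API; a minimal sketch of that authoring pattern follows (the class name, the choice of all_strategies, and the assertion body are illustrative assumptions; only the generate/combine usage mirrors what this module does):

import tensorflow.compat.v2 as tf
from absl.testing import parameterized

from keras.distribute.strategy_combinations import all_strategies


class SparseTensorOutputsSketch(tf.test.TestCase, parameterized.TestCase):
  """Hypothetical example; the real test bodies live in the module source."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=all_strategies, mode=['eager']))
  def test_sparse_tensor_outputs(self, distribution):
    # `generate` expands this single method into one wrapped method per
    # combination, e.g.
    # test_sparse_tensor_outputs_test_distribution_OneDeviceCPU_mode_eager,
    # and the `decorated` wrapper shown above decides at run time which
    # combinations to execute, skip, or hand off to another environment.
    self.assertGreaterEqual(distribution.num_replicas_in_sync, 1)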
def test_sparse_tensor_outputs_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_sparse_tensor_outputs_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_unimplemented_parameter_server_strategy_test_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_unimplemented_parameter_server_strategy_test_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

class TestDistributionStrategyWithMultipleAddLossAndMetricCalls (methodName='runTest')

Tests complex models with multiple add loss and metric calls.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestDistributionStrategyWithMultipleAddLossAndMetricCalls(
    tf.test.TestCase, parameterized.TestCase):
  """Tests complex models with multiple add loss and metric calls."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          all_strategy_combinations_minus_default(),
          tf.__internal__.test.combinations.combine(
              model_fn=[
                  _functional_with_add_loss_and_metric,
                  _sequential_with_add_loss_and_metric,
                  _functional_with_layer_reuse,
              ],
              l1=[0.01],
              l2=[0.1])))
  def test_fit_and_evaluate(self, distribution, model_fn, l1, l2):
    # Make fake MNIST-like image data.
    np.random.seed(_RANDOM_SEED)
    dataset = tf.data.Dataset.from_tensor_slices(
        (np.random.uniform(size=(64, 28, 28, 1)).astype(np.float32),
         np.random.randint(0, 10, size=(64,))))
    dataset = dataset.shuffle(64).batch(
        8 * distribution.num_replicas_in_sync, drop_remainder=True)
    # Make model with distribution strategy and initialize with dataset shape.
    input_shape = tf.data.experimental.get_structure(dataset)[0].shape[1:]
    with distribution.scope():
      model = model_fn(input_shape, 10, l1, l2)
      model.compile(
          optimizer=keras.optimizers.adam_v2.Adam(1e-4),
          loss=keras.losses.SparseCategoricalCrossentropy(
              from_logits=True,
              reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE),
          metrics=[
              keras.metrics.SparseCategoricalAccuracy(),
              keras.metrics.SparseCategoricalCrossentropy(from_logits=True),
          ])
    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)
    results = dict(zip(model.metrics_names, model.evaluate(dataset)))
    # Sanity checks.
    self.assertBetween(results['sparse_categorical_accuracy'], 0.02, 1.)
    self.assertGreater(results['l2_loss'], 0.)
    self.assertGreater(results['l1_loss'], 0.)
    # Assert correctness of the loss calculation and updating of metrics.
    self.assertNear(
        results['l1_loss'] * l1 + results['l2_loss'] * l2 +
        results['sparse_categorical_crossentropy'], results['loss'], 1e-6)
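
The model_fn helpers referenced above (_functional_with_add_loss_and_metric, _sequential_with_add_loss_and_metric, _functional_with_layer_reuse) are defined elsewhere in the module and their bodies are not reproduced here. A minimal hypothetical sketch of what such a helper could look like, written only to match how test_fit_and_evaluate consumes it (signature (input_shape, num_classes, l1, l2), unscaled 'l1_loss'/'l2_loss' metrics, and scaled penalties added through add_loss so that the total loss is approximately crossentropy + l1 * l1_loss + l2 * l2_loss):

import tensorflow.compat.v2 as tf
import keras


class _L1L2WithMetrics(keras.layers.Layer):
  """Adds scaled L1/L2 activity penalties and reports the unscaled terms."""

  def __init__(self, l1, l2, **kwargs):
    super().__init__(**kwargs)
    self.l1 = l1
    self.l2 = l2

  def call(self, inputs):
    l1_term = tf.reduce_mean(tf.abs(inputs))
    l2_term = tf.reduce_mean(tf.square(inputs))
    # Scaled penalties contribute to the total loss; the unscaled terms are
    # surfaced as the 'l1_loss' and 'l2_loss' metrics the test asserts on.
    self.add_loss(self.l1 * l1_term)
    self.add_loss(self.l2 * l2_term)
    self.add_metric(l1_term, name='l1_loss', aggregation='mean')
    self.add_metric(l2_term, name='l2_loss', aggregation='mean')
    return inputs


def functional_with_add_loss_and_metric_sketch(input_shape, num_classes, l1, l2):
  # Hypothetical stand-in for the module's _functional_with_add_loss_and_metric;
  # the layer sizes are illustrative only.
  inputs = keras.Input(shape=input_shape)
  x = keras.layers.Flatten()(inputs)
  x = keras.layers.Dense(64, activation='relu')(x)
  x = _L1L2WithMetrics(l1, l2)(x)
  logits = keras.layers.Dense(num_classes)(x)
  return keras.Model(inputs, logits)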

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Methods

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_graph_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_graph_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
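
On any current Python the `contextlib.nested` branch above is dead code; only
the `ExitStack` path runs. The wrapper's two remaining jobs are (1) entering
every combination-supplied context manager before the test body and unwinding
them afterwards, and (2) forwarding only the keyword arguments the underlying
test method actually declares. A stripped-down sketch of both ideas, using the
standard `inspect` module in place of `tf_inspect` (the helper names here are
illustrative, not part of the framework):

import contextlib
import inspect


def run_under_contexts(context_managers, fn):
  """Enter every manager in order, run fn, then unwind in reverse order."""
  with contextlib.ExitStack() as stack:
    for manager in context_managers:
      stack.enter_context(manager)
    return fn()


def call_with_matching_kwargs(fn, available_kwargs):
  """Pass fn exactly the keyword arguments its signature requests."""
  requested = [p for p in inspect.getfullargspec(fn).args if p != 'self']
  omitted = set(requested) - set(available_kwargs)
  if omitted:
    raise ValueError('arguments not passed: {}'.format(omitted))
  unexpected = set(available_kwargs) - set(requested)
  if unexpected:
    raise ValueError('test does not take: {}'.format(unexpected))
  return fn(**{name: available_kwargs[name] for name in requested})


def example_test(distribution, mode):
  print('running with', distribution, mode)


run_under_contexts(
    [contextlib.suppress()],  # stand-in for e.g. a strategy scope
    lambda: call_with_matching_kwargs(
        example_test, {'distribution': 'OneDeviceCPU', 'mode': 'eager'}))
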
def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_graph_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_graph_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_eager_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_eager_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_graph_l1_001_l2_01_modelfn_functionfunctionalwithlayerreuseat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_graph_l1_001_l2_01_modelfn_functionsequentialwithaddlossandmetricat4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

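
The generated method names listed above encode the combination parameters (distribution, mode, l1, l2, model_fn), one method per parameter tuple. A minimal sketch of how such combinations are declared, mirroring the decorator pattern used by the class below; the parameter values and test body here are illustrative:

import tensorflow.compat.v2 as tf
from absl.testing import parameterized


class CombinationsSketch(tf.test.TestCase, parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[
              tf.__internal__.distribute.combinations.one_device_strategy
          ],
          mode=['graph', 'eager'],
          l1=[0.001],
          l2=[0.01]))
  def test_fit_and_evaluate(self, distribution, l1, l2):
    # One named test method is generated per (distribution, mode, l1, l2)
    # combination; the parameter values are encoded into the method name.
    self.assertLess(l1, l2)


if __name__ == '__main__':
  tf.test.main()
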
class TestDistributionStrategyWithNumpyArrays (methodName='runTest')

Base class for tests that need to test TensorFlow.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestDistributionStrategyWithNumpyArrays(tf.test.TestCase,
                                              parameterized.TestCase):

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_no_batch_size(self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Default global batch size 32 for input with 64 samples run in 2 steps
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # Computed global batch size 20 is lower than 32 if we pass fewer samples.
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 20, steps=None, batch_size=None)
      self.assertEqual(batch_size, 20 // replica_scale_factor)
      self.assertEqual(steps, 1)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_no_batch_size(
      self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed global batch size is correct when 1 step is specified
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=1, batch_size=None)
      self.assertEqual(batch_size, 64 // replica_scale_factor)
      self.assertEqual(steps, 1)

      # Computed global batch size is correct when 2 steps are specified
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=2, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # All samples cannot be consumed in the specified number of steps
      with self.assertRaisesRegex(ValueError, 'not divisible by steps'):
        distributed_training_utils_v1.get_input_params(
            distribution, 63, steps=2, batch_size=None)

      # This case differs between strategies because the supported batch size
      # is either global or per-replica.
      if replica_scale_factor == 1:
        # Computed global batch size is correct even if not shardable
        steps, batch_size = distributed_training_utils_v1.get_input_params(
            distribution, 63, steps=3, batch_size=None)
        self.assertEqual(batch_size, 21)
        self.assertEqual(steps, 3)
      else:
        # Computed global batch size cannot be sharded across replicas
        with self.assertRaisesRegex(
            ValueError, 'could not be sharded evenly '
            'across the sync replicas'):
          distributed_training_utils_v1.get_input_params(
              distribution, 63, steps=1, batch_size=None)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_with_batch_size(
      self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed steps is correct for specified batch size
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=16)
      self.assertEqual(batch_size, 16)
      self.assertEqual(steps, 4 // replica_scale_factor)

      # Computed steps is correct for specified batch size
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=None, batch_size=32)
      self.assertEqual(batch_size, 32)
      self.assertEqual(steps, 2 // replica_scale_factor)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_with_batch_size(
      self, distribution):
    with self.cached_session():
      # No change to steps and batch size if both specified and feasible
      steps, batch_size = distributed_training_utils_v1.get_input_params(
          distribution, 64, steps=5, batch_size=3)
      self.assertEqual(batch_size, 3)
      self.assertEqual(steps, 5)

      # Number of samples is less than global batch size * steps
      with self.assertRaisesRegex(ValueError, 'less than samples required'):
        distributed_training_utils_v1.get_input_params(
            distribution, 64, steps=10, batch_size=13)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_numpy_arrays(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

        inputs = np.zeros((64, 3), dtype=np.float32)
        targets = np.zeros((64, 4), dtype=np.float32)

        # Call fit with validation data
        model.fit(
            inputs,
            targets,
            epochs=1,
            batch_size=2,
            verbose=0,
            validation_data=(inputs, targets))

        # TODO(anjalisridhar): We need tests for when the batch size and steps
        # are smaller and results in a 0 batch_size and steps value.
        model.evaluate(inputs, targets)
        model.evaluate(inputs, targets, batch_size=8)

        model.predict(inputs)
        model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_mixed_precision(self, distribution):
    if isinstance(distribution,
                  (tf.compat.v1.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('b/152097775')
    if backend.is_tpu_strategy(distribution):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'
    with self.cached_session(), \
         distribution.scope(), \
         policy.policy_scope(policy_name):
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      x = keras.layers.Input(shape=(3,), name='input')
      y = keras.layers.Dense(4, name='dense')(x)
      y = keras.layers.Activation('softmax', dtype='float32')(y)
      model = keras.Model(x, y)
      loss = 'mse'
      metrics = ['mae']
      model.compile(
          optimizer,
          loss,
          metrics=metrics)

      # We need to pass float32 since TPUs do not support float64, even though
      # these arrays will immediately be cast to bfloat16 on TPUs. We also
      # cannot pass bfloat16, as Numpy does not support it.
      inputs = np.zeros((64, 3), dtype='float32')
      targets = np.zeros((64, 4), dtype='float32')

      model.fit(
          inputs,
          targets,
          epochs=1,
          batch_size=2,
          verbose=0,
          validation_data=(inputs, targets))

      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_operator_overload_mixed_precision(self, distribution):
    # Regression test checking that a fixed bug does not reoccur. Adding an
    # AutoCastVariable to a tensor on a TPU, where the variable was the LHS of
    # the '+' operator, used to cause the gradient w.r.t. the variable to be
    # None.
    if isinstance(distribution,
                  (tf.compat.v1.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.ParameterServerStrategy,
                   tf.distribute.experimental.CentralStorageStrategy,
                   tf.compat.v1.distribute.experimental.CentralStorageStrategy)):
      self.skipTest('b/152097775')

    if backend.is_tpu_strategy(distribution):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v', ())
        self.v2 = self.add_weight('v', ())

      def call(self, inp):
        inp += self.v1
        return self.v2 + inp

    with self.cached_session(), distribution.scope():
      layer = MyLayer(dtype=policy_name)
      def run_fn():
        x = np.array([1.])
        with tf.GradientTape() as tape:
          y = layer(x)
        grad_v1, grad_v2 = tape.gradient(y, [layer.v1, layer.v2])
        return grad_v1, grad_v2
      if tf.executing_eagerly():
        run_fn = tf.function(run_fn)

      grad_v1, grad_v2 = distribution.run(run_fn)
      self.assertIsNotNone(grad_v1)
      self.assertIsNotNone(grad_v2)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=[tf.__internal__.distribute.combinations.one_device_strategy],
          mode=['graph', 'eager']))
  def test_optimizer_in_cross_replica_context_raises_error(self, distribution):

    with self.cached_session(), distribution.scope():
      model = keras.models.Sequential([keras.layers.Dense(1)])
      x = np.array([[1.]])
      with tf.GradientTape() as tape:
        y = model(x)
      gradients = tape.gradient(y, model.trainable_variables)
      optimizer = gradient_descent_keras.SGD()

      with self.assertRaisesRegex(RuntimeError,
                                  'cannot be called in cross-replica context'):
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_calling_model_with_nested_numpy_arrays(self, distribution):
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      targets = [output_d_np, output_e_np]

      # Call fit with validation data
      model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)

      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=['graph', 'eager']) +
      tf.__internal__.test.combinations.combine(
          distribution=multi_worker_mirrored_strategies, mode=['eager']))
  def test_numpy_with_sample_weights(self, distribution):
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)

      result = model.evaluate(
          inputs,
          targets,
          batch_size=2,
          sample_weight=sample_weights,
          verbose=1)
      # The per-sample loss is multiplied by the corresponding sample weight. The
      # average of these weighted losses is the return value of the `evaluate`
      # call. For example, in the test above the average weighted loss is
      # calculated in the following manner:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # We first divide by the number of samples in each batch and then by the
      # number of steps/batches that the loss is aggregated over.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
      # batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
      result = model.evaluate(inputs, targets, batch_size=2, verbose=1)
      self.assertAllClose(result, 13.5)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_flatten_predict_outputs(self, distribution):
    with self.cached_session():
      with distribution.scope():
        model = multi_input_output_model()
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # We take 6 input samples with each input having a dimension of 3 or 5.
      input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      outs = model.predict(inputs)
      # `predict` returns a list that is equal in length to the number of model outputs.
      # In this test our model has two outputs and each element of `outs`
      # corresponds to all the samples of one of the model outputs.
      self.assertLen(outs, 2)
      # Each of the output samples has a dimension of 7. We should process all
      # the available input samples (6).
      self.assertAllEqual([6, 7], outs[0].shape)
      self.assertAllEqual([6, 7], outs[1].shape)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(tpu_strategy_combinations_graph_only(),
                         tf.__internal__.test.combinations.combine(batch_size=[4, 6])))
  def test_evaluate_with_partial_batch(self, distribution, batch_size):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss, metrics=metrics)

      x = np.random.random((10, 3)).astype('float32')
      y = np.random.random((10, 4)).astype('float32')

      # As the sample size is 10, we batch by 4 so that the last batch is
      # a partial batch. Also, `evaluate()` with numpy arrays as inputs and no
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      evaluate_ground_truth = cpu_model.evaluate(x, y)

      # We don't compare the loss because loss is currently not computed as a
      # metric in Keras; the loss value is inaccurate for the last partial
      # batch because its samples carry more weight.
      steps = np.ceil(10.0 / batch_size)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(
              x, y, batch_size=batch_size, steps=steps)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when a final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_with_partial_batch(self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss)

      inputs = np.random.random((10, 3)).astype(np.float32)

      # As the sample size is 10, we batch by 4 so that the last batch is
      # a partial batch. Also, `predict()` with numpy arrays as inputs and no
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      predict_ground_truth = cpu_model.predict(inputs)
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4, steps=3),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when a final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)

  @tf.__internal__.distribute.combinations.generate(tpu_strategy_combinations_graph_only())
  def test_no_target_model(self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)

      class MyLayer(keras.layers.Layer):

        def call(self, inputs, training=None):
          self.add_loss(tf.reduce_sum(inputs), inputs=True)
          return inputs

      with distribution.scope():
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
        model.add(MyLayer())
        model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))

        model.compile(optimizer)
        inputs = np.zeros((20, 10), np.float32)

        model.fit(inputs, epochs=1, steps_per_epoch=2)
        model.predict(inputs, steps=1)
        model.evaluate(inputs, steps=1)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_multi_output_model_with_partial_batch(
      self, distribution):
    with self.cached_session():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = simple_multi_inputs_multi_outputs_model()
      cpu_model.compile(optimizer, loss)

      input_data, _ = get_multi_inputs_multi_outputs_data()
      input_dict = {
          'input_a': input_data['input_a'],
          'input_b': input_data['input_b'],
      }

      # As the sample size is 200, we batch by 18 so that the last batch is
      # a partial batch. Also, `predict()` with numpy arrays as inputs and no
      # distribution strategy uses the entire sample set as a single batch, so
      # we omit the `batch_size` and `steps` parameters.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      self.assertAllClose(
          model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12),
          cpu_model.predict(input_dict),
          atol=1e-4,
          rtol=1e-4)

  @tf.__internal__.distribute.combinations.generate(all_strategy_combinations())
  def test_gradients_are_none(self, distribution):

    if not tf.executing_eagerly():
      self.skipTest('None gradients are not supported in graph mode')

    class DenseWithExtraWeight(keras.layers.Dense):

      def build(self, input_shape):
        # Gradients w.r.t. extra_weights are None
        self.extra_weight_1 = self.add_weight('extra_weight_1', shape=(),
                                              initializer='ones')
        super(DenseWithExtraWeight, self).build(input_shape)
        self.extra_weight_2 = self.add_weight('extra_weight_2', shape=(),
                                              initializer='ones')

    with distribution.scope():
      model = keras.Sequential([DenseWithExtraWeight(4, input_shape=(4,))])
      model.compile('adam', 'mse')

    inputs = np.random.normal(size=(64, 4))
    targets = np.random.normal(size=(64, 4))
    old_kernel = model.get_weights()[1]
    model.fit(inputs, targets)
    new_kernel = model.get_weights()[1]
    self.assertNotAllEqual(old_kernel, new_kernel)
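
For reference, the weighted-evaluation arithmetic spelled out in the comments of `test_numpy_with_sample_weights` above can be reproduced with plain numpy, assuming (as those comments do) that the model's predictions are zero for these inputs:

import numpy as np

targets = np.array([[2.], [4.], [6.], [8.]], np.float32)
weights = np.array([0.25, 0.5, 0.75, 1.], np.float32)
predictions = np.zeros_like(targets)  # assumption: zero predictions, as in the comments

squared_error = ((targets - predictions) ** 2).reshape(-1)  # [4, 9, 16, 25]
# Weighted loss: average within each batch of 2, then across the 2 batches.
weighted = [(squared_error[i:i + 2] * weights[i:i + 2]).sum() / 2 for i in (0, 2)]
print(sum(weighted) / 2)    # 10.625
# Unweighted loss computed the same way.
unweighted = [squared_error[i:i + 2].sum() / 2 for i in (0, 2)]
print(sum(unweighted) / 2)  # 13.5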

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase
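
The test_calculating_input_params_* cases above pin down simple batch/steps arithmetic in the single-replica case. The sketch below re-derives just that arithmetic for illustration; it is not the distributed_training_utils_v1.get_input_params implementation and ignores the per-replica scaling that some strategies require:

def infer_steps_and_batch_size(num_samples, steps=None, batch_size=None):
  # Illustrative single-replica arithmetic matching the assertions above;
  # the default global batch size is 32, capped at the number of samples.
  if batch_size is None:
    if steps is None:
      batch_size = min(32, num_samples)
    else:
      if num_samples % steps:
        raise ValueError('not divisible by steps')
      batch_size = num_samples // steps
  if steps is None:
    steps = num_samples // batch_size
  return steps, batch_size

assert infer_steps_and_batch_size(64) == (2, 32)
assert infer_steps_and_batch_size(20) == (1, 20)
assert infer_steps_and_batch_size(64, steps=2) == (2, 32)
assert infer_steps_and_batch_size(64, batch_size=16) == (4, 16)
assert infer_steps_and_batch_size(64, steps=5, batch_size=3) == (5, 3)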

Methods

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_no_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_no_batch_size_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

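Each of the generated variants above installs the context managers supplied by its combination before running the test body. On Python 3 the wrapper does this through a single `contextlib.ExitStack`, so every manager is exited even if the body raises. Below is a minimal, self-contained sketch of that pattern using placeholder managers rather than the ones the real combinations provide.

import contextlib


@contextlib.contextmanager
def fake_manager(name):
  # Placeholder for a combination-supplied manager such as a strategy scope.
  print('enter', name)
  try:
    yield
  finally:
    print('exit', name)


def run_with_managers(context_managers, test_body):
  # Mirrors the Python 3 branch of `decorated`: enter every manager on one
  # ExitStack, then run the test body inside all of them.
  with contextlib.ExitStack() as stack:
    for manager in context_managers:
      stack.enter_context(manager)
    test_body()


run_with_managers([fake_manager('strategy_scope'), fake_manager('mode')],
                  lambda: print('running test body'))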
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_no_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
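
Before the test body runs, the wrapper asks every registered combination whether it should execute in the current environment and skips with the collected reasons otherwise. A small stdlib-only sketch of that gate, assuming a hypothetical combination class (the real combination objects come from the TensorFlow test-combinations framework):

class GpuOnlyCombination:
  """Hypothetical combination that only runs when a GPU is available."""

  def __init__(self, has_gpu):
    self._has_gpu = has_gpu

  def should_execute_combination(self, kwargs):
    if kwargs.get("required_gpus", 0) and not self._has_gpu:
      return False, "Test requires a GPU but none is available."
    return True, None

def maybe_skip(test_case, combinations, kwargs):
  reasons = []
  for combination in combinations:
    ok, reason = combination.should_execute_combination(dict(kwargs))
    if not ok:
      reasons.append(" - " + reason)
  if reasons:
    # unittest.TestCase.skipTest raises SkipTest with the joined message.
    test_case.skipTest("\n".join(reasons))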
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_no_steps_with_batch_size_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
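
The entries that follow switch from the no_steps_with_batch_size case to the with_steps_no_batch_size case; each strategy/mode pairing is materialized as its own generated test method, which is where names such as test_distribution_OneDeviceCPU_mode_eager come from. A rough sketch of how such named variants can be produced with absl.testing.parameterized; the distribution labels below are plain strings rather than real strategy objects, and the test body is only a placeholder:

from absl.testing import parameterized

class InputParamsTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ("test_distribution_OneDeviceCPU_mode_eager", "OneDeviceCPU", "eager"),
      ("test_distribution_OneDeviceGPU_mode_graph", "OneDeviceGPU", "graph"),
  )
  def test_calculating_input_params(self, distribution, mode):
    # The real test would compute steps/batch_size under `distribution`;
    # here we only check that the variant received its parameters.
    self.assertIn(mode, ("eager", "graph"))
    self.assertTrue(distribution)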
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
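
The omitted_arguments and missing_arguments checks in the wrapper compare the test method's signature against the keyword arguments actually supplied, raising if either side has something the other does not. The same idea expressed with the standard inspect module (TensorFlow uses its own tf_inspect shim); sample_test and its parameters are invented for the example:

import inspect

def check_arguments(test_method, kwargs):
  requested = inspect.getfullargspec(test_method).args
  supplied = set(kwargs) | {"self"}

  omitted = set(requested) - supplied
  if omitted:
    raise ValueError(
        "The test requires parameters that were not passed: %s" % omitted)

  extra = supplied - set(requested)
  if extra:
    raise ValueError(
        "The test does not take parameters that were passed: %s" % extra)

def sample_test(self, distribution, mode):
  del self, distribution, mode

check_arguments(sample_test, {"distribution": "OneDeviceCPU", "mode": "eager"})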
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
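
The entries below are not hand-written tests: the combinations framework
expands a single test method, declared once with a set of distribution and
mode parameters, into one wrapped decorated method per combination, which is
why every variant documents the identical wrapper body shown above. A rough
sketch of how such a test is typically declared follows; the decorator paths
and the trivial body are assumptions for illustration, not the source of this
module:

import tensorflow.compat.v2 as tf
from absl.testing import parameterized

from keras.distribute.strategy_combinations import strategies_minus_tpu


class DistributedInputParamsTest(tf.test.TestCase, parameterized.TestCase):

  # Hypothetical declaration: one generated variant per (distribution, mode)
  # pair, e.g. ..._test_distribution_OneDeviceCPU_mode_eager.
  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=strategies_minus_tpu, mode=["graph", "eager"]))
  def test_calculating_input_params_with_steps_no_batch_size(
      self, distribution):
    # Illustrative body only; the real test exercises the input-parameter
    # calculation utilities under the given distribution strategy.
    self.assertGreaterEqual(distribution.num_replicas_in_sync, 1)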
def test_calculating_input_params_with_steps_no_batch_size_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_no_batch_size_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_no_batch_size_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_no_batch_size_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_no_batch_size_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
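The combination-generated methods listed below all share this wrapper body; only the bound distribution and mode arguments differ, which is why each generated name encodes a strategy (OneDeviceGPU, TPU, Mirrored2GPUs, ...) and an execution mode (graph or eager). The following is a rough, hypothetical sketch of how such per-combination methods can be stamped onto a test class; it is illustrative only and is not the actual TensorFlow combinations machinery (generate_combinations and combo_addition are invented names).

import functools
import itertools
import unittest


def generate_combinations(distributions, modes):
  """Class decorator creating one test method per (distribution, mode) pair."""

  def decorator(cls):
    for name in [n for n in dir(cls) if n.startswith("combo_")]:
      template = getattr(cls, name)
      for dist, mode in itertools.product(distributions, modes):
        # partialmethod keeps `self` binding while fixing the combination args.
        method = functools.partialmethod(
            template, distribution=dist, mode=mode)
        setattr(cls,
                "test_%s_distribution_%s_mode_%s" % (name[6:], dist, mode),
                method)
      delattr(cls, name)
    return cls

  return decorator


@generate_combinations(["OneDeviceCPU", "OneDeviceGPU"], ["graph", "eager"])
class ExampleTest(unittest.TestCase):

  def combo_addition(self, distribution, mode):
    # A real test would build and fit a Keras model under the given strategy.
    self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
  unittest.main()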
def test_calculating_input_params_with_steps_with_batch_size_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calculating_input_params_with_steps_with_batch_size_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_mixed_precision_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_nested_numpy_arrays_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
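
The generated method names listed here (test_..._test_distribution_X_mode_Y) come from the distribute test-combinations framework, which expands a single parameterized test into one wrapped `decorated` method per (distribution, mode) pair. The following sketch shows how such a test is typically declared; it assumes the tf.__internal__ combinations aliases are available, and the class name and test body are illustrative only, not the actual test from this module.

import tensorflow.compat.v2 as tf
from absl.testing import parameterized

from keras.distribute.strategy_combinations import all_strategies

# Assumed aliases for the combinations framework used by these tests.
ds_combinations = tf.__internal__.distribute.combinations
framework_combinations = tf.__internal__.test.combinations


class ExampleDistributeTest(tf.test.TestCase, parameterized.TestCase):

  @ds_combinations.generate(
      framework_combinations.combine(
          distribution=all_strategies, mode=['graph', 'eager']))
  def test_calling_model_with_numpy_arrays(self, distribution):
    # The decorator expands this method into one wrapped method per
    # combination, producing names such as
    # test_calling_model_with_numpy_arrays_test_distribution_Default_mode_graph.
    with distribution.scope():
      self.assertGreaterEqual(distribution.num_replicas_in_sync, 1)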
def test_calling_model_with_numpy_arrays_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_calling_model_with_numpy_arrays_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_evaluate_with_partial_batch_test_distribution_TPU_mode_graph_batchsize_4(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_evaluate_with_partial_batch_test_distribution_TPU_mode_graph_batchsize_6(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_flatten_predict_outputs_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_gradients_are_none_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_no_target_model_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_numpy_with_sample_weights_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

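
The generated method names in this listing encode the test combination directly: the base test name is followed by one _<parameter>_<value> suffix per combination parameter, here test_distribution and mode. The sketch below shows how such names could be enumerated from a cross product of parameter values; it is only an illustration of the visible naming pattern, not the framework's own name-generation code (which also filters out combinations that cannot run, e.g. multi-worker strategies appear only in eager mode), and the helper combination_names is hypothetical.

import itertools


def combination_names(base, **parameters):
  """Yield one suffixed test name per combination of the parameter values."""
  keys = list(parameters)  # insertion order: test_distribution, then mode
  for values in itertools.product(*(parameters[key] for key in keys)):
    suffix = "".join("_{}_{}".format(key, value)
                     for key, value in zip(keys, values))
    yield base + suffix


for name in combination_names(
    "test_operator_overload_mixed_precision",
    test_distribution=["OneDeviceCPU", "OneDeviceGPU", "TPU"],
    mode=["eager", "graph"]):
  print(name)
# First line printed:
# test_operator_overload_mixed_precision_test_distribution_OneDeviceCPU_mode_eager
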
def test_operator_overload_mixed_precision_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_CentralStorageCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_Default_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_operator_overload_mixed_precision_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_optimizer_in_cross_replica_context_raises_error_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_optimizer_in_cross_replica_context_raises_error_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_multi_output_model_with_partial_batch_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_predict_with_partial_batch_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

class TestModelCapturesStrategy (methodName='runTest')

Tests that model creation captures the strategy.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestModelCapturesStrategy(tf.test.TestCase, parameterized.TestCase):
  """Tests that model creation captures the strategy."""

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(distribution=all_strategies, mode=['eager']))
  def test_fit_and_evaluate(self, distribution):
    dataset = tf.data.Dataset.from_tensor_slices(
        (tf.ones(shape=(64,)), tf.ones(shape=(64,))))
    dataset = dataset.batch(8 * distribution.num_replicas_in_sync)
    # Make model with distribution strategy
    with distribution.scope():
      model = DeterministicModel(distribution)
      optimizer = keras.optimizers.adam_v2.Adam(1e-4)

    # Compile & evaluate the model outside of the distribution strategy scope
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=['binary_accuracy'])

    # Call `optimizer.iterations` out of strategy scope.
    self.assertEqual(model.optimizer.iterations.numpy(), 0)

    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)

    results = model.evaluate(dataset)
    results = dict(zip(model.metrics_names, results))

    # Check that the metrics have a result we expect
    self.assertEqual(results['binary_accuracy'], 1.0)
    self.assertAllClose(results['loss'], 0.0)

    # Assert that all metric/optimizer/model variables were made in the
    # distribution strategy (Test that compile uses the captured
    # distribution strategy)
    metric_vars = tf.nest.flatten(
        [metric.variables for metric in model.metrics])
    for var in metric_vars:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.optimizer._weights:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.variables:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))

    # Make sure the metric must be created in the same scope as the model:
    # This shouldn't raise any validation errors
    with distribution.scope():
      metric = keras.metrics.BinaryAccuracy()
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[metric])

    # This should raise an error because the metric is constructed
    # outside of the scope, and not by compile
    if tf.distribute.has_strategy():
      with self.assertRaisesRegex(ValueError, 'All metrics must be created in'):
        model.compile(
            optimizer=keras.optimizers.adam_v2.Adam(1e-4),
            loss=keras.losses.MeanSquaredError(),
            metrics=[keras.metrics.BinaryAccuracy()])

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
          mode=['eager']))
  def test_optimizer(self, distribution):
    temp_dir = os.path.join(self.get_temp_dir(), 'ckpt')

    def create_model():
      model = keras.models.Sequential([
          keras.layers.Dense(1),
      ])
      model.compile(optimizer='adam', loss='mse')
      model.build([None, 1])  # create weights.
      self.assertEmpty(model.optimizer.weights)
      return model

    model = create_model()
    x = y = tf.ones(shape=(1, 1))
    model.fit(x=x, y=y, batch_size=1)
    model.save_weights(temp_dir)

    with distribution.scope():
      model = create_model()
      model.load_weights(temp_dir)
      self.assertNotEmpty(model.optimizer.weights)
      self.assertTrue(
          distributed_training_utils.is_distributed_variable(
              model.optimizer.weights[0]))

    with distribution.scope():
      model = create_model()
    # create/restore slot variables outside of scope is fine.
    model.load_weights(temp_dir)
    self.assertNotEmpty(model.optimizer.weights)
    self.assertTrue(
        distributed_training_utils.is_distributed_variable(
            model.optimizer.weights[0]))
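
As a minimal stand-alone sketch of the property asserted above (assumptions: eager mode, a single-CPU MirroredStrategy, and a placeholder Dense model rather than the DeterministicModel used by the test), variables created while compiling and fitting outside the scope still belong to the strategy captured when the model was built:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(['/cpu:0'])
with strategy.scope():
  # Model and optimizer (and hence their variables) capture the strategy.
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
  optimizer = tf.keras.optimizers.SGD(0.1)

# Compile and fit outside the scope; compile reuses the captured strategy.
model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])
model.fit(tf.ones((8, 4)), tf.ones((8, 1)), epochs=1, verbose=0)

# Metric, optimizer and model variables all report being created in scope.
metric_vars = tf.nest.flatten([m.variables for m in model.metrics])
for var in metric_vars + model.variables + model.optimizer.variables():
  assert strategy.extended.variable_created_in_scope(var)
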

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Methods

def test_fit_and_evaluate_test_distribution_CentralStorageCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Default_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_fit_and_evaluate_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_optimizer_test_distribution_Mirrored1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

class TestRegularizerLoss (methodName='runTest')

Base class for tests that need to test TensorFlow.

Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.

Expand source code
class TestRegularizerLoss(tf.test.TestCase, parameterized.TestCase):

  class IdentityRegularizer(keras.regularizers.Regularizer):

    def __call__(self, x):
      return tf.identity(x)

  class AddLayer(keras.layers.Layer):

    def build(self, _):
      self.v = self.add_weight(
          'v', (),
          initializer='ones',
          regularizer=TestRegularizerLoss.IdentityRegularizer())

    def call(self, inputs):
      return inputs + self.v

  @staticmethod
  def loss_fn(_, y_pred):
    return tf.reduce_mean(y_pred)

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.times(all_strategy_combinations_minus_default()))
  def test_regularizer_loss(self, distribution):
    batch_size = 2
    if not distributed_training_utils.global_batch_size_supported(distribution):
      batch_size //= distribution.num_replicas_in_sync

    # Given an input x, which is always 1, and variable v, this model computes
    # Loss=x+v+regularizer_loss, where regularizer_loss=v and the variable is
    # initialized to 1. Therefore, this model computes Loss=1+2v, and so the
    # gradient dLoss/dv = 2. This gradient of 2 is averaged over all examples
    # in a batch and then multiplied by the learning rate of 1. As a result,
    # the model update for one batch should subtract 2 from v, resulting in v
    # being -1. If the regularizer loss is not scaled correctly by the number
    # of replicas, the variable value will be incorrect when the number of
    # replicas is > 1, e.g. it will be -2 if num replicas = 2.
    with distribution.scope():
      x = keras.layers.Input(shape=(1,), batch_size=batch_size)
      y = TestRegularizerLoss.AddLayer()(x)
      model = keras.models.Model(inputs=x, outputs=y)
      opt = gradient_descent_keras.SGD(1.)
      model.compile(
          opt,
          loss=TestRegularizerLoss.loss_fn)
      model.fit(
          x=np.array([[1.], [1.]], dtype=np.float32),
          y=np.array([[1.], [1.]], dtype=np.float32),
          batch_size=batch_size)
      v = model.get_weights()[0]
      self.assertEqual(-1.0, v)
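
The arithmetic in the comment above can be checked in isolation. A minimal sketch (assumptions: eager mode, a single replica, and a hand-written loss standing in for the model plus identity regularizer):

import tensorflow as tf

v = tf.Variable(1.0)   # the AddLayer weight, initialized to 1
x = tf.constant(1.0)   # the input is always 1
with tf.GradientTape() as tape:
  # Prediction (x + v) plus the identity-regularizer term (another v).
  loss = (x + v) + v
grad = tape.gradient(loss, v)  # dLoss/dv = 2
tf.keras.optimizers.SGD(learning_rate=1.0).apply_gradients([(grad, v)])
assert grad.numpy() == 2.0
assert v.numpy() == -1.0  # 1 - 1.0 * 2 = -1, the value the test expects
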

Ancestors

  • tensorflow.python.framework.test_util.TensorFlowTestCase
  • absl.testing.parameterized.TestCase
  • absl.testing.absltest.TestCase
  • absl.third_party.unittest3_backport.case.TestCase
  • unittest.case.TestCase

Class variables

var AddLayer

This is the class from which all layers inherit.

A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves computation, defined in the call() method, and a state (weight variables), defined either in the constructor __init__() or in the build() method.

Users will just instantiate a layer and then treat it as a callable.

Args

trainable
Boolean, whether the layer's variables should be trainable.
name
String name of the layer.
dtype
The dtype of the layer's computations and weights. Can also be a tf.keras.mixed_precision.Policy, which allows the computation and weight dtype to differ. The default of None means to use tf.keras.mixed_precision.global_policy(), which is a float32 policy unless set to a different value.
dynamic
Set this to True if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If False, we assume that the layer can safely be used to generate a static computation graph.

Attributes

name
The name of the layer (string).
dtype
The dtype of the layer's weights.
variable_dtype
Alias of dtype.
compute_dtype
The dtype of the layer's computations. Layers automatically cast inputs to this dtype which causes the computations and output to also be in this dtype. When mixed precision is used with a tf.keras.mixed_precision.Policy, this will be different than variable_dtype.
dtype_policy
The layer's dtype policy. See the tf.keras.mixed_precision.Policy documentation for details.
trainable_weights
List of variables to be included in backprop.
non_trainable_weights
List of variables that should not be included in backprop.
weights
The concatenation of the lists trainable_weights and non_trainable_weights (in this order).
trainable
Whether the layer should be trained (boolean), i.e. whether its potentially-trainable weights should be returned as part of layer.trainable_weights.
input_spec
Optional (list of) InputSpec object(s) specifying the constraints on inputs that can be accepted by the layer.

We recommend that descendants of Layer implement the following methods:

  • __init__(): Defines custom layer attributes, and creates layer state variables that do not depend on input shapes, using add_weight().
  • build(self, input_shape): This method can be used to create weights that depend on the shape(s) of the input(s), using add_weight(). __call__() will automatically build the layer (if it has not been built yet) by calling build().
  • call(self, inputs, *args, **kwargs): Called in __call__ after making sure build() has been called. call() performs the logic of applying the layer to the input tensors (which should be passed in as argument). Two reserved keyword arguments you can optionally use in call() are:
    • training (boolean, whether the call is in inference mode or training mode). See more details in the layer/model subclassing guide
    • mask (boolean tensor encoding masked timesteps in the input, used in RNN layers). See more details in the layer/model subclassing guide. A typical signature for this method is call(self, inputs); the user can optionally add training and mask if the layer needs them. *args and **kwargs are only useful for future extension, when more input parameters are planned to be added.
  • get_config(self): Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in __init__, then override from_config(self) as well. This method is used when saving the layer or a model that contains this layer.

Examples:

Here's a basic example: a layer with two variables, w and b, that returns y = w . x + b. It shows how to implement build() and call(). Variables set as attributes of a layer are tracked as weights of the layers (in layer.weights).

class SimpleDense(Layer):

  def __init__(self, units=32):
      super(SimpleDense, self).__init__()
      self.units = units

  def build(self, input_shape):  # Create the state of the layer (weights)
    w_init = tf.random_normal_initializer()
    self.w = tf.Variable(
        initial_value=w_init(shape=(input_shape[-1], self.units),
                             dtype='float32'),
        trainable=True)
    b_init = tf.zeros_initializer()
    self.b = tf.Variable(
        initial_value=b_init(shape=(self.units,), dtype='float32'),
        trainable=True)

  def call(self, inputs):  # Defines the computation from inputs to outputs
      return tf.matmul(inputs, self.w) + self.b

# Instantiates the layer.
linear_layer = SimpleDense(4)

# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
assert len(linear_layer.weights) == 2

# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2

Note that the method add_weight() offers a shortcut to create weights:

class SimpleDense(Layer):

  def __init__(self, units=32):
      super(SimpleDense, self).__init__()
      self.units = units

  def build(self, input_shape):
      self.w = self.add_weight(shape=(input_shape[-1], self.units),
                               initializer='random_normal',
                               trainable=True)
      self.b = self.add_weight(shape=(self.units,),
                               initializer='random_normal',
                               trainable=True)

  def call(self, inputs):
      return tf.matmul(inputs, self.w) + self.b

Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during call(). Here's an example layer that computes the running sum of its inputs:

class ComputeSum(Layer):

  def __init__(self, input_dim):
      super(ComputeSum, self).__init__()
      # Create a non-trainable weight.
      self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
                               trainable=False)

  def call(self, inputs):
      self.total.assign_add(tf.reduce_sum(inputs, axis=0))
      return self.total

my_sum = ComputeSum(2)
x = tf.ones((2, 2))

y = my_sum(x)
print(y.numpy())  # [2. 2.]

y = my_sum(x)
print(y.numpy())  # [4. 4.]

assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []

For more information about creating layers, see the guide Making new Layers and Models via subclassing

var IdentityRegularizer

Regularizer base class.

Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are summed into the loss function that the network optimizes.

Regularization penalties are applied on a per-layer basis. The exact API will depend on the layer, but many layers (e.g. Dense, Conv1D, Conv2D and Conv3D) have a unified API.

These layers expose 3 keyword arguments:

  • kernel_regularizer: Regularizer to apply a penalty on the layer's kernel
  • bias_regularizer: Regularizer to apply a penalty on the layer's bias
  • activity_regularizer: Regularizer to apply a penalty on the layer's output

All layers (including custom layers) expose activity_regularizer as a settable property, whether or not it is in the constructor arguments.

The value returned by the activity_regularizer is divided by the input batch size so that the relative weighting between the weight regularizers and the activity regularizers does not change with the batch size.

You can access a layer's regularization penalties by calling layer.losses after calling the layer on inputs.

Example

>>> layer = tf.keras.layers.Dense(
...     5, input_dim=5,
...     kernel_initializer='ones',
...     kernel_regularizer=tf.keras.regularizers.L1(0.01),
...     activity_regularizer=tf.keras.regularizers.L2(0.01))
>>> tensor = tf.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size) is 5
>>> tf.math.reduce_sum(layer.losses)
<tf.Tensor: shape=(), dtype=float32, numpy=5.25>
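
In this example the kernel penalty is 0.01 * sum(|kernel|) = 0.01 * 25 = 0.25, since the kernel is a 5x5 matrix of ones. Each output entry equals 2.0 * 5 = 10 (the bias is zero-initialized), so the activity penalty is 0.01 * sum(output**2) = 0.01 * 25 * 100 = 25, which divided by the batch size of 5 gives 5, for a total of 5.25.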

Available penalties

tf.keras.regularizers.L1(0.3)  # L1 Regularization Penalty
tf.keras.regularizers.L2(0.1)  # L2 Regularization Penalty
tf.keras.regularizers.L1L2(l1=0.01, l2=0.01)  # L1 + L2 penalties

Directly calling a regularizer

Compute a regularization loss on a tensor by directly calling a regularizer as if it were a one-argument function.

E.g.

>>> regularizer = tf.keras.regularizers.L2(2.)
>>> tensor = tf.ones(shape=(5, 5))
>>> regularizer(tensor)
<tf.Tensor: shape=(), dtype=float32, numpy=50.0>
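
Here the penalty is 2.0 * sum(x**2) = 2.0 * 25 = 50.0, since the tensor contains 25 ones.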

Developing new regularizers

Any function that takes in a weight matrix and returns a scalar tensor can be used as a regularizer, e.g.:

>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
... def l1_reg(weight_matrix):
...    return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
...
>>> layer = tf.keras.layers.Dense(5, input_dim=5,
...     kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]

Alternatively, you can write your custom regularizers in an object-oriented way by extending this regularizer base class, e.g.:

>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
... class L2Regularizer(tf.keras.regularizers.Regularizer):
...   def __init__(self, l2=0.):  # pylint: disable=redefined-outer-name
...     self.l2 = l2
...
...   def __call__(self, x):
...     return self.l2 * tf.math.reduce_sum(tf.math.square(x))
...
...   def get_config(self):
...     return {'l2': float(self.l2)}
...
>>> layer = tf.keras.layers.Dense(
...   5, input_dim=5, kernel_initializer='ones',
...   kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]

A note on serialization and deserialization:

Registering the regularizers as serializable is optional if you are just training and executing models, exporting to and from SavedModels, or saving and loading weight checkpoints.

Registration is required for Keras model_to_estimator, saving and loading models to and from the HDF5 format, Keras model cloning, some visualization utilities, and exporting models to and from JSON. If you use this functionality, you must make sure any Python process running your model has also defined and registered your custom regularizer.

tf.keras.utils.register_keras_serializable is only available in TF 2.1 and beyond. In earlier versions of TensorFlow you must pass your custom regularizer to the custom_objects argument of methods that expect custom regularizers to be registered as serializable.
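
As a rough sketch (the file name is hypothetical, and l1_reg is the function registered in the example above), passing a custom regularizer through custom_objects when loading looks like this:

# Load a model that was saved with the custom l1_reg regularizer.
# 'my_model.h5' is an illustrative file name.
loaded = tf.keras.models.load_model(
    'my_model.h5', custom_objects={'l1_reg': l1_reg})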

Static methods

def loss_fn(_, y_pred)
Expand source code
@staticmethod
def loss_fn(_, y_pred):
  return tf.reduce_mean(y_pred)

Methods

def test_regularizer_loss_test_distribution_Mirrored2GPUs_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

Expand source code
def decorated(self, **kwargs):
  """A wrapped test method that can treat some arguments in a special way."""
  original_kwargs = kwargs.copy()

  # Skip combinations that are going to be executed in a different testing
  # environment.
  reasons_to_skip = []
  for combination in test_combinations:
    should_execute, reason = combination.should_execute_combination(
        original_kwargs.copy())
    if not should_execute:
      reasons_to_skip.append(" - " + reason)

  if reasons_to_skip:
    self.skipTest("\n".join(reasons_to_skip))

  customized_parameters = []
  for combination in test_combinations:
    customized_parameters.extend(combination.parameter_modifiers())
  customized_parameters = set(customized_parameters)

  # The function for running the test under the total set of
  # `context_managers`:
  def execute_test_method():
    requested_parameters = tf_inspect.getfullargspec(test_method).args
    for customized_parameter in customized_parameters:
      for argument, value in customized_parameter.modified_arguments(
          original_kwargs.copy(), requested_parameters).items():
        if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
          kwargs.pop(argument, None)
        else:
          kwargs[argument] = value

    omitted_arguments = set(requested_parameters).difference(
        set(list(kwargs.keys()) + ["self"]))
    if omitted_arguments:
      raise ValueError("The test requires parameters whose arguments "
                       "were not passed: {} .".format(omitted_arguments))
    missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
        set(requested_parameters))
    if missing_arguments:
      raise ValueError("The test does not take parameters that were passed "
                       ": {} .".format(missing_arguments))

    kwargs_to_pass = {}
    for parameter in requested_parameters:
      if parameter == "self":
        kwargs_to_pass[parameter] = self
      else:
        kwargs_to_pass[parameter] = kwargs[parameter]
    test_method(**kwargs_to_pass)

  # Install `context_managers` before running the test:
  context_managers = []
  for combination in test_combinations:
    for manager in combination.context_managers(
        original_kwargs.copy()):
      context_managers.append(manager)

  if hasattr(contextlib, "nested"):  # Python 2
    # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
    with contextlib.nested(*context_managers):
      execute_test_method()
  else:  # Python 3
    with contextlib.ExitStack() as context_stack:
      for manager in context_managers:
        context_stack.enter_context(manager)
      execute_test_method()
def test_regularizer_loss_test_distribution_Mirrored2GPUs_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_MirroredCPUAndGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_MirroredCPUAndGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_MultiWorkerMirrored2x1CPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_MultiWorkerMirrored2x1GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_MultiWorkerMirrored2x2GPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_OneDeviceCPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_OneDeviceCPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_OneDeviceGPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_OneDeviceGPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_TPU_mode_eager(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.

def test_regularizer_loss_test_distribution_TPU_mode_graph(self, **kwargs)

A wrapped test method that can treat some arguments in a special way.
