Module keras.applications.inception_resnet_v2
Inception-ResNet V2 model for Keras.
Reference
- Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning (https://arxiv.org/abs/1602.07261) (AAAI 2017)
Source code
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Inception-ResNet V2 model for Keras.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
"""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_URL = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/inception_resnet_v2/')
layers = None
@keras_export('keras.applications.inception_resnet_v2.InceptionResNetV2',
'keras.applications.InceptionResNetV2')
def InceptionResNetV2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the Inception-ResNet v2 architecture.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For InceptionResNetV2, call
`tf.keras.applications.inception_resnet_v2.preprocess_input`
on your inputs before passing them to the model.
`inception_resnet_v2.preprocess_input`
will scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
or `(3, 299, 299)` (with `'channels_first'` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Stem block: 35 x 35 x 192
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
x = conv2d_bn(x, 32, 3, padding='valid')
x = conv2d_bn(x, 64, 3)
x = layers.MaxPooling2D(3, strides=2)(x)
x = conv2d_bn(x, 80, 1, padding='valid')
x = conv2d_bn(x, 192, 3, padding='valid')
x = layers.MaxPooling2D(3, strides=2)(x)
# Mixed 5b (Inception-A block): 35 x 35 x 320
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
for block_idx in range(1, 11):
x = inception_resnet_block(
x, scale=0.17, block_type='block35', block_idx=block_idx)
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
for block_idx in range(1, 21):
x = inception_resnet_block(
x, scale=0.1, block_type='block17', block_idx=block_idx)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 10):
x = inception_resnet_block(
x, scale=0.2, block_type='block8', block_idx=block_idx)
x = inception_resnet_block(
x, scale=1., activation=None, block_type='block8', block_idx=10)
# Final convolution block: 8 x 8 x 1536
x = conv2d_bn(x, 1536, 1, name='conv_7b')
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='inception_resnet_v2')
# Load weights.
if weights == 'imagenet':
if include_top:
fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='e693bd0210a403b3192acc6073ad2e96')
else:
fname = ('inception_resnet_v2_weights_'
'tf_dim_ordering_tf_kernels_notop.h5')
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='d19885ff4a710c122648d3b5c3b684e4')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def conv2d_bn(x,
filters,
kernel_size,
strides=1,
padding='same',
activation='relu',
use_bias=False,
name=None):
"""Utility function to apply conv + BN.
Args:
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
strides: strides in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
use_bias: whether to use a bias in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name)(
x)
if not use_bias:
bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3
bn_name = None if name is None else name + '_bn'
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
if activation is not None:
ac_name = None if name is None else name + '_ac'
x = layers.Activation(activation, name=ac_name)(x)
return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
"""Adds an Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
Args:
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of passing
`x` through an inception module) before adding them to the shortcut
branch. Let `r` be the output from the residual branch, the output of this
block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines the network
structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet
blocks are repeated many times in this network. We use `block_idx` to
identify each of the repetitions. For example, the first
Inception-ResNet-A block will have `block_type='block35', block_idx=0`,
and the layer names will have a common prefix `'block35_0'`.
activation: activation function to use at the end of the block (see
[activations](../activations.md)). When `activation=None`, no activation
is applied
(i.e., "linear" activation: `a(x) = x`).
Returns:
Output tensor for the block.
Raises:
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
"""
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)
branch_2 = conv2d_bn(branch_2, 64, 3)
branches = [branch_0, branch_1, branch_2]
elif block_type == 'block17':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 128, 1)
branch_1 = conv2d_bn(branch_1, 160, [1, 7])
branch_1 = conv2d_bn(branch_1, 192, [7, 1])
branches = [branch_0, branch_1]
elif block_type == 'block8':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(branch_1, 224, [1, 3])
branch_1 = conv2d_bn(branch_1, 256, [3, 1])
branches = [branch_0, branch_1]
else:
raise ValueError('Unknown Inception-ResNet block type. '
'Expects "block35", "block17" or "block8", '
'but got: ' + str(block_type))
block_name = block_type + '_' + str(block_idx)
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
mixed = layers.Concatenate(
axis=channel_axis, name=block_name + '_mixed')(
branches)
up = conv2d_bn(
mixed,
backend.int_shape(x)[channel_axis],
1,
activation=None,
use_bias=True,
name=block_name + '_conv')
x = layers.Lambda(
lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=backend.int_shape(x)[1:],
arguments={'scale': scale},
name=block_name)([x, up])
if activation is not None:
x = layers.Activation(activation, name=block_name + '_ac')(x)
return x
@keras_export('keras.applications.inception_resnet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.inception_resnet_v2.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
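The shape comments in the source above (35 x 35 x 320 after `mixed_5b`, 17 x 17 x 1088 after `mixed_6a`, 8 x 8 x 2080 after `mixed_7a`, 8 x 8 x 1536 after the final `conv_7b` block) can be checked directly. A small sketch, using `weights=None` so no download is needed; `conv_7b_ac` is the activation layer that `conv2d_bn` creates for the `conv_7b` name:

import tensorflow as tf

model = tf.keras.applications.InceptionResNetV2(weights=None)
for name in ('mixed_5b', 'mixed_6a', 'mixed_7a', 'conv_7b_ac'):
    print(name, model.get_layer(name).output_shape)
# Expected (channels_last):
# mixed_5b   (None, 35, 35, 320)
# mixed_6a   (None, 17, 17, 1088)
# mixed_7a   (None, 8, 8, 2080)
# conv_7b_ac (None, 8, 8, 1536)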
Functions
def InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', **kwargs)
Instantiates the Inception-ResNet v2 architecture.
Reference:
- Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning (https://arxiv.org/abs/1602.07261) (AAAI 2017)
This function returns a Keras image classification model, optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see the detailed examples at https://keras.io/api/applications/#usage-examples-for-image-classification-models.
For transfer learning use cases, make sure to read the guide to transfer learning & fine-tuning at https://keras.io/guides/transfer_learning/.
Note: each Keras Application expects a specific kind of input preprocessing. For InceptionResNetV2, call `tf.keras.applications.inception_resnet_v2.preprocess_input` on your inputs before passing them to the model; it scales input pixels between -1 and 1, as in the sketch below.
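A minimal sketch of that preprocessing plus feature-extraction pattern, assuming a random stand-in batch instead of real images (`weights=None` keeps the sketch offline; pass `weights='imagenet'` for pretrained features):

import numpy as np
import tensorflow as tf

# Stand-in batch: random pixels in [0, 255] in place of real images.
images = np.random.uniform(0, 255, size=(2, 299, 299, 3)).astype('float32')
x = tf.keras.applications.inception_resnet_v2.preprocess_input(images)  # now in [-1, 1]

# Feature extractor for transfer learning: no classification head, pooled output.
base = tf.keras.applications.InceptionResNetV2(
    include_top=False, weights=None, pooling='avg')
features = base(x)
print(features.shape)  # (2, 1536)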
Args
include_top: whether to include the fully-connected layer at the top of the network.
weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top` is `False` (otherwise the input shape has to be `(299, 299, 3)` with `'channels_last'` data format or `(3, 299, 299)` with `'channels_first'` data format). It should have exactly 3 input channels, and width and height should be no smaller than 75. E.g. `(150, 150, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor output of the last convolutional block.
- `'avg'` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be specified if `include_top` is `True`, and if no `weights` argument is specified.
classifier_activation: a `str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. When loading pretrained weights, `classifier_activation` can only be `None` or `"softmax"`.
**kwargs: for backwards compatibility only.
Returns
A `keras.Model` instance.
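For reference, a hedged end-to-end classification sketch; 'elephant.jpg' is a placeholder path, and `weights='imagenet'` downloads the pretrained weights on first use:

import numpy as np
import tensorflow as tf

model = tf.keras.applications.InceptionResNetV2(weights='imagenet')

# 'elephant.jpg' is a stand-in; any RGB image path works.
img = tf.keras.preprocessing.image.load_img('elephant.jpg', target_size=(299, 299))
x = tf.keras.preprocessing.image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add the batch dimension: (1, 299, 299, 3)
x = tf.keras.applications.inception_resnet_v2.preprocess_input(x)

preds = model.predict(x)  # (1, 1000) softmax scores
print(tf.keras.applications.inception_resnet_v2.decode_predictions(preds, top=3)[0])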
def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None)
Utility function to apply conv + BN.
Args
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
strides: strides in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
use_bias: whether to use a bias in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation and `name + '_bn'` for the batch norm layer.
Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
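The same conv + BN + activation pattern can be written against the public `tf.keras.layers` API; a standalone sketch assuming `channels_last` data format (the module-level `layers` object in the source is an internal indirection):

import tensorflow as tf

def conv2d_bn_sketch(x, filters, kernel_size, strides=1, padding='same',
                     activation='relu', name=None):
    # No conv bias: the BatchNormalization that follows learns its own offset.
    x = tf.keras.layers.Conv2D(filters, kernel_size, strides=strides,
                               padding=padding, use_bias=False, name=name)(x)
    bn_name = None if name is None else name + '_bn'
    # scale=False mirrors the source; the BN gamma can be folded into the
    # next layer, so it is left out here.
    x = tf.keras.layers.BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else name + '_ac'
        x = tf.keras.layers.Activation(activation, name=ac_name)(x)
    return x

inputs = tf.keras.Input(shape=(299, 299, 3))
outputs = conv2d_bn_sketch(inputs, 32, 3, strides=2, padding='valid')
print(tf.keras.Model(inputs, outputs).output_shape)  # (None, 149, 149, 32)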
def decode_predictions(preds, top=5)
Decodes the prediction of an ImageNet model.
Args
preds: Numpy array encoding a batch of predictions.
top: integer, how many top-guesses to return. Defaults to 5.
Returns
A list of lists of top class prediction tuples `(class_name, class_description, score)`. One list of tuples per sample in batch input.
Raises
ValueError: in case of invalid shape of the `preds` array (must be 2D).
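A short usage sketch; the random logits below are a stand-in for real model output (the class-index file is downloaded on first call):

import numpy as np
import tensorflow as tf

# Fake "predictions": one sample over the 1000 ImageNet classes.
preds = tf.nn.softmax(np.random.randn(1, 1000)).numpy()

top3 = tf.keras.applications.inception_resnet_v2.decode_predictions(preds, top=3)
for class_name, description, score in top3[0]:
    print(class_name, description, float(score))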
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu')
Adds an Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
Args
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch; the output of this block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`.
activation: activation function to use at the end of the block (see activations). When `activation=None`, no activation is applied (i.e., "linear" activation: `a(x) = x`).
Returns
Output tensor for the block.
Raises
ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`.
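The residual combination `x + scale * r` is what distinguishes these blocks from plain Inception blocks. A minimal numeric sketch of that step, with illustrative shapes and values:

import tensorflow as tf

scale = 0.17  # the value used for the block35 stage in this model

x = tf.ones((1, 35, 35, 320))       # shortcut branch
r = tf.fill((1, 35, 35, 320), 2.0)  # stand-in for the residual-branch output

# The same combination the Lambda layer in inception_resnet_block performs:
out = tf.keras.layers.Lambda(
    lambda inputs, scale: inputs[0] + inputs[1] * scale,
    arguments={'scale': scale})([x, r])

print(float(out[0, 0, 0, 0]))  # 1 + 0.17 * 2 = 1.34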
def preprocess_input(x, data_format=None)
Preprocesses a tensor or Numpy array encoding a batch of images.
Usage example with `applications.MobileNet`:

i = tf.keras.layers.Input([None, None, 3], dtype=tf.uint8)
x = tf.cast(i, tf.float32)
x = tf.keras.applications.mobilenet.preprocess_input(x)
core = tf.keras.applications.MobileNet()
x = core(x)
model = tf.keras.Model(inputs=[i], outputs=[x])
image = tf.image.decode_png(tf.io.read_file('file.png'))
result = model(image)

Args
x: a floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color channels, with values in the range [0, 255]. The preprocessed data are written over the input data if the data types are compatible. To avoid this behaviour, `numpy.copy(x)` can be used.
data_format: optional data format of the image tensor/array. Defaults to `None`, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to "channels_last").
Returns
Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
The input pixel values are scaled between -1 and 1, sample-wise.
Raises
ValueError: in case of unknown `data_format` argument.
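For this module the scaling is `mode='tf'`, i.e. `x / 127.5 - 1`; a quick numeric check with values chosen to hit the endpoints and midpoint:

import numpy as np
import tensorflow as tf

pixels = np.array([0.0, 127.5, 255.0], dtype='float32').reshape(1, 1, 1, 3)
scaled = tf.keras.applications.inception_resnet_v2.preprocess_input(pixels.copy())
print(scaled.ravel())  # [-1.  0.  1.]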