from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import warnings

from ..engine import Layer, InputSpec
from .. import backend as K
from ..utils import conv_utils
from ..utils.generic_utils import to_list
from .. import regularizers
from .. import constraints
from .. import activations
from .. import initializers


class MaxoutDense(Layer):
    """A dense maxout layer.

    A `MaxoutDense` layer takes the element-wise maximum of
    `nb_feature` `Dense(input_dim, output_dim)` linear layers.
    This allows the layer to learn a convex,
    piecewise linear activation function over the inputs.

    Note that this is a *linear* layer;
    if you wish to apply an activation function
    (you shouldn't need to -- maxout layers are universal function approximators),
    an `Activation` layer must be added after.

    # Arguments
        output_dim: int > 0.
        nb_feature: number of Dense layers to use internally.
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and `(output_dim,)` for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (e.g. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (e.g. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.

    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.

    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.
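
    # Example
    A minimal usage sketch (illustrative only; it assumes the Keras 1-style
    `Sequential` API used by the other examples in this module). The layer
    takes the maximum over `nb_feature` affine projections of the input:

    ```python
    # maxout over 4 affine maps of a 128-d input
    model = Sequential()
    model.add(MaxoutDense(64, nb_feature=4, input_dim=128))
    # now model.output_shape == (None, 64)
    ```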

    # References
        - [Maxout Networks](http://arxiv.org/abs/1302.4389)
    """

    def __init__(self, output_dim,
                 nb_feature=4,
                 init='glorot_uniform',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `MaxoutDense` layer is deprecated '
                      'and will be removed after 06/2017.')
        self.output_dim = output_dim
        self.nb_feature = nb_feature
        self.init = initializers.get(init)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MaxoutDense, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))

        self.W = self.add_weight((self.nb_feature, input_dim, self.output_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((self.nb_feature, self.output_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)

    def call(self, x):
        # no activation, this layer is only linear.
        # W has shape (nb_feature, input_dim, output_dim), so the dot product
        # gives (nb_samples, nb_feature, output_dim); the max over the feature
        # pieces (axis=1) is the maxout output of shape (nb_samples, output_dim).
        output = K.dot(x, self.W)
        if self.bias:
            output += self.b
        output = K.max(output, axis=1)
        return output

    def get_config(self):
        config = {'output_dim': self.output_dim,
                  'init': initializers.serialize(self.init),
                  'nb_feature': self.nb_feature,
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer':
                      regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(MaxoutDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Highway(Layer):
    """Densely connected highway network.

    Highway layers are a natural extension of LSTMs to feedforward networks.

    # Arguments
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (i.e. "linear" activation: a(x) = x).
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and `(output_dim,)` for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (e.g. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (e.g. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.

    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.

    # Output shape
        2D tensor with shape: `(nb_samples, input_dim)`.
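
    # Example
    A minimal usage sketch (illustrative only; Keras 1-style `Sequential` API
    as in the other examples in this module). The layer mixes a transformed
    input with the raw input through a learned sigmoid gate, so the output
    width always equals the input width:

    ```python
    model = Sequential()
    model.add(Highway(activation='relu', input_dim=64))
    # now model.output_shape == (None, 64)
    ```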

    # References
        - [Highway Networks](http://arxiv.org/abs/1505.00387v2)
    """

    def __init__(self,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `Highway` layer is deprecated '
                      'and will be removed after 06/2017.')
        if 'transform_bias' in kwargs:
            kwargs.pop('transform_bias')
            warnings.warn('`transform_bias` argument is deprecated and '
                          'has been removed.')
        self.init = initializers.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Highway, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))

        self.W = self.add_weight((input_dim, input_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.W_carry = self.add_weight((input_dim, input_dim),
                                       initializer=self.init,
                                       name='W_carry')
        if self.bias:
            self.b = self.add_weight((input_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
            self.b_carry = self.add_weight((input_dim,),
                                           initializer='one',
                                           name='b_carry')
        else:
            self.b_carry = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, x):
        # transform gate: sigmoid(x . W_carry + b_carry), in [0, 1]
        y = K.dot(x, self.W_carry)
        if self.bias:
            y += self.b_carry
        transform_weight = activations.sigmoid(y)
        # candidate activation: activation(x . W + b)
        y = K.dot(x, self.W)
        if self.bias:
            y += self.b
        act = self.activation(y)
        # blend the transformed input with the carried (raw) input
        act *= transform_weight
        output = act + (1 - transform_weight) * x
        return output

    def get_config(self):
        config = {'init': initializers.serialize(self.init),
                  'activation': activations.serialize(self.activation),
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer':
                      regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(Highway, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


def AtrousConvolution1D(*args, **kwargs):
    from ..layers import Conv1D
    if 'atrous_rate' in kwargs:
        rate = kwargs.pop('atrous_rate')
    else:
        rate = 1
    kwargs['dilation_rate'] = rate
    warnings.warn('The `AtrousConvolution1D` layer has been deprecated. '
                  'Use instead the `Conv1D` layer with the `dilation_rate` '
                  'argument.')
    return Conv1D(*args, **kwargs)


def AtrousConvolution2D(*args, **kwargs):
    from ..layers import Conv2D
    if 'atrous_rate' in kwargs:
        rate = kwargs.pop('atrous_rate')
    else:
        rate = 1
    kwargs['dilation_rate'] = rate
    warnings.warn('The `AtrousConvolution2D` layer has been deprecated. '
                  'Use instead the `Conv2D` layer with the `dilation_rate` '
                  'argument.')
    return Conv2D(*args, **kwargs)
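

# Migration sketch for the two deprecated wrappers above (argument values are
# illustrative): each call is forwarded unchanged to the new layer, with
# `atrous_rate` renamed to `dilation_rate`.
#     AtrousConvolution1D(64, 3, atrous_rate=2)
#         -> Conv1D(64, 3, dilation_rate=2)
#     AtrousConvolution2D(64, (3, 3), atrous_rate=(2, 2))
#         -> Conv2D(64, (3, 3), dilation_rate=(2, 2))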


class Recurrent(Layer):
    """Abstract base class for recurrent layers.

    Do not use in a model -- it's not a valid layer!
    Use its children classes `LSTM`, `GRU` and `SimpleRNN` instead.

    All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
    follow the specifications of this class and accept
    the keyword arguments listed below.

    # Example

    ```python
    # as the first layer in a Sequential model
    model = Sequential()
    model.add(LSTM(32, input_shape=(10, 64)))
    # now model.output_shape == (None, 32)
    # note: `None` is the batch dimension.

    # for subsequent layers, no need to specify the input size:
    model.add(LSTM(16))

    # to stack recurrent layers, you must use return_sequences=True
    # on any recurrent layer that feeds into another recurrent layer.
    # note that you only need to specify the input size on the first layer.
    model = Sequential()
    model.add(LSTM(64, input_dim=64, input_length=10, return_sequences=True))
    model.add(LSTM(32, return_sequences=True))
    model.add(LSTM(10))
    ```

    # Arguments
        weights: list of Numpy arrays to set as initial weights.
            The list should have 3 elements, of shapes:
            `[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed up an RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
        implementation: one of {0, 1, or 2}.
            If set to 0, the RNN will use
            an implementation that uses fewer, larger matrix products,
            thus running faster on CPU but consuming more memory.
            If set to 1, the RNN will use more matrix products,
            but smaller ones, thus running slower
            (may actually be faster on GPU) while consuming less memory.
            If set to 2 (LSTM/GRU only),
            the RNN will combine the input gate,
            the forget gate and the output gate into a single matrix,
            enabling more time-efficient parallelization on the GPU.
            Note: RNN dropout must be shared for all gates,
            resulting in a slightly reduced regularization.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        input_length: Length of input sequences, to be specified
            when it is constant.
            This argument is required if you are going to connect
            `Flatten` then `Dense` layers upstream
            (without it, the shape of the dense outputs cannot be computed).
            Note that if the recurrent layer is not the first layer
            in your model, you would need to specify the input length
            at the level of the first layer
            (e.g. via the `input_shape` argument).

    # Input shapes
        3D tensor with shape `(batch_size, timesteps, input_dim)`,
        (Optional) 2D tensors with shape `(batch_size, output_dim)`.

    # Output shape
        - if `return_state`: a list of tensors. The first tensor is
            the output. The remaining tensors are the last states,
            each with shape `(batch_size, units)`.
        - if `return_sequences`: 3D tensor with shape
            `(batch_size, timesteps, units)`.
        - else, 2D tensor with shape `(batch_size, units)`.

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch. This assumes a one-to-one mapping
        between samples in different successive batches.

        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                if sequential model:
                    `batch_input_shape=(...)` to the first layer in your model.
                else for functional model with 1 or more Input layers:
                    `batch_shape=(...)` to all the first layers in your model.
                This is the expected shape of your inputs
                *including the batch size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.
            - specify `shuffle=False` when calling fit().

        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.
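
        A minimal stateful sketch (illustrative shapes; `x_train` and
        `y_train` are placeholder arrays):

        ```python
        model = Sequential()
        model.add(LSTM(32, stateful=True, batch_input_shape=(32, 10, 16)))
        model.compile(optimizer='rmsprop', loss='mse')
        model.fit(x_train, y_train, batch_size=32, shuffle=False)
        model.reset_states()
        ```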

    # Note on specifying the initial state of RNNs
        You can specify the initial state of RNN layers symbolically by
        calling them with the keyword argument `initial_state`. The value of
        `initial_state` should be a tensor or list of tensors representing
        the initial state of the RNN layer.

        You can specify the initial state of RNN layers numerically by
        calling `reset_states` with the keyword argument `states`. The value of
        `states` should be a numpy array or list of numpy arrays representing
        the initial state of the RNN layer.
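
        A minimal symbolic-initial-state sketch using the functional API
        (tensor names are placeholders; the state tensor must have shape
        `(batch_size, units)`):

        ```python
        inputs = Input(shape=(10, 16))
        initial_h = Input(shape=(32,))
        outputs = SimpleRNN(32)(inputs, initial_state=initial_h)
        ```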
"""

    def __init__(self, return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 implementation=0,
                 **kwargs):
        super(Recurrent, self).__init__(**kwargs)
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll
        self.implementation = implementation
        self.supports_masking = True
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = None
        self.dropout = 0
        self.recurrent_dropout = 0

    def compute_output_shape(self, input_shape):
        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        if self.return_sequences:
            output_shape = (input_shape[0], input_shape[1], self.units)
        else:
            output_shape = (input_shape[0], self.units)

        if self.return_state:
            state_shape = [(input_shape[0], self.units) for _ in self.states]
            return [output_shape] + state_shape
        else:
            return output_shape

    def compute_mask(self, inputs, mask):
        if isinstance(mask, list):
            mask = mask[0]
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None for _ in self.states]
            return [output_mask] + state_mask
        else:
            return output_mask

    def step(self, inputs, states):
        raise NotImplementedError

    def get_constants(self, inputs, training=None):
        return []

    def get_initial_state(self, inputs):
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_state = K.tile(initial_state, [1, self.units])  # (samples, output_dim)
        initial_state = [initial_state for _ in range(len(self.states))]
        return initial_state

    def preprocess_input(self, inputs, training=None):
        return inputs

    def __call__(self, inputs, initial_state=None, **kwargs):
        # If there are multiple inputs, then
        # they should be the main input and `initial_state`
        # e.g. when loading a model from file
        if (isinstance(inputs, (list, tuple)) and
                len(inputs) > 1 and initial_state is None):
            initial_state = inputs[1:]
            inputs = inputs[0]

        # If `initial_state` is specified,
        # and if it is a Keras tensor,
        # then add it to the inputs and temporarily
        # modify the input spec to include the state.
        if initial_state is None:
            return super(Recurrent, self).__call__(inputs, **kwargs)

        if not isinstance(initial_state, (list, tuple)):
            initial_state = [initial_state]

        is_keras_tensor = hasattr(initial_state[0], '_keras_history')
        for tensor in initial_state:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state of an RNN layer cannot be'
                                 ' specified with a mix of Keras tensors and'
                                 ' non-Keras tensors')

        if is_keras_tensor:
            # Compute the full input spec, including state
            input_spec = self.input_spec
            state_spec = self.state_spec
            input_spec = to_list(input_spec)
            state_spec = to_list(state_spec)
            self.input_spec = input_spec + state_spec

            # Compute the full inputs, including state
            inputs = [inputs] + list(initial_state)

            # Perform the call
            output = super(Recurrent, self).__call__(inputs, **kwargs)

            # Restore original input spec
            self.input_spec = input_spec
            return output
        else:
            kwargs['initial_state'] = initial_state
            return super(Recurrent, self).__call__(inputs, **kwargs)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if isinstance(inputs, list):
            initial_state = inputs[1:]
            inputs = inputs[0]
        elif initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)

        if isinstance(mask, list):
            mask = mask[0]

        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        timesteps = input_shape[1]
        if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined or equal to 1.\n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        constants = self.get_constants(inputs, training=None)
        preprocessed_input = self.preprocess_input(inputs, training=None)
        last_output, outputs, states = K.rnn(self.step,
                                             preprocessed_input,
                                             initial_state,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=timesteps)
        if self.stateful:
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)

        # Properly set learning phase
        if 0 < self.dropout + self.recurrent_dropout:
            last_output._uses_learning_phase = True
            outputs._uses_learning_phase = True

        if self.return_sequences:
            output = outputs
        else:
            output = last_output

        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output

    def reset_states(self, states=None):
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors:\n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the batch size by passing a '
                             '`batch_shape` argument to your Input layer.')
        # initialize state if None
        if self.states[0] is None:
            self.states = [K.zeros((batch_size, self.units))
                           for _ in self.states]
        elif states is None:
            for state in self.states:
                K.set_value(state, np.zeros((batch_size, self.units)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if value.shape != (batch_size, self.units):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, self.units)) +
                                     ', found shape=' + str(value.shape))
                K.set_value(state, value)

    def get_config(self):
        config = {'return_sequences': self.return_sequences,
                  'return_state': self.return_state,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful,
                  'unroll': self.unroll,
                  'implementation': self.implementation}
        base_config = super(Recurrent, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class ConvRecurrent2D(Recurrent):
    """Abstract base class for convolutional recurrent layers.

    Do not use in a model -- it's not a functional layer!

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            dimensions of the convolution window.
        strides: An integer or tuple/list of n integers,
            specifying the strides of the convolution.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding: One of `"valid"` or `"same"` (case-insensitive).
        data_format: A string,
            one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, time, ..., channels)`
            while `channels_first` corresponds to
            inputs with shape `(batch, time, channels, ...)`.
            It defaults to the `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "channels_last".
        dilation_rate: An integer or tuple/list of n integers, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.

    # Input shape
        5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.

    # Output shape
        - if `return_sequences`: 5D tensor with shape
            `(num_samples, timesteps, channels, rows, cols)`.
        - else, 4D tensor with shape `(num_samples, channels, rows, cols)`.
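
    # Example
    A shape sketch for a concrete subclass such as `ConvLSTM2D` (illustrative
    numbers; `data_format='channels_last'` and the default `padding='valid'`
    are assumed):

    ```python
    model = Sequential()
    model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3),
                         input_shape=(10, 64, 64, 3)))
    # now model.output_shape == (None, 62, 62, 16)
    ```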

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.
        **Note:** for the time being, masking is only supported with Theano.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch.
        This assumes a one-to-one mapping between
        samples in different successive batches.

        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                a `batch_input_shape=(...)` to the first layer in your model.
                This is the expected shape of your inputs *including the batch
                size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.

        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.
    """

    def __init__(self, filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 return_sequences=False,
                 go_backwards=False,
                 stateful=False,
                 **kwargs):
        super(ConvRecurrent2D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = K.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                        'dilation_rate')
        self.return_sequences = return_sequences
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.input_spec = [InputSpec(ndim=5)]
        self.state_spec = None

    def compute_output_shape(self, input_shape):
        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        if self.data_format == 'channels_first':
            rows = input_shape[3]
            cols = input_shape[4]
        elif self.data_format == 'channels_last':
            rows = input_shape[2]
            cols = input_shape[3]
        rows = conv_utils.conv_output_length(rows,
                                             self.kernel_size[0],
                                             padding=self.padding,
                                             stride=self.strides[0],
                                             dilation=self.dilation_rate[0])
        cols = conv_utils.conv_output_length(cols,
                                             self.kernel_size[1],
                                             padding=self.padding,
                                             stride=self.strides[1],
                                             dilation=self.dilation_rate[1])

        if self.return_sequences:
            if self.data_format == 'channels_first':
                output_shape = (input_shape[0], input_shape[1],
                                self.filters, rows, cols)
            elif self.data_format == 'channels_last':
                output_shape = (input_shape[0], input_shape[1],
                                rows, cols, self.filters)
        else:
            if self.data_format == 'channels_first':
                output_shape = (input_shape[0], self.filters, rows, cols)
            elif self.data_format == 'channels_last':
                output_shape = (input_shape[0], rows, cols, self.filters)

        if self.return_state:
            if self.data_format == 'channels_first':
                output_shape = [output_shape] + [(input_shape[0],
                                                  self.filters,
                                                  rows,
                                                  cols) for _ in range(2)]
            elif self.data_format == 'channels_last':
                output_shape = [output_shape] + [(input_shape[0],
                                                  rows,
                                                  cols,
                                                  self.filters)
                                                 for _ in range(2)]

        return output_shape

    def get_config(self):
        config = {'filters': self.filters,
                  'kernel_size': self.kernel_size,
                  'strides': self.strides,
                  'padding': self.padding,
                  'data_format': self.data_format,
                  'dilation_rate': self.dilation_rate,
                  'return_sequences': self.return_sequences,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful}
        base_config = super(ConvRecurrent2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))