from ..core import MPBase, MPEnums
from ..kernel import Kernel
from ..graph import Node, Parameter
import numpy as np
import warnings
class ConcatenationKernel(Kernel):
"""
Kernel to concatenate multiple tensors into a single tensor
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input 1 data
inB : Tensor
Input 2 data
outA : Tensor
Output data
axis : int or tuple of ints, default = 0
The axis along which the arrays will be joined. If axis is None, axis 0 is used.
See numpy.concatenate for more information
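
Examples
--------
The underlying operation matches ``numpy.concatenate``. For example, joining
a (3, 4) tensor with a (5, 4) tensor along axis 0 yields an (8, 4) tensor:

>>> import numpy as np
>>> np.concatenate((np.zeros((3, 4)), np.ones((5, 4))), axis=0).shape
(8, 4)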
"""
def __init__(self,graph,outA,inA,inB,axis=0):
""" Init """
super().__init__('Concatenation',MPEnums.INIT_FROM_NONE,graph)
self.inputs = [inA,inB]
self.outputs = [outA]
self._axis = axis
def _initialize(self, init_inputs, init_outputs, labels):
"""
This kernel has no internal state that must be initialized
"""
# get the init inputs and outputs
init_inA = init_inputs[0]
init_inB = init_inputs[1]
init_out = init_outputs[0]
# convert the init inputs to tensors if needed (rebinding a loop
# variable would not update the originals, so convert each explicitly)
if init_inA is not None and init_inA.mp_type != MPEnums.TENSOR:
init_inA = init_inA.to_tensor()
if init_inB is not None and init_inB.mp_type != MPEnums.TENSOR:
init_inB = init_inB.to_tensor()
if init_out is not None and (init_inA is not None and init_inA.shape != ()):
# determine if the axis needs to be adjusted for init
# start by getting the inputs to compare the rank of the init and non-init data
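# e.g., trial inputs of rank 2 may be paired with init data of rank 3,
# where the extra leading dimension indexes the init trials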
d_inA = self.inputs[0]
d_inB = self.inputs[1]
axis_adjusted = False
if (len(d_inA.shape)+1 == len(init_inA.shape) and
len(d_inB.shape)+1 == len(init_inB.shape) and
self._axis >= 0):
# adjust axis to accommodate stack of input data
self._axis += 1
axis_adjusted = True
# adjust the output shape if it is virtual
if init_out.virtual:
output_sz, _, _ = self._resolve_dims(init_inA, init_inB)
init_out.shape = output_sz
# process the init data
self._process_data([init_inA, init_inB], init_outputs)
# adjust the axis back if it was adjusted
if axis_adjusted:
self._axis -= 1
def _resolve_dims(self, inA, inB):
"""
Determine dimensions of concatenated tensor
Parameters
----------
inA : Tensor
Input 1 data
inB : Tensor
Input 2 data
Returns
-------
output_sz: Tuple
Dimensions of concatenated tensor
noncat_sz_A: list
Non-concatenated dimensions of tensor A
noncat_sz_B: list
Non-concatenated dimensions of tensor B
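
Examples
--------
For inputs of shape (3, 4) and (5, 4) with axis 0, the resolved output
shape is (8, 4) and the non-concatenated sizes are [4] and [4]. If one
input has an extra leading dimension, e.g. (3, 4) and (4,) with axis 0,
the lower-rank input contributes a single slice and the output is (4, 4).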
"""
sz_A = inA.shape
sz_B = inB.shape
concat_axis = self._axis
if len(sz_A) == len(sz_B):
noncat_sz_A = [d for i,d in enumerate(sz_A) if i!=concat_axis]
noncat_sz_B = [d for i,d in enumerate(sz_B) if i!=concat_axis]
output_sz = noncat_sz_A[:]
output_sz.insert(concat_axis,sz_A[concat_axis]+sz_B[concat_axis])
elif len(sz_A) == len(sz_B)+1:
# appending B to A
noncat_sz_A = [d for i,d in enumerate(sz_A) if i!=concat_axis]
noncat_sz_B = sz_B
output_sz = noncat_sz_A[:]
output_sz.insert(concat_axis,sz_A[concat_axis]+1)
elif len(sz_A) == len(sz_B)-1:
noncat_sz_B = [d for i,d in enumerate(sz_B) if i!=concat_axis]
noncat_sz_A = sz_A
output_sz = noncat_sz_B[:]
output_sz.insert(concat_axis,sz_B[concat_axis]+1)
else:
output_sz = []
noncat_sz_A = []
noncat_sz_B = []
return tuple(output_sz), noncat_sz_A, noncat_sz_B
def _verify(self):
"""
Verify the inputs and outputs are appropriately sized
"""
# inA, inB, and outA must all be tensors
d_inA, d_inB = self.inputs
d_out = self.outputs[0]
for param in (d_inA, d_inB, d_out):
if param.mp_type != MPEnums.TENSOR:
raise TypeError("ConcatenationKernel requires Tensor inputs and outputs")
# the dimensions off the concatenation axis must be equal
output_sz, noncat_sz_A, noncat_sz_B = self._resolve_dims(d_inA,d_inB)
if len(output_sz) == 0:
raise TypeError("ConcatenationKernel could not resolve output dimensions")
# check if the remaining dimensions are the same
if list(noncat_sz_A) != list(noncat_sz_B):
raise ValueError("ConcatenationKernel requires non-concatenation dimensions to be equal")
if d_out.virtual and len(d_out.shape) == 0:
d_out.shape = output_sz
# ensure the output shape equals the expected output shape
if d_out.shape != output_sz:
raise ValueError("ConcatenationKernel output shape does not match expected output shape")
def _process_data(self, inputs, outputs):
"""
Concatenate tensors into single tensor
Parameters
----------
inputs: list of Tensors
Input data container, list of length 2
outputs: list of Tensors
Output data container, list of length 1
"""
inA_data = inputs[0].data
inB_data = inputs[1].data
concat_axis = self._axis if self._axis is not None else 0
if len(inA_data.shape) == len(inB_data.shape)+1:
# add a leading dimension for input B
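# e.g., a (3, 4) tensor B becomes (1, 3, 4) so it can be joined with an (N, 3, 4) tensor A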
inB_data = np.expand_dims(inB_data,axis=0)
elif len(inB_data.shape) == len(inA_data.shape)+1:
inA_data = np.expand_dims(inA_data,axis=0)
outputs[0].data = np.concatenate((inA_data,
inB_data),
axis=concat_axis)
@classmethod
def add_to_graph(cls,graph,inA,inB,outA,axis=0,init_inputs=None,init_labels=None):
"""
Factory method to create a concatenation kernel and add it to a graph
as a generic node object.
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input 1 data
inB : Tensor
Input 2 data
outA : Tensor
Output data
axis : int or tuple of ints, default = 0
The axis along which the arrays will be joined. If axis is None, axis 0 is used.
See numpy.concatenate for more information
Returns
-------
node : Node
The node object that was added to the graph containing the concatenation kernel
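
Examples
--------
A hypothetical usage sketch; the tensor factory name below is an
assumption and may differ from the actual MindPype API:

>>> t_out = Tensor.create_virtual(sess)  # doctest: +SKIP
>>> node = ConcatenationKernel.add_to_graph(graph, t_inA, t_inB,
...                                         t_out, axis=0)  # doctest: +SKIP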
"""
# create the kernel object
k = cls(graph,outA,inA,inB,axis)
# create parameter objects for the input and output
params = (Parameter(outA,MPEnums.OUTPUT),
Parameter(inA,MPEnums.INPUT),
Parameter(inB,MPEnums.INPUT))
# add the kernel to a generic node object
node = Node(graph,k,params)
# add the node to the graph
graph.add_node(node)
# if initialization data is provided, add it to the node
if init_inputs is not None:
node.add_initialization_data(init_inputs,init_labels)
return node
class EnqueueKernel(Kernel):
"""
Kernel to enqueue a MindPype object into a MindPype circle buffer
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : MPBase
Input data to enqueue into circle buffer
queue : CircleBuffer
Circle buffer to have data enqueued to
enqueue_flag : Scalar or None
(optional) Scalar boolean used to determine whether the input should be added to the queue
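
Examples
--------
The gating logic is equivalent to this plain-Python sketch, using a list
as a stand-in for the circle buffer (names here are illustrative only):

>>> buffer, gated, flag = [], True, False
>>> if not gated or flag:
...     buffer.append("trial data")
>>> buffer
[]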
"""
def __init__(self,graph,inA,queue,enqueue_flag):
""" Init """
super().__init__('Enqueue',MPEnums.INIT_FROM_NONE,graph)
self.inputs = [inA, queue, enqueue_flag]
if enqueue_flag is not None:
self._gated = True
else:
self._gated = False
def _verify(self):
"""
Verify the inputs and outputs are appropriately sized
"""
# first ensure the inputs and outputs are the appropriate type
d_in = self.inputs[0]
d_io = self.inputs[1]
if not isinstance(d_in,MPBase):
raise TypeError("EnqueueKernel requires MPBase input")
if d_io.mp_type != MPEnums.CIRCLE_BUFFER:
raise TypeError("EnqueueKernel requires CircleBuffer output")
# check that the buffer's capacity is at least 1
if d_io.capacity < 1:
raise ValueError("EnqueueKernel requires CircleBuffer capacity to be at least 1")
# if gated, check that the flag is a scalar
if self._gated:
enqueue_flag = self.inputs[2]
if (enqueue_flag.mp_type != MPEnums.SCALAR or
enqueue_flag.data_type not in (int, bool)):
raise TypeError("EnqueueKernel requires enqueue flag to be a scalar boolean")
# check that the datatypes match
if d_in.mp_type != d_io.get_element(0).mp_type:
raise TypeError("Enqueue kernel requires input type to match element type of circle buffer")
# if the input is a tensor, check that the dimensions match
if d_in.mp_type == MPEnums.TENSOR:
if d_in.shape != d_io.get_element(0).shape:
raise ValueError("Enqueue kernel requires input tensor shape to match element shape of circle buffer")
def _process_data(self, inputs, outputs):
"""
Enqueue the input object into the circle buffer
Parameters
----------
inputs: list of MPBases
Input data container, list of length 1
"""
# need to make a deep copy of the object to enqueue
if not self._gated or inputs[2].data:
cpy = inputs[0].make_copy()
inputs[1].enqueue(cpy)
@classmethod
def add_to_graph(cls,graph,inA,queue,enqueue_flag=None):
"""
Factory method to create an enqueue kernel and add it to a graph as a generic node object.
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor or Scalar or Array or CircleBuffer
Input data to enqueue into circle buffer
queue : CircleBuffer
Circle buffer to have data enqueued to
enqueue_flag : Scalar
(optional) Scalar boolean used to determine whether the input should be added to the queue
Returns
-------
node : Node
The node object that was added to the graph containing the enqueue kernel
"""
# create the kernel object
k = cls(graph,inA,queue,enqueue_flag)
# create parameter objects for the input and output
params = (Parameter(inA,MPEnums.INPUT),
Parameter(queue,MPEnums.INOUT))
if enqueue_flag is not None:
params += (Parameter(enqueue_flag, MPEnums.INPUT),)
# add the kernel to a generic node object
node = Node(graph,k,params)
# add the node to the graph
graph.add_node(node)
return node
class StackKernel(Kernel):
"""
Kernel to stack multiple tensors into a single tensor
Parameters
----------
graph : Graph
The graph where the StackKernel object should be added
inA : Array
Array containing the tensors to be stacked
outA : Tensor
Tensor of stacked tensors
axis : Scalar or None, default = None
Scalar containing the axis in the result tensor along which the input tensors are stacked. If None, axis 0 is used.
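
Examples
--------
The underlying operation matches ``numpy.stack``. For example, stacking
three (2, 4) tensors along axis 0 yields a (3, 2, 4) tensor:

>>> import numpy as np
>>> np.stack([np.zeros((2, 4))] * 3, axis=0).shape
(3, 2, 4)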
"""
def __init__(self,graph,inA,outA,axis=None):
""" Init """
super().__init__('stack',MPEnums.INIT_FROM_NONE,graph)
self.inputs = [inA]
self.outputs = [outA]
self._axis = axis
def _verify(self):
"""
Verify the inputs and outputs are appropriately sized
"""
inA = self.inputs[0]
outA = self.outputs[0]
# inA must be an array and outA must be a tensor
if (not ((inA.mp_type == MPEnums.ARRAY or
inA.mp_type == MPEnums.CIRCLE_BUFFER) and
outA.mp_type == MPEnums.TENSOR)):
raise TypeError("StackKernel requires Array input and Tensor output")
# if an axis was provided, it must be a scalar
if self._axis is not None and self._axis.mp_type != MPEnums.SCALAR:
raise TypeError("StackKernel requires Scalar axis")
stack_axis = self._axis.data if self._axis is not None else 0
# ensure that all the tensors in inA are the same size
tensor_shapes = [inA.get_element(i).shape for i in range(inA.capacity)]
if len(set(tensor_shapes)) != 1:
# tensors in array are different sizes OR array is empty
raise ValueError("StackKernel requires all tensors in input array to be the same size")
# determine the output dimensions
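# e.g., stacking 3 tensors of shape (2, 4) along axis 0 gives (3, 2, 4)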
output_shape = (tensor_shapes[0][:stack_axis] + (inA.capacity,)
+ tensor_shapes[0][stack_axis:])
# check the output dimensions are valid
if outA.virtual and len(outA.shape) == 0:
outA.shape = output_shape
# ensure the output shape equals the expected output shape
if outA.shape != output_shape:
raise ValueError("StackKernel output shape does not match expected output shape")
def _process_data(self, inputs, outputs):
"""
Execute the kernel function using numpy functions
Parameters
----------
inputs: list of Arrays
Input data container, list of length 1
outputs: list of Tensors
Output data container, list of length 1
"""
inA = inputs[0]
outA = outputs[0]
stack_axis = self._axis.data if self._axis is not None else 0
input_tensors = [inA.get_element(i) for i in range(inA.capacity)]
input_data = [t.data for t in input_tensors]
outA.data = np.stack(input_data,axis=stack_axis)
@classmethod
def add_to_graph(cls,graph,inA,outA,axis=None):
"""
Factory method to create a stack kernel and add it to a graph
as a generic node object.
Parameters
----------
graph : Graph
The graph where the StackKernel object should be added
inA : Array
Array containing the tensors to be stacked
outA : Tensor
Tensor of stacked tensors
axis : Scalar or None, default = None
Scalar containing the axis in the result tensor along which the input tensors are stacked. If None, axis 0 is used.
Returns
-------
node : Node
The node object that was added to the graph containing the stack kernel
"""
# create the kernel object
k = cls(graph,inA,outA,axis)
# create parameter objects for the input and output
params = (Parameter(inA,MPEnums.INPUT),
Parameter(outA,MPEnums.OUTPUT))
# add the kernel to a generic node object
node = Node(graph,k,params)
# add the node to the graph
graph.add_node(node)
return node
class TensorStackKernel(Kernel):
"""
Kernel to stack 2 tensors into a single tensor
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input 1 data
inB : Tensor
Input 2 data
outA : Tensor
Output data
axis : int, default = 0
Axis along which the tensors are stacked. See numpy.stack for more information
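
Examples
--------
The underlying operation matches ``numpy.stack``. For example, stacking
two (3, 4) tensors along axis 1 yields a (3, 2, 4) tensor:

>>> import numpy as np
>>> np.stack((np.zeros((3, 4)), np.ones((3, 4))), axis=1).shape
(3, 2, 4)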
"""
def __init__(self,graph,inA,inB,outA,axis=0):
""" Init """
super().__init__('TensorStack',MPEnums.INIT_FROM_NONE,graph)
self.inputs = [inA,inB]
self.outputs = [outA]
self._axis = axis
def _initialize(self, init_inputs, init_outputs, labels):
"""
This kernel has no internal state that must be initialized.
"""
init_inA, init_inB = init_inputs
init_out = init_outputs[0]
# convert the init inputs to tensors if needed (rebinding a loop
# variable would not update the originals, so convert each explicitly)
if init_inA is not None and init_inA.mp_type != MPEnums.TENSOR:
init_inA = init_inA.to_tensor()
if init_inB is not None and init_inB.mp_type != MPEnums.TENSOR:
init_inB = init_inB.to_tensor()
if init_out is not None and (init_inA is not None and init_inA.shape != ()):
# adjust the init output shape
if init_out.virtual:
init_out.shape = init_inA.shape[:self._axis+1] + (2,) + init_inA.shape[self._axis+1:]
axis_adjusted = False
if len(init_inA.shape) == len(self.inputs[0].shape)+1 and self._axis >= 0:
# adjust axis to accommodate stack of input data
self._axis += 1
axis_adjusted = True
self._process_data([init_inA, init_inB], init_outputs)
if axis_adjusted:
self._axis -= 1 # adjust back for trial processing
def _verify(self):
"""
Verify the inputs and outputs are appropriately sized
"""
inA, inB = self.inputs
outA = self.outputs[0]
# all params must be tensors
for param in (inA, inB, outA):
if param.mp_type != MPEnums.TENSOR:
raise TypeError("TensorStackKernel requires Tensor inputs and outputs")
stack_axis = self._axis
if stack_axis >= len(inA.shape) or stack_axis < -len(inA.shape):
raise ValueError("TensorStackKernel requires stack axis to be within the rank of the input tensors")
# ensure that all the tensors in inA are the same size
tensor_shapes = [inA.shape, inB.shape]
if len(set(tensor_shapes)) != 1:
# the two input tensors have different shapes
raise ValueError("TensorStackKernel requires all tensors in input array to be the same size")
# determine the output dimensions
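# e.g., two (3, 4) tensors stacked along axis 1 produce a (3, 2, 4) tensor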
output_shape = inA.shape[:stack_axis] + (2,) + inA.shape[stack_axis:]
# check the output dimensions are valid
if outA.virtual and len(outA.shape) == 0:
outA.shape = output_shape
# ensure the output shape equals the expected output shape
if outA.shape != output_shape:
raise ValueError("TensorStackKernel output shape does not match expected output shape")
def _process_data(self, inputs, outputs):
"""
Stack 2 tensors into single tensor
Parameters
----------
inputs: list of Tensors
Input data container, list of length 2
outputs: list of Tensors
Output data container, list of length 1
"""
input_tensors = [t.data for t in inputs]
outputs[0].data = np.stack(input_tensors,axis=self._axis)
@classmethod
def add_to_graph(cls,graph,inA,inB,outA,axis=0,init_inputs=None,init_labels=None):
"""
Factory method to create a tensor stack kernel and add it to a graph
as a generic node object.
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input 1 data
inB : Tensor
Input 2 data
outA : Tensor
Output data
axis : int, default = 0
Axis along which the tensors are stacked. See numpy.stack for more information
Returns
-------
node : Node
The node object that was added to the graph containing the tensor stack kernel
"""
# create the kernel object
k = cls(graph,inA,inB,outA,axis)
# create parameter objects for the input and output
params = (Parameter(inA,MPEnums.INPUT),
Parameter(inB,MPEnums.INPUT),
Parameter(outA,MPEnums.OUTPUT))
# add the kernel to a generic node object
node = Node(graph,k,params)
# add the node to the graph
graph.add_node(node)
# if initialization data is provided, add it to the node
if init_inputs is not None:
node.add_initialization_data(init_inputs,init_labels)
return node
class ReshapeKernel(Kernel):
"""
Kernel to reshape a tensor
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input tensor
outA : Tensor
Output tensor
shape : tuple of ints
Shape of the output tensor
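
Examples
--------
The underlying operation matches ``numpy.reshape``. For example, a (3, 4)
tensor can be reshaped to (2, 6) because both shapes contain 12 elements:

>>> import numpy as np
>>> np.zeros((3, 4)).reshape((2, 6)).shape
(2, 6)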
"""
def __init__(self, graph, inA, outA, shape):
""" Init """
super().__init__('Reshape',MPEnums.INIT_FROM_NONE, graph)
self.inputs = [inA]
self.outputs = [outA]
self._shape = shape
def _initialize(self, init_inputs, init_outputs, labels):
"""
This kernel has no internal state that must be initialized
"""
init_in = init_inputs[0]
init_out = init_outputs[0]
if init_in is not None and init_in.mp_type != MPEnums.TENSOR:
init_in = init_in.to_tensor()
if init_out is not None and (init_in is not None and init_in.shape != ()):
# adjust the init output shape
if init_out.virtual:
if len(init_in.shape) == len(self.inputs[0].shape)+1:
init_out.shape = (init_in.shape[0],) + self.outputs[0].shape
else:
init_out.shape = self.outputs[0].shape
self._process_data([init_in], init_outputs)
def _verify(self):
"""
Verify the inputs and outputs are appropriately typed
"""
inA = self.inputs[0]
outA = self.outputs[0]
# inA must be a tensor and outA must be a tensor
if (inA.mp_type != MPEnums.TENSOR or
outA.mp_type != MPEnums.TENSOR):
raise TypeError("ReshapeKernel requires Tensor input and Tensor output")
# determine and set the output shape
try:
test_in = np.zeros(inA.shape)
test_out = test_in.reshape(self._shape)
except (ValueError, TypeError):
raise ValueError("ReshapeKernel requires output shape to be compatible with input shape")
if outA.virtual and len(outA.shape) == 0:
outA.shape = test_out.shape
if outA.shape != test_out.shape:
raise ValueError("ReshapeKernel output shape does not match expected output shape")
def _process_data(self, inputs, outputs):
"""
Execute the kernel function using numpy functions
Parameters
----------
inputs: list of Tensors
Input data container, list of length 1
outputs: list of Tensors
Output data container, list of length 1
"""
inA = inputs[0]
outA = outputs[0]
outA.data = inA.data.reshape(outA.shape)
@classmethod
def add_to_graph(cls, graph, inA, outA, shape,
init_inputs=None, init_labels=None):
"""
Factory method to create a reshape kernel and add it to a graph
as a generic node object.
Parameters
----------
graph : Graph
Graph that the kernel should be added to
inA : Tensor
Input tensor
outA : Tensor
Output tensor
shape : tuple of ints
Shape of the output tensor
init_inputs: Tensor
(optional) Initialization data for the graph
init_labels: Tensor
(optional) Initialization labels for the graph
Returns
-------
node : Node
The node object that was added to the graph containing the reshape kernel
"""
# create the kernel object
k = cls(graph, inA, outA, shape)
# create parameter objects for the input and output
params = (Parameter(inA, MPEnums.INPUT),
Parameter(outA, MPEnums.OUTPUT))
# add the kernel to a generic node object
node = Node(graph, k, params)
# add the node to the graph
graph.add_node(node)
# if initialization data is provided, add it to the node
if init_inputs is not None:
node.add_initialization_data(init_inputs, init_labels)
return node