/**
 * @file models/transformer/decoder.hpp
 * @author Mikhail Lozhnikov
 * @author Mrityunjay Tripathi
 *
 * Definition of the Transformer Decoder layer.
 *
 * mlpack is free software; you may redistribute it and/or modify it under the
 * terms of the 3-clause BSD license. You should have received a copy of the
 * 3-clause BSD license along with mlpack. If not, see
 * http://www.opensource.org/licenses/BSD-3-Clause for more information.
 */

#ifndef MODELS_TRANSFORMER_DECODER_HPP
#define MODELS_TRANSFORMER_DECODER_HPP

#include <mlpack/prereqs.hpp>
#include <mlpack/methods/ann/layer/layer_types.hpp>
#include <mlpack/methods/ann/layer/base_layer.hpp>
#include <mlpack/methods/ann/regularizer/no_regularizer.hpp>

namespace mlpack {
namespace ann /** Artificial Neural Network. */ {

/**
 * In addition to the two sub-layers in each encoder layer, the decoder inserts
 * a third sub-layer, which performs multi-head attention over the output of the
 * encoder stack. Similar to the encoder, we employ residual connections around
 * each of the sub-layers, followed by layer normalization. We also modify the
 * self-attention sub-layer in the decoder stack to prevent positions from
 * attending to subsequent positions. This masking, combined with the fact that
 * the output embeddings are offset by one position, ensures that the
 * predictions for position i can depend only on the known outputs at positions
 * less than i.
 *
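 * A minimal usage sketch (the training setup and the decoder's stacked input
 * layout are illustrative assumptions, not guarantees made by this class):
 *
 * @code
 * // Decoder with 2 blocks, target length 10, source length 12, and the
 * // default dModel, numHeads, dimFFN and dropout values.
 * TransformerDecoder<> decoder(2, 10, 12);
 *
 * // The decoder block can be added to an FFN<> model.  Its input is assumed
 * // to stack the shifted target embedding (dModel * tgtSeqLen rows) on top
 * // of the encoder output (dModel * srcSeqLen rows), one column per sample.
 * FFN<> model;
 * model.Add(decoder.Model());
 * @endcode
 *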
 * @tparam ActivationFunction The type of the activation function to be used in
 *         the position-wise feed forward neural network.
 * @tparam RegularizerType The type of regularizer to be applied to layer
 *         parameters.
 * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
 *         arma::sp_mat or arma::cube).
 * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
 *         arma::sp_mat or arma::cube).
 */
template <
    typename ActivationFunction = ReLULayer<>,
    typename RegularizerType = NoRegularizer,
    typename InputDataType = arma::mat,
    typename OutputDataType = arma::mat
>
class TransformerDecoder
{
 public:
  TransformerDecoder();

  /**
   * Create the TransformerDecoder object using the specified parameters.
   *
   * @param numLayers The number of decoder blocks.
   * @param tgtSeqLen Target sequence length.
   * @param srcSeqLen Source sequence length.
   * @param dModel The number of features in the input. This is the same as the
   *        'embedDim' of the 'MultiheadAttention' layer.
   * @param numHeads The number of attention heads.
   * @param dimFFN The dimensionality of the feed forward network.
   * @param dropout The dropout rate.
   * @param attentionMask The attention mask used to mask out future positions
   *        in the decoder's self-attention.
   * @param keyPaddingMask The padding mask used to mask out padding tokens.
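   *
   * For example, a look-ahead mask that lets position i attend only to
   * positions j <= i could be built as below (the additive large-negative
   * convention is an assumption used for illustration):
   *
   * @code
   * arma::mat attnMask = arma::trimatu(
   *     arma::ones(tgtSeqLen, tgtSeqLen), 1) * -1e9;
   * @endcode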
   */
  TransformerDecoder(const size_t numLayers,
                     const size_t tgtSeqLen,
                     const size_t srcSeqLen,
                     const size_t dModel = 512,
                     const size_t numHeads = 8,
                     const size_t dimFFN = 1024,
                     const double dropout = 0.1,
                     const InputDataType& attentionMask = InputDataType(),
                     const InputDataType& keyPaddingMask = InputDataType());

  /**
   * Get the Transformer Decoder model.
   */
  Sequential<InputDataType, OutputDataType, false>* Model() { return decoder; }

  /**
   * Load the network from a local directory.
   *
   * @param filepath The location of the stored model.
   */
  void LoadModel(const std::string& filepath);

  /**
   * Save the network locally.
   *
   * @param filepath The location where the model is to be saved.
   */
  void SaveModel(const std::string& filepath);

  //! Get the key matrix, the output of the Transformer Encoder.
  InputDataType const& Key() const { return key; }

  //! Modify the key matrix.
  InputDataType& Key() { return key; }

 private:
  /**
   * This method adds the attention block (masked self-attention followed by
   * encoder-decoder attention, each wrapped in a residual connection and
   * layer normalization) to the decoder.
   */
  void AttentionBlock()
  {
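    // The block below assumes that each input column stacks the target
    // embedding (dModel * tgtSeqLen rows) on top of the encoder output
    // (dModel * srcSeqLen rows); the Subview layers split the two parts.
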
    Sequential<>* decoderBlockBottom = new Sequential<>();
    decoderBlockBottom->Add<Subview<>>(1, 0, dModel * tgtSeqLen - 1, 0, -1);

    // Broadcast the incoming input to the decoder,
    // i.e. the query, into (query, key, value).
    Concat<>* decoderInput = new Concat<>();
    decoderInput->Add<IdentityLayer<>>();
    decoderInput->Add<IdentityLayer<>>();
    decoderInput->Add<IdentityLayer<>>();

    // Masked self-attention layer.
    Sequential<>* maskedSelfAttention = new Sequential<>();
    maskedSelfAttention->Add(decoderInput);
    maskedSelfAttention->Add<MultiheadAttention<
        InputDataType, OutputDataType, RegularizerType>>(
        tgtSeqLen,
        tgtSeqLen,
        dModel,
        numHeads,
        attentionMask
    );

    // Residual connection around the masked self-attention sub-layer.
    AddMerge<>* selfAttentionResidual = new AddMerge<>();
    selfAttentionResidual->Add(maskedSelfAttention);
    selfAttentionResidual->Add<IdentityLayer<>>();

    decoderBlockBottom->Add(selfAttentionResidual);

    // Add the LayerNorm layer with required parameters.
    decoderBlockBottom->Add<LayerNorm<>>(dModel * tgtSeqLen);
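
    // At this point decoderBlockBottom computes
    // LayerNorm(target + MaskedSelfAttention(target)).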

    // This layer broadcasts the output of the encoder, i.e. the key,
    // into (key, value).
    Concat<>* broadcastEncoderOutput = new Concat<>();
    broadcastEncoderOutput->Add<Subview<>>(1, dModel * tgtSeqLen, -1, 0, -1);
    broadcastEncoderOutput->Add<Subview<>>(1, dModel * tgtSeqLen, -1, 0, -1);

    // This layer concatenates the output of the bottom decoder block (query)
    // and the output of the encoder (key, value).
    Concat<>* encoderDecoderAttentionInput = new Concat<>();
    encoderDecoderAttentionInput->Add(decoderBlockBottom);
    encoderDecoderAttentionInput->Add(broadcastEncoderOutput);

    // Encoder-decoder attention.
    Sequential<>* encoderDecoderAttention = new Sequential<>();
    encoderDecoderAttention->Add(encoderDecoderAttentionInput);
    encoderDecoderAttention->Add<MultiheadAttention<
        InputDataType, OutputDataType, RegularizerType>>(
        tgtSeqLen,
        srcSeqLen,
        dModel,
        numHeads,
        InputDataType(), // No attention mask for encoder-decoder attention.
        keyPaddingMask);

    // Residual connection around the encoder-decoder attention sub-layer.
    AddMerge<>* encoderDecoderResidual = new AddMerge<>();
    encoderDecoderResidual->Add(encoderDecoderAttention);
    encoderDecoderResidual->Add<IdentityLayer<>>();

    decoder->Add(encoderDecoderResidual);
    decoder->Add<LayerNorm<>>(dModel * tgtSeqLen);
  }

  /**
   * This method adds the position-wise feed forward network to the decoder.
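   *
   * Position-wise means that the same two linear maps are applied at every
   * time step independently:
   *
   *   FFN(x) = W2 * f(W1 * x + b1) + b2,
   *
   * where f is the ActivationFunction; this is what the two Linear3D layers
   * below implement for each position of the sequence.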
   */
  void PositionWiseFFNBlock()
  {
    Sequential<>* positionWiseFFN = new Sequential<>();
    positionWiseFFN->Add<Linear3D<>>(dModel, dimFFN);
    positionWiseFFN->Add<ActivationFunction>();
    positionWiseFFN->Add<Linear3D<>>(dimFFN, dModel);
    positionWiseFFN->Add<Dropout<>>(dropout);

    /* Residual connection. */
    AddMerge<>* residualAdd = new AddMerge<>();
    residualAdd->Add(positionWiseFFN);
    residualAdd->Add<IdentityLayer<>>();
    decoder->Add(residualAdd);

    // Layer normalization after the feed forward sub-layer, as for the
    // attention sub-layers.
    decoder->Add<LayerNorm<>>(dModel * tgtSeqLen);
  }

  //! Locally-stored number of decoder layers.
  size_t numLayers;

  //! Locally-stored target sequence length.
  size_t tgtSeqLen;

  //! Locally-stored source sequence length.
  size_t srcSeqLen;

  //! Locally-stored number of features in the input (embedding dimension).
  size_t dModel;

  //! Locally-stored number of attention heads.
  size_t numHeads;

  //! Locally-stored dimensionality of the feed forward network.
  size_t dimFFN;

  //! Locally-stored dropout rate.
  double dropout;

  //! Locally-stored attention mask.
  InputDataType attentionMask;

  //! Locally-stored key padding mask.
  InputDataType keyPaddingMask;

  //! Locally-stored key matrix, the output of the Transformer Encoder,
  //! returned by the Key() accessors above.
  InputDataType key;

  //! Locally-stored complete decoder network.
  Sequential<InputDataType, OutputDataType, false>* decoder;

}; // class TransformerDecoder

} // namespace ann
} // namespace mlpack

// Include implementation.
#include "decoder_impl.hpp"

#endif