Definitions of all models available in Transformers.js.
Example: Load and run an AutoModel.
import { AutoModel, AutoTokenizer } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');
let inputs = await tokenizer('I love transformers!');
let { logits } = await model(inputs);
// Tensor {
// data: Float32Array(183132) [-7.117443084716797, -7.107812881469727, -7.092104911804199, ...]
// dims: (3) [1, 6, 30522],
// type: "float32",
// size: 183132,
// }
We also provide other AutoModels (listed below), which you can use in the same way as the Python library. For example:
Example: Load and run an AutoModelForSeq2SeqLM.
import { AutoModelForSeq2SeqLM, AutoTokenizer } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small');
let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small');
let { input_ids } = await tokenizer('translate English to German: I love transformers!');
let outputs = await model.generate(input_ids);
let decoded = tokenizer.decode(outputs[0], { skip_special_tokens: true });
// 'Ich liebe Transformatoren!'
A base class for pre-trained models that provides the model configuration and an ONNX session.
Kind: static class of models
new PreTrainedModel(config, session)
.dispose() ⇒ Promise.<Array<unknown>>
._call(model_inputs) ⇒ Promise.<Object>
.forward(model_inputs) ⇒ Promise.<Object>
._get_generation_config(generation_config) ⇒ GenerationConfig
.groupBeams(beams) ⇒ Array
.getPastKeyValues(decoderResults, pastKeyValues) ⇒ Object
.getAttentions(decoderResults) ⇒ Object
.addPastKeyValues(decoderFeeds, pastKeyValues)
.from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedModel>
Creates a new instance of the PreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration. |
session | any | session for the model. |
Disposes of all the ONNX sessions that were created during inference.
Kind: instance method of PreTrainedModel
Returns: Promise.<Array<unknown>>
- An array of promises, one for each ONNX session that is being disposed.
Runs the model with the provided inputs
Kind: instance method of PreTrainedModel
Returns: Promise.<Object>
- Object containing output tensors
Param | Type | Description |
---|---|---|
model_inputs | Object | Object containing input tensors |
Forward method for a pretrained model. If not overridden by a subclass, the correct forward method will be chosen based on the model type.
Kind: instance method of PreTrainedModel
Returns: Promise.<Object>
- The output data from the model in the format specified in the ONNX model.
Throws:
Error
This method must be implemented in subclasses.
Param | Type | Description |
---|---|---|
model_inputs | Object | The input data to the model in the format specified in the ONNX model. |
This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.
It first creates an empty GenerationConfig object, then it applies the model’s own generation_config property to it. Finally, if a generation_config object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
Kind: instance method of PreTrainedModel
Returns: GenerationConfig
- The final generation config object to be used by the model for text generation.
Param | Type | Description |
---|---|---|
generation_config | GenerationConfig | A GenerationConfig object containing generation parameters. |
Groups an array of beam objects by their ids.
Kind: instance method of PreTrainedModel
Returns: Array
- An array of arrays, where each inner array contains beam objects with the same id.
Param | Type | Description |
---|---|---|
beams | Array | The array of beam objects to group. |
Returns an object containing past key values from the given decoder results object.
Kind: instance method of PreTrainedModel
Returns: Object
- An object containing past key values.
Param | Type | Description |
---|---|---|
decoderResults | Object | The decoder results object. |
pastKeyValues | Object | The previous past key values. |
Returns an object containing attentions from the given decoder results object.
Kind: instance method of PreTrainedModel
Returns: Object
- An object containing attentions.
Param | Type | Description |
---|---|---|
decoderResults | Object | The decoder results object. |
Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values.
Kind: instance method of PreTrainedModel
Param | Type | Description |
---|---|---|
decoderFeeds | Object | The decoder feeds object to add past key values to. |
pastKeyValues | Object | An object containing past key values. |
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the model_type property of the config object (either passed as an argument or loaded from pretrained_model_name_or_path if possible).
Kind: static method of PreTrainedModel
Returns: Promise.<PreTrainedModel>
- A new instance of the PreTrainedModel class.
Param | Type | Description |
---|---|---|
pretrained_model_name_or_path | string | The name or path of the pretrained model. Can be either a model id of a pretrained model hosted on the Hugging Face Hub, or a path to a directory containing model files. |
options | * | Additional options for loading the model. |
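For illustration, a minimal sketch of passing loading options. The option names shown here (quantized, progress_callback) are assumptions and may differ between library versions; check the options supported by the version in use.
import { AutoModel } from '@xenova/transformers';
// Load unquantized weights and log download progress.
// NOTE: 'quantized' and 'progress_callback' are assumed option names for illustration.
let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased', {
    quantized: false,
    progress_callback: (progress) => console.log(progress.status),
});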
Base class for model’s outputs, with potential hidden states and attentions.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.last_hidden_state | Tensor | Sequence of hidden-states at the output of the last layer of the model. |
[output.hidden_states] | Tensor | Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
[output.attentions] | Tensor | Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. |
BertForMaskedLM is a class representing a BERT model for masked language modeling.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of BertForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
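For illustration, a minimal sketch of calling this class directly, reusing the Xenova/bert-base-uncased checkpoint from the example at the top of this page (which, as that example suggests, exposes masked-LM logits).
import { AutoTokenizer, BertForMaskedLM } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
let model = await BertForMaskedLM.from_pretrained('Xenova/bert-base-uncased');
let inputs = await tokenizer('The capital of France is [MASK].');
let { logits } = await model(inputs);
// logits has dims [1, num_tokens, vocab_size]; the highest-scoring entry at the
// [MASK] position is the predicted token.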
BertForSequenceClassification is a class representing a BERT model for sequence classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of BertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
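For illustration, a minimal sketch of sequence classification; 'Xenova/bert-base-multilingual-uncased-sentiment' is an assumed example checkpoint and can be swapped for any converted BERT classification model.
import { AutoTokenizer, BertForSequenceClassification } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-multilingual-uncased-sentiment'); // assumed checkpoint
let model = await BertForSequenceClassification.from_pretrained('Xenova/bert-base-multilingual-uncased-sentiment');
let inputs = await tokenizer('I loved this movie!');
let { logits } = await model(inputs);
// logits has dims [1, num_labels]; apply softmax and map indices through
// model.config.id2label to recover the predicted class.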
BertForTokenClassification is a class representing a BERT model for token classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of BertForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
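For illustration, a minimal sketch of token classification (e.g. NER); 'Xenova/bert-base-multilingual-cased-ner-hrl' is an assumed example checkpoint.
import { AutoTokenizer, BertForTokenClassification } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-multilingual-cased-ner-hrl'); // assumed checkpoint
let model = await BertForTokenClassification.from_pretrained('Xenova/bert-base-multilingual-cased-ner-hrl');
let inputs = await tokenizer('Sarah lives in London.');
let { logits } = await model(inputs);
// logits has dims [1, num_tokens, num_labels]; the argmax over the last
// dimension gives one label id per input token.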
BertForQuestionAnswering is a class representing a BERT model for question answering.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of BertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
CamemBERT Model with a language modeling head on top.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of CamembertForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of CamembertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of CamembertForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
CamemBERT Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of CamembertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
DeBERTa Model with a language modeling head on top.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
DeBERTa-V2 Model with a language modeling head on top.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaV2ForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaV2ForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaV2ForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DebertaV2ForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DistilBertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DistilBertForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DistilBertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
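For illustration, a minimal sketch of extractive question answering; 'Xenova/distilbert-base-cased-distilled-squad' is an assumed example checkpoint.
import { AutoTokenizer, DistilBertForQuestionAnswering } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/distilbert-base-cased-distilled-squad'); // assumed checkpoint
let model = await DistilBertForQuestionAnswering.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
let inputs = await tokenizer('Who was Jim Henson?', { text_pair: 'Jim Henson was a nice puppet.' });
let { start_logits, end_logits } = await model(inputs);
// The answer span is recovered from the argmax positions of start_logits and end_logits.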
DistilBertForMaskedLM is a class representing a DistilBERT model for masked language modeling.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of DistilBertForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MobileBertForMaskedLM is a class representing a MobileBERT model for masked language modeling.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MobileBertForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MobileBertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MobileBert Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MobileBertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
MPNetForMaskedLM is a class representing a MPNet model for masked language modeling.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MPNetForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MPNetForSequenceClassification is a class representing a MPNet model for sequence classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MPNetForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MPNetForTokenClassification is a class representing a MPNet model for token classification.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MPNetForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
MPNetForQuestionAnswering is a class representing a MPNet model for question answering.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MPNetForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
T5Model is a class representing a T5 model for conditional generation.
Kind: static class of models
Creates a new instance of the T5ForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration. |
session | any | session for the model. |
decoder_merged_session | any | session for the decoder. |
generation_config | GenerationConfig | The generation configuration. |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
LONGT5 Model with a language modeling head on top.
Kind: static class of models
Creates a new instance of the LongT5ForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration. |
session | any | session for the model. |
decoder_merged_session | any | session for the decoder. |
generation_config | GenerationConfig | The generation configuration. |
A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
Kind: static class of models
Creates a new instance of the MT5ForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | any | The model configuration. |
session | any | The ONNX session containing the encoder weights. |
decoder_merged_session | any | The ONNX session containing the merged decoder weights. |
generation_config | GenerationConfig | The generation configuration. |
The bare BART Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The BART Model with a language modeling head. Can be used for summarization.
Kind: static class of models
Creates a new instance of the BartForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the Bart model. |
session | Object | The ONNX session used to execute the model. |
decoder_merged_session | Object | The ONNX session used to execute the decoder. |
generation_config | Object | The generation configuration object. |
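For illustration, a minimal summarization sketch; 'Xenova/distilbart-cnn-6-6' is an assumed example checkpoint and the generation options shown are not exhaustive.
import { AutoTokenizer, BartForConditionalGeneration } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/distilbart-cnn-6-6'); // assumed checkpoint
let model = await BartForConditionalGeneration.from_pretrained('Xenova/distilbart-cnn-6-6');
let { input_ids } = await tokenizer('The tower is 324 metres tall, about the same height as an 81-storey building, and is the tallest structure in Paris.');
let outputs = await model.generate(input_ids, { max_new_tokens: 60 });
let summary = tokenizer.decode(outputs[0], { skip_special_tokens: true });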
Bart model with a sequence classification head on top (a linear layer on top of the pooled output).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of BartForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
The bare MBART Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.
Kind: static class of models
Creates a new instance of the MBartForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the Bart model. |
session | Object | The ONNX session used to execute the model. |
decoder_merged_session | Object | The ONNX session used to execute the decoder. |
generation_config | Object | The generation configuration object. |
MBart model with a sequence classification head on top (a linear layer on top of the pooled output).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of MBartForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
Kind: static class of models
Creates a new instance of the MBartForCausalLM class.
Param | Type | Description |
---|---|---|
config | Object | Configuration object for the model. |
decoder_merged_session | Object | ONNX Session object for the decoder. |
generation_config | Object | Configuration object for the generation process. |
The bare Blenderbot Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The Blenderbot Model with a language modeling head. Can be used for summarization.
Kind: static class of models
Creates a new instance of the BlenderbotForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | any | The model configuration. |
session | any | The ONNX session containing the encoder weights. |
decoder_merged_session | any | The ONNX session containing the merged decoder weights. |
generation_config | GenerationConfig | The generation configuration. |
The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The BlenderbotSmall Model with a language modeling head. Can be used for summarization.
Kind: static class of models
Creates a new instance of the BlenderbotSmallForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | any | The model configuration. |
session | any | The ONNX session containing the encoder weights. |
decoder_merged_session | any | The ONNX session containing the merged decoder weights. |
generation_config | GenerationConfig | The generation configuration. |
RobertaForMaskedLM class for performing masked language modeling on Roberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of RobertaForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
RobertaForSequenceClassification class for performing sequence classification on Roberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of RobertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
RobertaForTokenClassification class for performing token classification on Roberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of RobertaForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
RobertaForQuestionAnswering class for performing question answering on Roberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of RobertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
The bare XLM Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMWithLMHeadModel
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLM Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMRobertaForMaskedLM
Returns: Promise.<MaskedLMOutput>
- An object containing the model’s output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMRobertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMRobertaForTokenClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model’s output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of XLMRobertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput>
- An object containing the model’s output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
WhisperModel class for training Whisper models without a language model head.
Kind: static class of models
WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.
Kind: static class of models
Creates a new instance of the WhisperForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | Configuration object for the model. |
session | Object | ONNX Session object for the model. |
decoder_merged_session | Object | ONNX Session object for the decoder. |
generation_config | Object | Configuration object for the generation process. |
Generates outputs based on input and generation configuration.
Kind: instance method of WhisperForConditionalGeneration
Returns: Promise.<Object>
- Promise object represents the generated outputs.
Param | Type | Default | Description |
---|---|---|---|
inputs | Object | | Input data for the model. |
generation_config | WhisperGenerationConfig | | Configuration object for the generation process. |
logits_processor | Object | | Optional logits processor object. |
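For illustration, a minimal speech-recognition sketch using this method; 'Xenova/whisper-tiny.en' is an assumed example checkpoint, and the audio URL is the one used in the WavLM example further down this page.
import { AutoProcessor, AutoTokenizer, WhisperForConditionalGeneration, read_audio } from '@xenova/transformers';
const processor = await AutoProcessor.from_pretrained('Xenova/whisper-tiny.en'); // assumed checkpoint
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/whisper-tiny.en');
const model = await WhisperForConditionalGeneration.from_pretrained('Xenova/whisper-tiny.en');
// Read and preprocess 16 kHz audio into log-mel input features
const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
const { input_features } = await processor(audio);
// Generate token ids and decode them to text
const output = await model.generate(input_features);
const transcription = tokenizer.batch_decode(output, { skip_special_tokens: true })[0];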
Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to map each output token to a position in the input audio.
Kind: instance method of WhisperForConditionalGeneration
Returns: Tensor
- tensor containing the timestamps in seconds for each predicted token
Param | Type | Default | Description |
---|---|---|---|
generate_outputs | Object | | Outputs generated by the model |
generate_outputs.cross_attentions | Array.<Array<Array<Tensor>>> | | The cross attentions output by the model |
generate_outputs.decoder_attentions | Array.<Array<Array<Tensor>>> | | The decoder attentions output by the model |
generate_outputs.sequences | Array.<Array<number>> | | The sequences output by the model |
alignment_heads | Array.<Array<number>> | | Alignment heads of the model |
[num_frames] | number | | Number of frames in the input audio. |
[time_precision] | number | 0.02 | Precision of the timestamps in seconds |
Vision Encoder-Decoder model based on OpenAI’s GPT architecture for image captioning and other vision tasks
Kind: static class of models
Creates a new instance of the VisionEncoderDecoderModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration object specifying the hyperparameters and other model settings. |
session | Object | The ONNX session containing the encoder model. |
decoder_merged_session | any | The ONNX session containing the merged decoder model. |
generation_config | Object | Configuration object for the generation process. |
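For illustration, a minimal image-captioning sketch; 'Xenova/vit-gpt2-image-captioning' is an assumed example checkpoint, and the image URL is reused from the CLIP example below.
import { AutoProcessor, AutoTokenizer, VisionEncoderDecoderModel, RawImage } from '@xenova/transformers';
const model_id = 'Xenova/vit-gpt2-image-captioning'; // assumed checkpoint
const processor = await AutoProcessor.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const model = await VisionEncoderDecoderModel.from_pretrained(model_id);
// Read the image and compute pixel values
const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
const { pixel_values } = await processor(image);
// Generate and decode the caption
const output = await model.generate(pixel_values, { max_new_tokens: 20 });
const caption = tokenizer.batch_decode(output, { skip_special_tokens: true })[0];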
CLIP Text and Vision Model with projection layers on top.
Example: Perform zero-shot image classification with a CLIPModel.
import { AutoTokenizer, AutoProcessor, CLIPModel, RawImage } from '@xenova/transformers';
// Load tokenizer, processor, and model
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');
let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
let model = await CLIPModel.from_pretrained('Xenova/clip-vit-base-patch16');
// Run tokenization
let texts = ['a photo of a car', 'a photo of a football match']
let text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Read image and run processor
let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
let image_inputs = await processor(image);
// Run model with both text and pixel inputs
let output = await model({ ...text_inputs, ...image_inputs });
// {
// logits_per_image: Tensor {
// dims: [ 1, 2 ],
// data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
// },
// logits_per_text: Tensor {
// dims: [ 2, 1 ],
// data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
// },
// text_embeds: Tensor {
// dims: [ 2, 512 ],
// data: Float32Array(1024) [ ... ],
// },
// image_embeds: Tensor {
// dims: [ 1, 512 ],
// data: Float32Array(512) [ ... ],
// }
// }
Kind: static class of models
CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output)
Example: Compute text embeddings with CLIPTextModelWithProjection.
import { AutoTokenizer, CLIPTextModelWithProjection } from '@xenova/transformers';
// Load tokenizer and text model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');
const text_model = await CLIPTextModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');
// Run tokenization
let texts = ['a photo of a car', 'a photo of a football match'];
let text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Compute embeddings
const { text_embeds } = await text_model(text_inputs);
// Tensor {
// dims: [ 2, 512 ],
// type: 'float32',
// data: Float32Array(1024) [ ... ],
// size: 1024
// }
Kind: static class of models
Kind: static method of CLIPTextModelWithProjection
CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output)
Example: Compute vision embeddings with CLIPVisionModelWithProjection.
import { AutoProcessor, CLIPVisionModelWithProjection, RawImage} from '@xenova/transformers';
// Load processor and vision model
const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');
// Read image and run processor
let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
let image_inputs = await processor(image);
// Compute embeddings
const { image_embeds } = await vision_model(image_inputs);
// Tensor {
// dims: [ 1, 512 ],
// type: 'float32',
// data: Float32Array(512) [ ... ],
// size: 512
// }
Kind: static class of models
Kind: static method of CLIPVisionModelWithProjection
Kind: static class of models
Creates a new instance of the GPT2PreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks.
Kind: static class of models
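For illustration, a minimal text-generation sketch; 'Xenova/gpt2' is an assumed example checkpoint and the generation options shown are not exhaustive.
import { AutoTokenizer, GPT2LMHeadModel } from '@xenova/transformers';
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt2'); // assumed checkpoint
const model = await GPT2LMHeadModel.from_pretrained('Xenova/gpt2');
const { input_ids } = await tokenizer('Once upon a time,');
const output = await model.generate(input_ids, { max_new_tokens: 20 });
const text = tokenizer.decode(output[0], { skip_special_tokens: true });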
Kind: static class of models
Creates a new instance of the GPTNeoPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
Kind: static class of models
Creates a new instance of the GPTNeoXPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
Kind: static class of models
Creates a new instance of the GPTJPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
Kind: static class of models
Creates a new instance of the GPTBigCodePreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
Kind: static class of models
Creates a new instance of the CodeGenPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
generation_config | GenerationConfig | The generation configuration. |
CodeGenModel is a class representing a code generation model without a language model head.
Kind: static class of models
CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. It extends the CodeGenPreTrainedModel class.
Kind: static class of models
The bare LLaMA Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
Creates a new instance of the LlamaPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
generation_config | GenerationConfig | The generation configuration. |
The bare LLaMA Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
Creates a new instance of the BloomPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The configuration of the model. |
session | any | The ONNX session containing the model weights. |
generation_config | GenerationConfig | The generation configuration. |
The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
Kind: static class of models
Creates a new instance of the MptPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
generation_config | GenerationConfig | The generation configuration. |
The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
Kind: static class of models
Creates a new instance of the OPTPreTrainedModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
generation_config | GenerationConfig | The generation configuration. |
The bare OPT Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
The OPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification logits (including no-object) for all queries. |
output.pred_boxes | Tensor | Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). |
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | The output logits of the model. |
output.pred_boxes | Tensor | Predicted boxes. |
output.pred_masks | Tensor | Predicted masks. |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
The bare ResNet model outputting raw features without any specific head on top.
Kind: static class of models
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
Kind: instance method of ResNetForImageClassification
Param | Type |
---|---|
model_inputs | any |
The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.
Example: Step-by-step Document Parsing.
import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@xenova/transformers';
// Choose model to use
const model_id = 'Xenova/donut-base-finetuned-cord-v2';
// Prepare image inputs
const processor = await AutoProcessor.from_pretrained(model_id);
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/receipt.png';
const image = await RawImage.read(url);
const image_inputs = await processor(image);
// Prepare decoder inputs
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const task_prompt = '<s_cord-v2>';
const decoder_input_ids = tokenizer(task_prompt, {
add_special_tokens: false,
}).input_ids;
// Create the model
const model = await AutoModelForVision2Seq.from_pretrained(model_id);
// Run inference
const output = await model.generate(image_inputs.pixel_values, {
decoder_input_ids,
max_length: model.config.decoder.max_position_embeddings,
});
// Decode output
const decoded = tokenizer.batch_decode(output)[0];
// <s_cord-v2><s_menu><s_nm> CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total></s>
Example: Step-by-step Document Visual Question Answering (DocVQA)
import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@xenova/transformers';
// Choose model to use
const model_id = 'Xenova/donut-base-finetuned-docvqa';
// Prepare image inputs
const processor = await AutoProcessor.from_pretrained(model_id);
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice.png';
const image = await RawImage.read(url);
const image_inputs = await processor(image);
// Prepare decoder inputs
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const question = 'What is the invoice number?';
const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`;
const decoder_input_ids = tokenizer(task_prompt, {
add_special_tokens: false,
}).input_ids;
// Create the model
const model = await AutoModelForVision2Seq.from_pretrained(model_id);
// Run inference
const output = await model.generate(image_inputs.pixel_values, {
decoder_input_ids,
max_length: model.config.decoder.max_position_embeddings,
});
// Decode output
const decoded = tokenizer.batch_decode(output)[0];
// <s_docvqa><s_question> What is the invoice number?</s_question><s_answer> us-001</s_answer></s>
Kind: static class of models
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification logits (including no-object) for all queries. |
output.pred_boxes | Tensor | Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). |
Base class for Segment-Anything model’s output.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.iou_scores | Tensor | The predicted IoU (quality) scores for the masks. |
output.pred_masks | Tensor | The predicted masks. |
Kind: static class of models
Creates a new instance of the MarianMTModel class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
decoder_merged_session | any | |
generation_config | any |
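For illustration, a minimal machine-translation sketch; 'Xenova/opus-mt-en-de' (English-to-German) is an assumed example checkpoint.
import { AutoTokenizer, MarianMTModel } from '@xenova/transformers';
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/opus-mt-en-de'); // assumed checkpoint
const model = await MarianMTModel.from_pretrained('Xenova/opus-mt-en-de');
const { input_ids } = await tokenizer('Hello, how are you?');
const output = await model.generate(input_ids);
const translation = tokenizer.decode(output[0], { skip_special_tokens: true });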
Kind: static class of models
Creates a new instance of the M2M100ForConditionalGeneration class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration object. |
session | Object | The ONNX session object. |
decoder_merged_session | any | |
generation_config | any |
The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.
Example: Load and run a Wav2Vec2Model for feature extraction.
import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/mms-300m');
const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModel.from_pretrained('Xenova/mms-300m');
const output = await model(inputs);
// {
// last_hidden_state: Tensor {
// dims: [ 1, 1144, 1024 ],
// type: 'float32',
// data: Float32Array(1171456) [ ... ],
// size: 1171456
// }
// }
Kind: static class of models
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.
Example: Load and run a WavLMModel for feature extraction.
import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base');
const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModel.from_pretrained('Xenova/wavlm-base');
const output = await model(inputs);
// {
// last_hidden_state: Tensor {
// dims: [ 1, 549, 768 ],
// type: 'float32',
// data: Float32Array(421632) [-0.349443256855011, -0.39341306686401367, 0.022836603224277496, ...],
// size: 421632
// }
// }
Kind: static class of models
WavLM Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
Kind: instance method of WavLMForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_values | Tensor | Float values of input raw speech waveform. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
WavLM Model with a sequence classification head on top (a linear layer over the pooled output).
Kind: static class of models
Calls the model on new inputs.
Kind: instance method of WavLMForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model’s output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.
Kind: static class of models
SpeechT5 Model with a speech encoder and a text decoder.
Kind: static class of models
SpeechT5 Model with a text encoder and a speech decoder.
Kind: static class of models
Creates a new instance of the SpeechT5ForTextToSpeech class.
Param | Type | Description |
---|---|---|
config | Object | The model configuration. |
session | any | session for the model. |
decoder_merged_session | any | session for the decoder. |
generation_config | GenerationConfig | The generation configuration. |
Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder.
Kind: instance method of SpeechT5ForTextToSpeech
Returns: Promise.<SpeechOutput>
- A promise which resolves to an object containing the spectrogram, waveform, and cross-attention tensors.
Param | Type | Default | Description |
---|---|---|---|
input_values | Tensor | | Indices of input sequence tokens in the vocabulary. |
speaker_embeddings | Tensor | | Tensor containing the speaker embeddings. |
options | Object | | Optional parameters for generating speech. |
[options.threshold] | number | 0.5 | The generated sequence ends when the predicted stop token probability exceeds this value. |
[options.minlenratio] | number | 0.0 | Used to calculate the minimum required length for the output sequence. |
[options.maxlenratio] | number | 20.0 | Used to calculate the maximum allowed length for the output sequence. |
[options.vocoder] | Object | | The vocoder that converts the mel spectrogram into a speech waveform. If not provided, only the spectrogram is returned. |
[options.output_cross_attentions] | boolean | false | Whether or not to return the attentions tensors of the decoder's cross-attention layers. |
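For illustration, a sketch of calling this method, assuming the 'Xenova/speecht5_tts' and 'Xenova/speecht5_hifigan' checkpoints, a pre-computed speaker-embedding file, and that the method is exposed as generate_speech; all of these are assumptions to verify against the version in use.
import { AutoTokenizer, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@xenova/transformers';
// NOTE: model ids, the speaker-embeddings URL, and the generate_speech name are assumptions for illustration.
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts');
const model = await SpeechT5ForTextToSpeech.from_pretrained('Xenova/speecht5_tts');
const vocoder = await SpeechT5HifiGan.from_pretrained('Xenova/speecht5_hifigan');
// Load a speaker embedding (x-vector) describing the target voice
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin';
const data = new Float32Array(await (await fetch(url)).arrayBuffer());
const speaker_embeddings = new Tensor('float32', data, [1, data.length]);
// Convert text to a waveform using the vocoder
const { input_ids } = await tokenizer('Hello, my dog is cute.');
const { waveform } = await model.generate_speech(input_ids, speaker_embeddings, { vocoder });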
HiFi-GAN vocoder.
Kind: static class of models
Base class of all AutoModels. Contains the from_pretrained function which is used to instantiate pretrained models.
Kind: static class of models
.from_pretrained() : PreTrainedModel.from_pretrained
Mapping from model type to model class.
Kind: instance property of PretrainedMixin
Whether to attempt to instantiate the base class (PretrainedModel) if the model type is not found in the mapping.
Kind: instance property of PretrainedMixin
Kind: static method of PretrainedMixin
Helper class which is used to instantiate pretrained models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained sequence classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained token classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained sequence-to-sequence models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained sequence-to-sequence speech-to-text models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained sequence-to-sequence text-to-spectrogram models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained causal language models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained masked language models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained question answering models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained vision-to-sequence models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained image classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained image segmentation models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained object detection models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Helper class which is used to instantiate pretrained object detection models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | The output logits of the model. |
output.past_key_values | Tensor | A tensor of key/value pairs that represent the previous state of the model. |
output.encoder_outputs | Tensor | The output of the encoder in a sequence-to-sequence model. |
[output.decoder_attentions] | Tensor | Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. |
[output.cross_attentions] | Tensor | Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. |
Base class for outputs of sentence classification models.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | classification (or regression if config.num_labels==1) scores (before SoftMax). |
Base class for outputs of token classification models.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification scores (before SoftMax). |
Base class for masked language models outputs.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
Base class for outputs of question answering models.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.start_logits | Tensor | Span-start scores (before SoftMax). |
output.end_logits | Tensor | Span-end scores (before SoftMax). |
Base class for causal language model (or autoregressive) outputs.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before softmax). |
Base class for causal language model (or autoregressive) outputs.
Kind: static class of models
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before softmax). |
output.past_key_values | Tensor | Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see the past_key_values input) to speed up sequential decoding. |
Kind: inner typedef of models
Generates text based on the given inputs and generation configuration using the model.
Kind: inner typedef of models
Returns: Promise.<(Array<Array<number>>|EncoderDecoderOutput|DecoderOutput)>
- An array of generated output sequences, where each sequence is an array of token IDs.
Throws:
Error
Throws an error if the inputs array is empty.
Param | Type | Default | Description |
---|---|---|---|
inputs | Tensor, Array, or TypedArray | | An array of input token IDs. |
generation_config | Object, GenerationConfig, or null | | The generation configuration to use. If null, default configuration will be used. |
logits_processor | Object or null | | An optional logits processor to use. If null, a new LogitsProcessorList instance will be created. |
options | Object | | Additional options for generation. |
[options.inputs_attention_mask] | Object | | An optional attention mask for the inputs. |
Kind: inner typedef of models
Extends: GenerationConfig
Properties
Name | Type | Default | Description |
---|---|---|---|
[return_timestamps] | boolean | | Whether to return the timestamps with the text. This enables the WhisperTimeStampLogitsProcessor. |
[return_token_timestamps] | boolean | | Whether to return token-level timestamps with the text. This can be used with or without the return_timestamps option. |
[num_frames] | number | | The number of audio frames available in this chunk. This is only used when generating word-level timestamps. |
Kind: inner typedef of models
Properties
Name | Type | Description |
---|---|---|
[spectrogram] | Tensor | The predicted log-mel spectrogram. |
[waveform] | Tensor | The predicted waveform. |
[cross_attentions] | Tensor | The outputs of the decoder's cross-attention layers. |