戒酒的李白

BERT's method of processing sentences

  1 +{
  2 + "word_embedding_dimension": 768,
  3 + "pooling_mode_cls_token": false,
  4 + "pooling_mode_mean_tokens": true,
  5 + "pooling_mode_max_tokens": false,
  6 + "pooling_mode_mean_sqrt_len_tokens": false,
  7 + "pooling_mode_weightedmean_tokens": false,
  8 + "pooling_mode_lasttoken": false,
  9 + "include_prompt": true
  10 +}
  1 +{
  2 + "_name_or_path": "./bert_model",
  3 + "architectures": [
  4 + "BertModel"
  5 + ],
  6 + "attention_probs_dropout_prob": 0.1,
  7 + "classifier_dropout": null,
  8 + "directionality": "bidi",
  9 + "hidden_act": "gelu",
  10 + "hidden_dropout_prob": 0.1,
  11 + "hidden_size": 768,
  12 + "initializer_range": 0.02,
  13 + "intermediate_size": 3072,
  14 + "layer_norm_eps": 1e-12,
  15 + "max_position_embeddings": 512,
  16 + "model_type": "bert",
  17 + "num_attention_heads": 12,
  18 + "num_hidden_layers": 12,
  19 + "pad_token_id": 0,
  20 + "pooler_fc_size": 768,
  21 + "pooler_num_attention_heads": 12,
  22 + "pooler_num_fc_layers": 3,
  23 + "pooler_size_per_head": 128,
  24 + "pooler_type": "first_token_transform",
  25 + "position_embedding_type": "absolute",
  26 + "torch_dtype": "float32",
  27 + "transformers_version": "4.37.2",
  28 + "type_vocab_size": 2,
  29 + "use_cache": true,
  30 + "vocab_size": 21128
  31 +}
  1 +{
  2 + "__version__": {
  3 + "sentence_transformers": "3.0.1",
  4 + "transformers": "4.37.2",
  5 + "pytorch": "2.3.0"
  6 + },
  7 + "prompts": {},
  8 + "default_prompt_name": null,
  9 + "similarity_fn_name": null
  10 +}
  1 +[
  2 + {
  3 + "idx": 0,
  4 + "name": "0",
  5 + "path": "",
  6 + "type": "sentence_transformers.models.Transformer"
  7 + },
  8 + {
  9 + "idx": 1,
  10 + "name": "1",
  11 + "path": "1_Pooling",
  12 + "type": "sentence_transformers.models.Pooling"
  13 + }
  14 +]
  1 +sentence_bert_model parameter files:
  2 +
  3 +Link: https://pan.baidu.com/s/1_5J4N6GGDC1BD5iqF2UJ7g?pwd=kzp0
  4 +
  5 +Access code: kzp0
  1 +{
  2 + "max_seq_length": 512,
  3 + "do_lower_case": false
  4 +}
  1 +{
  2 + "cls_token": "[CLS]",
  3 + "mask_token": "[MASK]",
  4 + "pad_token": "[PAD]",
  5 + "sep_token": "[SEP]",
  6 + "unk_token": "[UNK]"
  7 +}
This diff could not be displayed because it is too large.
  1 +{
  2 + "added_tokens_decoder": {
  3 + "0": {
  4 + "content": "[PAD]",
  5 + "lstrip": false,
  6 + "normalized": false,
  7 + "rstrip": false,
  8 + "single_word": false,
  9 + "special": true
  10 + },
  11 + "100": {
  12 + "content": "[UNK]",
  13 + "lstrip": false,
  14 + "normalized": false,
  15 + "rstrip": false,
  16 + "single_word": false,
  17 + "special": true
  18 + },
  19 + "101": {
  20 + "content": "[CLS]",
  21 + "lstrip": false,
  22 + "normalized": false,
  23 + "rstrip": false,
  24 + "single_word": false,
  25 + "special": true
  26 + },
  27 + "102": {
  28 + "content": "[SEP]",
  29 + "lstrip": false,
  30 + "normalized": false,
  31 + "rstrip": false,
  32 + "single_word": false,
  33 + "special": true
  34 + },
  35 + "103": {
  36 + "content": "[MASK]",
  37 + "lstrip": false,
  38 + "normalized": false,
  39 + "rstrip": false,
  40 + "single_word": false,
  41 + "special": true
  42 + }
  43 + },
  44 + "clean_up_tokenization_spaces": true,
  45 + "cls_token": "[CLS]",
  46 + "do_basic_tokenize": true,
  47 + "do_lower_case": false,
  48 + "mask_token": "[MASK]",
  49 + "model_max_length": 512,
  50 + "never_split": null,
  51 + "pad_token": "[PAD]",
  52 + "sep_token": "[SEP]",
  53 + "strip_accents": null,
  54 + "tokenize_chinese_chars": true,
  55 + "tokenizer_class": "BertTokenizer",
  56 + "unk_token": "[UNK]"
  57 +}
This diff could not be displayed because it is too large.