Second Version
Files changed:
- config.json +1 -1
- pytorch_model.bin +1 -1
- tokenizer_config.json +1 -1
config.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "shoubhik/TrOCR_finetune",
   "architectures": [
     "VisionEncoderDecoderModel"
   ],
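The "architectures" entry is what transformers uses to pick the model class, so this checkpoint loads as a VisionEncoderDecoderModel, and the new "_name_or_path" now points at the hub repo itself. A minimal loading sketch in Python, assuming only the repo id shown in the diff:

from transformers import VisionEncoderDecoderModel

# "_name_or_path" and "architectures" are taken from the config.json diff above.
model = VisionEncoderDecoderModel.from_pretrained("shoubhik/TrOCR_finetune")

# The wrapper pairs a vision encoder with a text decoder; show which ones.
print(model.config.encoder.model_type, "->", model.config.decoder.model_type)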
    	
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:104838ee50b0e5d99965a628086371cf1cc71ce40ed4844018c31b5d1d306540
 size 1335850753
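pytorch_model.bin is stored with Git LFS, so the diff above is against the pointer file, not the weights: only the sha256 oid changed, while the byte size (1335850753, roughly 1.3 GB) is identical, as expected when fine-tuning rewrites tensor values without changing shapes. A downloaded copy can be checked against the pointer; a sketch assuming the file sits in the current directory:

import hashlib
import os

path = "pytorch_model.bin"  # assumed local download location

# Values copied from the LFS pointer above.
expected_oid = "104838ee50b0e5d99965a628086371cf1cc71ce40ed4844018c31b5d1d306540"
expected_size = 1335850753

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("weights match the LFS pointer")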
    	
tokenizer_config.json CHANGED

@@ -1 +1 @@
-{"errors": "replace", "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "trim_offsets": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "
+{"errors": "replace", "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "trim_offsets": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "shoubhik/TrOCR_finetune", "processor_class": "TrOCRProcessor", "tokenizer_class": "RobertaTokenizer"}
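The keys appended at the end are the functional change: "processor_class": "TrOCRProcessor" and "tokenizer_class": "RobertaTokenizer" let the hub resolve the right wrapper classes automatically, so the tokenizer no longer has to be named by hand when loading. An end-to-end inference sketch under that assumption; the image path is a placeholder:

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

model_id = "shoubhik/TrOCR_finetune"
processor = TrOCRProcessor.from_pretrained(model_id)  # bundles image processor + RobertaTokenizer
model = VisionEncoderDecoderModel.from_pretrained(model_id)

# "line.png" is a placeholder for a single text-line image.
image = Image.open("line.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values

generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])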