update large encoders; slm and update c5 gpt4o
This view is limited to 50 files because it contains too many changes.
- runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5-essay_only/evaluation_results.csv +2 -2
 - runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5-essay_only/gpt-4o-2024-11-20-grader-zero-shot-C5-essay_only_inference_results.jsonl +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/.hydra/config.yaml +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/.hydra/hydra.yaml +6 -5
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only/.hydra/overrides.yaml +1 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/bootstrap_confidence_intervals.csv +1 -1
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/evaluation_results.csv +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only/jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only_inference_results.jsonl} +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/run_inference_experiment.log +105 -51
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/.hydra/config.yaml +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/.hydra/hydra.yaml +6 -5
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/.hydra/overrides.yaml +1 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/bootstrap_confidence_intervals.csv +1 -1
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/evaluation_results.csv +2 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only/jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only_inference_results.jsonl} +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/run_inference_experiment.log +105 -51
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/.hydra/config.yaml +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/.hydra/hydra.yaml +6 -5
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/.hydra/overrides.yaml +1 -0
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/bootstrap_confidence_intervals.csv +2 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/evaluation_results.csv +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only/jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only_inference_results.jsonl} +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/run_inference_experiment.log +105 -51
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/.hydra/config.yaml +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/.hydra/hydra.yaml +6 -5
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only/.hydra/overrides.yaml +1 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/bootstrap_confidence_intervals.csv +1 -1
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/evaluation_results.csv +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only/jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only_inference_results.jsonl} +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/run_inference_experiment.log +105 -51
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/.hydra/config.yaml +2 -2
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/.hydra/hydra.yaml +157 -0
 - runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/.hydra/overrides.yaml +1 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/bootstrap_confidence_intervals.csv +1 -1
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/evaluation_results.csv +2 -2
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only_inference_results.jsonl} +0 -0
 - runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/run_inference_experiment.log +105 -51
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only/.hydra/overrides.yaml +0 -1
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only/.hydra/overrides.yaml +0 -1
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only/.hydra/overrides.yaml +0 -1
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only/.hydra/overrides.yaml +0 -1
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/.hydra/hydra.yaml +0 -156
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/.hydra/overrides.yaml +0 -1
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/bootstrap_confidence_intervals.csv +0 -2
 - runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/evaluation_results.csv +0 -2
 - runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/.hydra/hydra.yaml +3 -3
 - runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/bootstrap_confidence_intervals.csv +1 -1
 - runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/evaluation_results.csv +1 -1
 - runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16_inference_results.jsonl +0 -0
 - runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/run_inference_experiment.log +68 -45
 
    	
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5-essay_only/evaluation_results.csv
CHANGED
@@ -1,2 +1,2 @@
-accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.3188405797101449,60.048289746247356,0.5487540742298391,0.07971014492753625,0.2921402969790066,0.3188405797101449,0.29828469022017406,16,112,4,6,13,81,25,19,3,96,18,21,10,92,21,15,0,106,0,32,2,109,26,1,2025-07-02 21:08:07,gpt-4o-2024-11-20-zero-shot-C5-essay_only
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.3188405797101449,60.048289746247356,0.5487540742298391,0.07971014492753625,0.2921402969790066,0.3188405797101449,0.29828469022017406,16,112,4,6,13,81,25,19,3,96,18,21,10,92,21,15,0,106,0,32,2,109,26,1,2025-07-02 21:08:07,gpt-4o-2024-11-20-zero-shot-C5-essay_only
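Both CSV lines are rewritten verbatim here, but the per-class confusion counts in the row are enough to sanity-check the aggregate scores. A minimal sketch (mine, not part of the commit) that recomputes Macro_F1 from the TP/FP/FN columns, using per-class F1 = 2*TP / (2*TP + FP + FN):

import pandas as pd

path = "runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5-essay_only/evaluation_results.csv"
row = pd.read_csv(path).iloc[0]

f1s = []
for c in range(6):  # competency grades are scored as classes 0..5
    tp, fp, fn = row[f"TP_{c}"], row[f"FP_{c}"], row[f"FN_{c}"]
    denom = 2 * tp + fp + fn
    f1s.append(2 * tp / denom if denom else 0.0)

print(sum(f1s) / len(f1s))  # ~0.2921, matching the Macro_F1 column above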
    	
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5-essay_only/gpt-4o-2024-11-20-grader-zero-shot-C5-essay_only_inference_results.jsonl
DELETED
(The diff for this file is too large to render.)
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/.hydra/config.yaml
RENAMED
@@ -20,12 +20,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C1
     logging_dir: ./logs/bertimbau_large/C1
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/.hydra/hydra.yaml
RENAMED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir:
+    dir: inference_output/2025-07-10/01-10-43
   sweep:
     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -110,13 +110,14 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
+    - hydra.run.dir=inference_output/2025-07-10/01-10-43
     - hydra.mode=RUN
     task:
-    - experiments=
+    - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
   job:
     name: run_inference_experiment
     chdir: null
-    override_dirname: experiments=
+    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
     id: ???
     num: ???
     config_name: config
@@ -141,9 +142,9 @@ hydra:
     - path: ''
       schema: structured
       provider: schema
-  output_dir: /workspace/jbcs2025/
+  output_dir: /workspace/jbcs2025/inference_output/2025-07-10/01-10-43
   choices:
-    experiments:
+    experiments: temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
     hydra/env: default
     hydra/callbacks: null
     hydra/job_logging: default
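The recorded overrides pin both the experiment config and the Hydra run directory, so the run should be reproducible from the command line. Assuming the entry point matches the recorded job name (run_inference_experiment, an assumption; only the overrides themselves appear in this diff), the invocation would look like:

python run_inference_experiment.py experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only hydra.run.dir=inference_output/2025-07-10/01-10-43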
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/bootstrap_confidence_intervals.csv
RENAMED
@@ -1,2 +1,2 @@
 experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
-
+jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only,2025-07-10 01:10:49,0.6822787596623211,0.586354115876401,0.7722799106018048,0.18592579472540383,0.4971683868103125,0.37489665684586737,0.6515710555984839,0.27667439875261657,0.6920329870783886,0.612322638598863,0.767705078861486,0.15538244026262293
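The repopulated row stores, per metric, the bootstrap mean, the 2.5th/97.5th percentile bounds, and the interval width (e.g. QWK 0.6823 [0.5864, 0.7723]). The generating script is not part of this diff; a minimal percentile-bootstrap sketch consistent with those columns (the function name and resample count are my assumptions):

import numpy as np
from sklearn.metrics import cohen_kappa_score

def bootstrap_qwk_ci(y_true, y_pred, n_boot=1000, seed=42):
    """Resample (true, pred) pairs with replacement, recompute QWK each
    time, and report mean, 95% percentile bounds, and CI width."""
    rng = np.random.default_rng(seed)
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    scores = np.empty(n_boot)
    for i in range(n_boot):
        idx = rng.integers(0, len(y_true), size=len(y_true))
        scores[i] = cohen_kappa_score(y_true[idx], y_pred[idx], weights="quadratic")
    lower, upper = np.percentile(scores, [2.5, 97.5])
    return scores.mean(), lower, upper, upper - lower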
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/evaluation_results.csv
RENAMED
@@ -1,2 +1,2 @@
-accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.6884057971014492,25.02172968684897,0.6825694326120293,0.007246376811594235,0.4609419962901689,0.6884057971014492,0.6910750081919986,0,137,0,1,0,138,0,0,4,122,6,6,46,62,10,20,40,69,18,11,5,119,9,5,2025-07-10 01:10:49,jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only
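One detail worth noting in this row: the reported Macro_F1 (0.4609) averages per-class F1 over only five of the six classes. Class 1 has TP = FP = FN = 0 (no class-1 essays in the test set and none predicted), and scikit-learn's macro average only covers labels that occur in the true or predicted values; averaging over all six classes would give about 0.3841 instead. A quick check (mine, assuming the metrics come from scikit-learn):

# Per-class F1 from the TP/FP/FN columns of the row above; class 1 is
# absent from both y_true and y_pred, so it drops out of the macro average.
f1 = lambda tp, fp, fn: 2 * tp / (2 * tp + fp + fn)
present = [f1(0, 0, 1), f1(4, 6, 6), f1(46, 10, 20), f1(40, 18, 11), f1(5, 9, 5)]
print(sum(present) / len(present))  # ~0.4609, matching the Macro_F1 column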
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only/jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only_inference_results.jsonl}
RENAMED
(The diff for this file is too large to render.)
    	
        runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only}/run_inference_experiment.log
    RENAMED
    
    | 
         @@ -1,5 +1,5 @@ 
     | 
|
| 1 | 
         
            -
            [2025-07-01 
     | 
| 2 | 
         
            -
            [2025-07-01 
     | 
| 3 | 
         
             
            dataset:
         
     | 
| 4 | 
         
             
              name: kamel-usp/aes_enem_dataset
         
     | 
| 5 | 
         
             
              split: JBCS2025
         
     | 
| 
         @@ -21,12 +21,12 @@ post_training_results: 
     | 
|
| 21 | 
         
             
              model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
         
     | 
| 22 | 
         
             
            experiments:
         
     | 
| 23 | 
         
             
              model:
         
     | 
| 24 | 
         
            -
                name: kamel-usp/ 
     | 
| 25 | 
         
             
                type: encoder_classification
         
     | 
| 26 | 
         
             
                num_labels: 6
         
     | 
| 27 | 
         
             
                output_dir: ./results/bertimbau_large/C1
         
     | 
| 28 | 
         
             
                logging_dir: ./logs/bertimbau_large/C1
         
     | 
| 29 | 
         
            -
                best_model_dir:  
     | 
| 30 | 
         
             
              tokenizer:
         
     | 
| 31 | 
         
             
                name: neuralmind/bert-large-portuguese-cased
         
     | 
| 32 | 
         
             
              dataset:
         
     | 
| 
         @@ -41,9 +41,9 @@ experiments: 
     | 
|
| 41 | 
         
             
                gradient_accumulation_steps: 1
         
     | 
| 42 | 
         
             
                gradient_checkpointing: false
         
     | 
| 43 | 
         | 
| 44 | 
         
            -
            [2025-07-01 
     | 
| 45 | 
         
            -
            [2025-07-01 
     | 
| 46 | 
         
            -
            [2025-07-01 
     | 
| 47 | 
         
             
              "architectures": [
         
     | 
| 48 | 
         
             
                "BertForMaskedLM"
         
     | 
| 49 | 
         
             
              ],
         
     | 
| 
         @@ -68,20 +68,14 @@ experiments: 
     | 
|
| 68 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 69 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 70 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 71 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 72 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 73 | 
         
             
              "use_cache": true,
         
     | 
| 74 | 
         
             
              "vocab_size": 29794
         
     | 
| 75 | 
         
             
            }
         
     | 
| 76 | 
         | 
| 77 | 
         
            -
            [2025-07-01 
     | 
| 78 | 
         
            -
            [2025-07-01 
     | 
| 79 | 
         
            -
            [2025-07-01 00:02:10,698][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
         
     | 
| 80 | 
         
            -
            [2025-07-01 00:02:10,698][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
         
     | 
| 81 | 
         
            -
            [2025-07-01 00:02:10,698][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
         
     | 
| 82 | 
         
            -
            [2025-07-01 00:02:10,698][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
         
     | 
| 83 | 
         
            -
            [2025-07-01 00:02:10,698][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 84 | 
         
            -
            [2025-07-01 00:02:10,699][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 85 | 
         
             
              "architectures": [
         
     | 
| 86 | 
         
             
                "BertForMaskedLM"
         
     | 
| 87 | 
         
             
              ],
         
     | 
| 
         @@ -106,14 +100,20 @@ experiments: 
     | 
|
| 106 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 107 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 108 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 109 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 110 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 111 | 
         
             
              "use_cache": true,
         
     | 
| 112 | 
         
             
              "vocab_size": 29794
         
     | 
| 113 | 
         
             
            }
         
     | 
| 114 | 
         | 
| 115 | 
         
            -
            [2025-07-01 
     | 
| 116 | 
         
            -
            [2025-07-01 
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 117 | 
         
             
              "architectures": [
         
     | 
| 118 | 
         
             
                "BertForMaskedLM"
         
     | 
| 119 | 
         
             
              ],
         
     | 
| 
         @@ -138,18 +138,73 @@ experiments: 
     | 
|
| 138 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 139 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 140 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 141 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 142 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 143 | 
         
             
              "use_cache": true,
         
     | 
| 144 | 
         
             
              "vocab_size": 29794
         
     | 
| 145 | 
         
             
            }
         
     | 
| 146 | 
         | 
| 147 | 
         
            -
            [2025-07-01 
     | 
| 148 | 
         
            -
            [2025-07-01 
     | 
| 149 | 
         
            -
             
     | 
| 150 | 
         
            -
             
     | 
| 151 | 
         
            -
             
     | 
| 152 | 
         
            -
             
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 153 | 
         
             
              "architectures": [
         
     | 
| 154 | 
         
             
                "BertForSequenceClassification"
         
     | 
| 155 | 
         
             
              ],
         
     | 
| 
         @@ -190,37 +245,36 @@ experiments: 
     | 
|
| 190 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 191 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 192 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 193 | 
         
            -
              "problem_type": "single_label_classification",
         
     | 
| 194 | 
         
             
              "torch_dtype": "float32",
         
     | 
| 195 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 196 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 197 | 
         
             
              "use_cache": true,
         
     | 
| 198 | 
         
             
              "vocab_size": 29794
         
     | 
| 199 | 
         
             
            }
         
     | 
| 200 | 
         | 
| 201 | 
         
            -
            [2025-07-01 
     | 
| 202 | 
         
            -
            [2025-07-01 
     | 
| 203 | 
         
            -
            [2025-07-01 
     | 
| 204 | 
         
            -
            [2025-07-01 
     | 
| 205 | 
         | 
| 206 | 
         
            -
            [2025-07-01 
     | 
| 207 | 
         
             
            If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
         
     | 
| 208 | 
         
            -
            [2025-07-01 
     | 
| 209 | 
         
            -
            [2025-07-01 
     | 
| 210 | 
         
            -
            [2025-07-01 
     | 
| 211 | 
         
            -
            [2025-07-01 
     | 
| 212 | 
         
            -
            [2025-07-01 
     | 
| 213 | 
         
            -
            [2025-07-01 
     | 
| 214 | 
         
            -
            [2025-07-01 
     | 
| 215 | 
         
             
            ***** Running Prediction *****
         
     | 
| 216 | 
         
            -
            [2025-07-01 
     | 
| 217 | 
         
            -
            [2025-07-01 
     | 
| 218 | 
         
            -
            [2025-07-01 
     | 
| 219 | 
         
            -
            [2025-07-01 
     | 
| 220 | 
         
            -
            [2025-07-01 
     | 
| 221 | 
         
            -
            [2025-07-01 
     | 
| 222 | 
         
            -
            [2025-07-01 
     | 
| 223 | 
         
            -
            [2025-07-01 
     | 
| 224 | 
         
            -
            [2025-07-01 
     | 
| 225 | 
         
            -
            [2025-07-01 
     | 
| 226 | 
         
            -
            [2025-07-01 
     | 
| 
         | 
|
| 1 | 
         
            +
            [2025-07-10 01:10:49,396][__main__][INFO] - Starting inference experiment
         
     | 
| 2 | 
         
            +
            [2025-07-10 01:10:49,398][__main__][INFO] - cache_dir: /tmp/
         
     | 
| 3 | 
         
             
            dataset:
         
     | 
| 4 | 
         
             
              name: kamel-usp/aes_enem_dataset
         
     | 
| 5 | 
         
             
              split: JBCS2025
         
     | 
| 
         | 
|
| 21 | 
         
             
              model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
         
     | 
| 22 | 
         
             
            experiments:
         
     | 
| 23 | 
         
             
              model:
         
     | 
| 24 | 
         
            +
                name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
         
     | 
| 25 | 
         
             
                type: encoder_classification
         
     | 
| 26 | 
         
             
                num_labels: 6
         
     | 
| 27 | 
         
             
                output_dir: ./results/bertimbau_large/C1
         
     | 
| 28 | 
         
             
                logging_dir: ./logs/bertimbau_large/C1
         
     | 
| 29 | 
         
            +
                best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
         
     | 
| 30 | 
         
             
              tokenizer:
         
     | 
| 31 | 
         
             
                name: neuralmind/bert-large-portuguese-cased
         
     | 
| 32 | 
         
             
              dataset:
         
     | 
| 
         | 
|
| 41 | 
         
             
                gradient_accumulation_steps: 1
         
     | 
| 42 | 
         
             
                gradient_checkpointing: false
         
     | 
| 43 | 
         | 
| 44 | 
         
            +
            [2025-07-10 01:10:49,400][__main__][INFO] - Running inference with fine-tuned HF model
         
     | 
| 45 | 
         
            +
            [2025-07-10 01:10:55,009][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 46 | 
         
            +
            [2025-07-10 01:10:55,010][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 47 | 
         
             
              "architectures": [
         
     | 
| 48 | 
         
             
                "BertForMaskedLM"
         
     | 
| 49 | 
         
             
              ],
         
     | 
| 
         | 
|
| 68 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 69 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 70 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 71 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 72 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 73 | 
         
             
              "use_cache": true,
         
     | 
| 74 | 
         
             
              "vocab_size": 29794
         
     | 
| 75 | 
         
             
            }
         
     | 
| 76 | 
         | 
| 77 | 
         
            +
            [2025-07-10 01:10:55,328][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 78 | 
         
            +
            [2025-07-10 01:10:55,328][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 79 | 
         
             
              "architectures": [
         
     | 
| 80 | 
         
             
                "BertForMaskedLM"
         
     | 
| 81 | 
         
             
              ],
         
     | 
| 
         | 
|
| 100 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 101 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 102 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 103 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 104 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 105 | 
         
             
              "use_cache": true,
         
     | 
| 106 | 
         
             
              "vocab_size": 29794
         
     | 
| 107 | 
         
             
            }
         
     | 
| 108 | 
         | 
| 109 | 
         
            +
            [2025-07-10 01:10:55,537][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
         
     | 
| 110 | 
         
            +
            [2025-07-10 01:10:55,537][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
         
     | 
| 111 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
         
     | 
| 112 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
         
     | 
| 113 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
         
     | 
| 114 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
         
     | 
| 115 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 116 | 
         
            +
            [2025-07-10 01:10:55,538][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 117 | 
         
             
              "architectures": [
         
     | 
| 118 | 
         
             
                "BertForMaskedLM"
         
     | 
| 119 | 
         
             
              ],
         
     | 
| 
         | 
|
| 138 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 139 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 140 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 141 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 142 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 143 | 
         
             
              "use_cache": true,
         
     | 
| 144 | 
         
             
              "vocab_size": 29794
         
     | 
| 145 | 
         
             
            }
         
     | 
| 146 | 
         | 
| 147 | 
         
            +
            [2025-07-10 01:10:55,572][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 148 | 
         
            +
            [2025-07-10 01:10:55,572][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 149 | 
         
            +
              "architectures": [
         
     | 
| 150 | 
         
            +
                "BertForMaskedLM"
         
     | 
| 151 | 
         
            +
              ],
         
     | 
| 152 | 
         
            +
              "attention_probs_dropout_prob": 0.1,
         
     | 
| 153 | 
         
            +
              "classifier_dropout": null,
         
     | 
| 154 | 
         
            +
              "directionality": "bidi",
         
     | 
| 155 | 
         
            +
              "hidden_act": "gelu",
         
     | 
| 156 | 
         
            +
              "hidden_dropout_prob": 0.1,
         
     | 
| 157 | 
         
            +
              "hidden_size": 1024,
         
     | 
| 158 | 
         
            +
              "initializer_range": 0.02,
         
     | 
| 159 | 
         
            +
              "intermediate_size": 4096,
         
     | 
| 160 | 
         
            +
              "layer_norm_eps": 1e-12,
         
     | 
| 161 | 
         
            +
              "max_position_embeddings": 512,
         
     | 
| 162 | 
         
            +
              "model_type": "bert",
         
     | 
| 163 | 
         
            +
              "num_attention_heads": 16,
         
     | 
| 164 | 
         
            +
              "num_hidden_layers": 24,
         
     | 
| 165 | 
         
            +
              "output_past": true,
         
     | 
| 166 | 
         
            +
              "pad_token_id": 0,
         
     | 
| 167 | 
         
            +
              "pooler_fc_size": 768,
         
     | 
| 168 | 
         
            +
              "pooler_num_attention_heads": 12,
         
     | 
| 169 | 
         
            +
              "pooler_num_fc_layers": 3,
         
     | 
| 170 | 
         
            +
              "pooler_size_per_head": 128,
         
     | 
| 171 | 
         
            +
              "pooler_type": "first_token_transform",
         
     | 
| 172 | 
         
            +
              "position_embedding_type": "absolute",
         
     | 
| 173 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 174 | 
         
            +
              "type_vocab_size": 2,
         
     | 
| 175 | 
         
            +
              "use_cache": true,
         
     | 
| 176 | 
         
            +
              "vocab_size": 29794
         
     | 
| 177 | 
         
            +
            }
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
            [2025-07-10 01:10:55,590][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
         
     | 
| 180 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] - 
         
     | 
| 181 | 
         
            +
            Token statistics for 'train' split:
         
     | 
| 182 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] -   Total examples: 500
         
     | 
| 183 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] -   Min tokens: 512
         
     | 
| 184 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] -   Max tokens: 512
         
     | 
| 185 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 186 | 
         
            +
            [2025-07-10 01:10:56,012][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 187 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] - 
         
     | 
| 188 | 
         
            +
            Token statistics for 'validation' split:
         
     | 
| 189 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] -   Total examples: 132
         
     | 
| 190 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] -   Min tokens: 512
         
     | 
| 191 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] -   Max tokens: 512
         
     | 
| 192 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 193 | 
         
            +
            [2025-07-10 01:10:56,103][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 194 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] - 
         
     | 
| 195 | 
         
            +
            Token statistics for 'test' split:
         
     | 
| 196 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] -   Total examples: 138
         
     | 
| 197 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] -   Min tokens: 512
         
     | 
| 198 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] -   Max tokens: 512
         
     | 
| 199 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 200 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 201 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
         
     | 
| 202 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
         
     | 
| 203 | 
         
            +
            [2025-07-10 01:10:56,198][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
         
     | 
| 204 | 
         
            +
            [2025-07-10 01:10:56,199][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only
         
     | 
| 205 | 
         
            +
            [2025-07-10 01:10:57,324][__main__][INFO] - Model need ≈ 2.62 GiB to run inference and 6.36 for training 
         
     | 
| 206 | 
         
            +
            [2025-07-10 01:10:58,329][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only/snapshots/da30f459272b3b4c66170aa85fce0e1191eea445/config.json
         
     | 
| 207 | 
         
            +
            [2025-07-10 01:10:58,330][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 208 | 
         
             
              "architectures": [
         
     | 
| 209 | 
         
             
                "BertForSequenceClassification"
         
     | 
| 210 | 
         
             
              ],
         
     | 
| 
         | 
|
| 245 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 246 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 247 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 
         | 
|
| 248 | 
         
             
              "torch_dtype": "float32",
         
     | 
| 249 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 250 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 251 | 
         
             
              "use_cache": true,
         
     | 
| 252 | 
         
             
              "vocab_size": 29794
         
     | 
| 253 | 
         
             
            }
         
     | 
| 254 | 
         | 
| 255 | 
         
            +
            [2025-07-10 01:11:22,859][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only/snapshots/da30f459272b3b4c66170aa85fce0e1191eea445/model.safetensors
         
     | 
| 256 | 
         
+[2025-07-10 01:11:22,862][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+[2025-07-10 01:11:22,862][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+[2025-07-10 01:11:23,611][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.

+[2025-07-10 01:11:23,612][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only.
 If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
+[2025-07-10 01:11:23,630][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-07-10 01:11:23,654][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-07-10 01:11:23,667][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-07-10 01:11:23,697][transformers.trainer][INFO] - Using auto half precision backend
+[2025-07-10 01:11:27,018][__main__][INFO] - Running inference on test dataset
+[2025-07-10 01:11:27,019][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, id, grades, essay_text, id_prompt, supporting_text, prompt, essay_year. If reference, id, grades, essay_text, id_prompt, supporting_text, prompt, essay_year are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-10 01:11:27,031][transformers.trainer][INFO] -
 ***** Running Prediction *****
+[2025-07-10 01:11:27,032][transformers.trainer][INFO] -   Num examples = 138
+[2025-07-10 01:11:27,032][transformers.trainer][INFO] -   Batch size = 16
+[2025-07-10 01:11:28,595][__main__][INFO] - Inference results saved to jbcs2025_bert-large-portuguese-cased-encoder_classification-C1-essay_only-encoder_classification-C1-essay_only_inference_results.jsonl
+[2025-07-10 01:11:28,596][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
+[2025-07-10 01:13:34,349][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
+[2025-07-10 01:13:34,350][__main__][INFO] - Bootstrap Confidence Intervals (95%):
+[2025-07-10 01:13:34,350][__main__][INFO] -   QWK: 0.6823 [0.5864, 0.7723]
+[2025-07-10 01:13:34,350][__main__][INFO] -   Macro_F1: 0.4972 [0.3749, 0.6516]
+[2025-07-10 01:13:34,350][__main__][INFO] -   Weighted_F1: 0.6920 [0.6123, 0.7677]
+[2025-07-10 01:13:34,350][__main__][INFO] - Inference results: {'accuracy': 0.6884057971014492, 'RMSE': 25.02172968684897, 'QWK': 0.6825694326120293, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4609419962901689, 'Micro_F1': 0.6884057971014492, 'Weighted_F1': 0.6910750081919986, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(4), 'TN_2': np.int64(122), 'FP_2': np.int64(6), 'FN_2': np.int64(6), 'TP_3': np.int64(46), 'TN_3': np.int64(62), 'FP_3': np.int64(10), 'FN_3': np.int64(20), 'TP_4': np.int64(40), 'TN_4': np.int64(69), 'FP_4': np.int64(18), 'FN_4': np.int64(11), 'TP_5': np.int64(5), 'TN_5': np.int64(119), 'FP_5': np.int64(9), 'FN_5': np.int64(5)}
+[2025-07-10 01:13:34,354][__main__][INFO] - Inference experiment completed
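The C1 log above ends with a percentile-style bootstrap over the 138 test essays, reporting 95% intervals for QWK, Macro_F1 and Weighted_F1. The repository's own resampling code is not part of this diff; the sketch below is a minimal illustration of how such intervals are commonly computed with scikit-learn, and every name in it is hypothetical.

    import numpy as np
    from sklearn.metrics import cohen_kappa_score, f1_score

    def bootstrap_ci(y_true, y_pred, n_boot=10_000, alpha=0.05, seed=42):
        # Resample essays with replacement and recompute each metric per resample
        # (percentile bootstrap; the repo may use a different scheme).
        y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
        rng = np.random.default_rng(seed)
        samples = {"QWK": [], "Macro_F1": [], "Weighted_F1": []}
        for _ in range(n_boot):
            idx = rng.integers(0, len(y_true), size=len(y_true))
            t, p = y_true[idx], y_pred[idx]
            samples["QWK"].append(cohen_kappa_score(t, p, weights="quadratic"))
            samples["Macro_F1"].append(f1_score(t, p, average="macro", zero_division=0))
            samples["Weighted_F1"].append(f1_score(t, p, average="weighted", zero_division=0))
        lo, hi = 100 * alpha / 2, 100 * (1 - alpha / 2)
        # Mean plus percentile bounds, matching the *_mean / *_lower_95ci / *_upper_95ci
        # columns in bootstrap_confidence_intervals.csv.
        return {m: (float(np.mean(v)), float(np.percentile(v, lo)), float(np.percentile(v, hi)))
                for m, v in samples.items()}

The roughly two minutes between the "Computing bootstrap" and "results saved" log lines is consistent with a resample count on this order.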
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/.hydra/config.yaml
RENAMED
@@ -20,12 +20,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C2
     logging_dir: ./logs/bertimbau_large/C2
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/.hydra/hydra.yaml
RENAMED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir:
+    dir: inference_output/2025-07-10/01-13-40
   sweep:
     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -110,13 +110,14 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
+    - hydra.run.dir=inference_output/2025-07-10/01-13-40
     - hydra.mode=RUN
     task:
-    - experiments=
+    - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
   job:
     name: run_inference_experiment
     chdir: null
-    override_dirname: experiments=
+    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
     id: ???
     num: ???
     config_name: config
@@ -141,9 +142,9 @@ hydra:
     - path: ''
       schema: structured
       provider: schema
-    output_dir: /workspace/jbcs2025/
+    output_dir: /workspace/jbcs2025/inference_output/2025-07-10/01-13-40
     choices:
-      experiments:
+      experiments: temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
       hydra/env: default
       hydra/callbacks: null
       hydra/job_logging: default
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
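For each run, Hydra records the composed config (config.yaml), its own runtime config (hydra.yaml) and the command-line overrides (overrides.yaml) under .hydra/ inside hydra.run.dir; the single override above therefore selects the experiment config, while the hydra.run.dir override seen in hydra.yaml fixes the output directory. A minimal sketch of an entry point that would produce these artifacts, assuming Hydra >= 1.2 and a local configs/ tree (both assumptions, not taken from the diff):

    # Hypothetical entry point; a run like the one recorded above would be launched as:
    #   python run_inference_experiment.py \
    #     experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only \
    #     hydra.run.dir=inference_output/2025-07-10/01-13-40
    import hydra
    from omegaconf import DictConfig, OmegaConf

    @hydra.main(version_base=None, config_path="configs", config_name="config")
    def main(cfg: DictConfig) -> None:
        # By the time this runs, Hydra has already written
        # .hydra/{config.yaml, hydra.yaml, overrides.yaml} into hydra.run.dir.
        print(OmegaConf.to_yaml(cfg.experiments.model))

    if __name__ == "__main__":
        main()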
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/bootstrap_confidence_intervals.csv
RENAMED
@@ -1,2 +1,2 @@
 experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
-
+jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only,2025-07-10 01:13:46,0.31345082186336776,0.14492296148559206,0.4709425779174953,0.32601961643190325,0.3376111572696462,0.25175523989642284,0.442912177116335,0.19115693721991217,0.4106064761329726,0.3275210469472954,0.49310656594887087,0.16558551900157548
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.39855072463768115,67.93005780671166,0.31696761677361585,0.1376811594202898,0.31891930577920913,0.39855072463768115,0.4108570406154947,0,137,0,1,16,73,30,19,3,114,19,2,16,79,8,35,10,102,10,16,10,102,16,10,2025-07-10 01:13:46,jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only
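Besides the aggregate scores, each row stores one-vs-rest confusion counts per grade label (TP_k/TN_k/FP_k/FN_k for k = 0..5). A sketch of how such columns can be derived from a standard confusion matrix (the helper name is illustrative; as a sanity check, each label's four counts sum to the 138 test essays):

    import numpy as np
    from sklearn.metrics import confusion_matrix

    def per_class_counts(y_true, y_pred, num_labels=6):
        cm = confusion_matrix(y_true, y_pred, labels=list(range(num_labels)))
        total = cm.sum()
        counts = {}
        for k in range(num_labels):
            tp = cm[k, k]
            fn = cm[k, :].sum() - tp   # true label k, predicted something else
            fp = cm[:, k].sum() - tp   # predicted k, true label differs
            tn = total - tp - fn - fp  # everything else
            counts.update({f"TP_{k}": int(tp), f"TN_{k}": int(tn),
                           f"FP_{k}": int(fp), f"FN_{k}": int(fn)})
        return counts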
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only/jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only_inference_results.jsonl}
RENAMED
The diff for this file is too large to render. See raw diff
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only}/run_inference_experiment.log
RENAMED
@@ -1,5 +1,5 @@
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:13:46,409][__main__][INFO] - Starting inference experiment
+[2025-07-10 01:13:46,411][__main__][INFO] - cache_dir: /tmp/
 dataset:
   name: kamel-usp/aes_enem_dataset
   split: JBCS2025
@@ -21,12 +21,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C2
     logging_dir: ./logs/bertimbau_large/C2
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
@@ -41,9 +41,9 @@ experiments:
     gradient_accumulation_steps: 1
     gradient_checkpointing: false

-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:13:46,413][__main__][INFO] - Running inference with fine-tuned HF model
+[2025-07-10 01:13:51,408][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:13:51,409][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -68,20 +68,14 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-[2025-07-01 00:04:45,517][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
-[2025-07-01 00:04:45,517][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
-[2025-07-01 00:04:45,517][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
-[2025-07-01 00:04:45,517][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
-[2025-07-01 00:04:45,517][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
-[2025-07-01 00:04:45,518][transformers.configuration_utils][INFO] - Model config BertConfig {
+[2025-07-10 01:13:51,618][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:13:51,618][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -106,14 +100,20 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
+[2025-07-10 01:13:51,813][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-07-10 01:13:51,814][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:13:51,814][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -138,18 +138,73 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-
-
-
-
+[2025-07-10 01:13:51,844][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:13:51,845][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
+
+[2025-07-10 01:13:51,862][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+[2025-07-10 01:13:52,270][__main__][INFO] -
+Token statistics for 'train' split:
+[2025-07-10 01:13:52,270][__main__][INFO] -   Total examples: 500
+[2025-07-10 01:13:52,270][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:13:52,270][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:13:52,270][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:13:52,270][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:13:52,360][__main__][INFO] -
+Token statistics for 'validation' split:
+[2025-07-10 01:13:52,360][__main__][INFO] -   Total examples: 132
+[2025-07-10 01:13:52,360][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:13:52,360][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:13:52,360][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:13:52,360][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:13:52,454][__main__][INFO] -
+Token statistics for 'test' split:
+[2025-07-10 01:13:52,454][__main__][INFO] -   Total examples: 138
+[2025-07-10 01:13:52,454][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:13:52,454][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:13:52,454][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:13:52,454][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:13:52,454][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+[2025-07-10 01:13:52,454][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
+[2025-07-10 01:13:52,455][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
+[2025-07-10 01:13:52,455][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only
+[2025-07-10 01:13:53,423][__main__][INFO] - Model need ≈ 2.62 GiB to run inference and 6.36 for training
+[2025-07-10 01:13:54,248][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only/snapshots/a8253d03c01bf4be8629e8e2c587013f34834335/config.json
+[2025-07-10 01:13:54,248][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForSequenceClassification"
   ],
@@ -190,37 +245,36 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:14:18,652][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only/snapshots/a8253d03c01bf4be8629e8e2c587013f34834335/model.safetensors
+[2025-07-10 01:14:18,653][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+[2025-07-10 01:14:18,654][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+[2025-07-10 01:14:19,407][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.

-[2025-07-01
+[2025-07-10 01:14:19,407][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only.
 If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:14:19,425][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-07-10 01:14:19,450][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-07-10 01:14:19,465][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-07-10 01:14:19,499][transformers.trainer][INFO] - Using auto half precision backend
+[2025-07-10 01:14:22,862][__main__][INFO] - Running inference on test dataset
+[2025-07-10 01:14:22,864][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: prompt, reference, essay_text, essay_year, grades, id_prompt, id, supporting_text. If prompt, reference, essay_text, essay_year, grades, id_prompt, id, supporting_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-10 01:14:22,875][transformers.trainer][INFO] -
 ***** Running Prediction *****
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:14:22,876][transformers.trainer][INFO] -   Num examples = 138
+[2025-07-10 01:14:22,876][transformers.trainer][INFO] -   Batch size = 16
+[2025-07-10 01:14:24,390][__main__][INFO] - Inference results saved to jbcs2025_bert-large-portuguese-cased-encoder_classification-C2-essay_only-encoder_classification-C2-essay_only_inference_results.jsonl
+[2025-07-10 01:14:24,391][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
+[2025-07-10 01:16:29,838][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
+[2025-07-10 01:16:29,841][__main__][INFO] - Bootstrap Confidence Intervals (95%):
+[2025-07-10 01:16:29,841][__main__][INFO] -   QWK: 0.3135 [0.1449, 0.4709]
+[2025-07-10 01:16:29,841][__main__][INFO] -   Macro_F1: 0.3376 [0.2518, 0.4429]
+[2025-07-10 01:16:29,841][__main__][INFO] -   Weighted_F1: 0.4106 [0.3275, 0.4931]
+[2025-07-10 01:16:29,841][__main__][INFO] - Inference results: {'accuracy': 0.39855072463768115, 'RMSE': 67.93005780671166, 'QWK': 0.31696761677361585, 'HDIV': 0.1376811594202898, 'Macro_F1': 0.31891930577920913, 'Micro_F1': 0.39855072463768115, 'Weighted_F1': 0.4108570406154947, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(16), 'TN_1': np.int64(73), 'FP_1': np.int64(30), 'FN_1': np.int64(19), 'TP_2': np.int64(3), 'TN_2': np.int64(114), 'FP_2': np.int64(19), 'FN_2': np.int64(2), 'TP_3': np.int64(16), 'TN_3': np.int64(79), 'FP_3': np.int64(8), 'FN_3': np.int64(35), 'TP_4': np.int64(10), 'TN_4': np.int64(102), 'FP_4': np.int64(10), 'FN_4': np.int64(16), 'TP_5': np.int64(10), 'TN_5': np.int64(102), 'FP_5': np.int64(16), 'FN_5': np.int64(10)}
+[2025-07-10 01:16:29,843][__main__][INFO] - Inference experiment completed
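The token-statistics block in the log above is worth a note: with Padding:longest computed over a whole split and the model max length at 512, every essay comes out at exactly 512 tokens, so min = max = avg and std = 0, which is exactly the truncation warning the log prints. A sketch that reproduces those numbers (the repo's actual helper is not in this diff; this is an illustration):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")

    def token_stats(texts):
        # padding="longest" pads every sequence in the batch to the longest one;
        # truncation=True caps lengths at the model max (512 for BERT), so long
        # essays collapse all splits to a constant 512 tokens.
        enc = tokenizer(list(texts), padding="longest", truncation=True)
        lengths = [len(ids) for ids in enc["input_ids"]]
        n = len(lengths)
        avg = sum(lengths) / n
        std = (sum((l - avg) ** 2 for l in lengths) / n) ** 0.5
        print(f"  Total examples: {n}")
        print(f"  Min tokens: {min(lengths)}")
        print(f"  Max tokens: {max(lengths)}")
        print(f"  Avg tokens: {avg:.2f}")
        print(f"  Std tokens: {std:.2f}")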
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/.hydra/config.yaml
RENAMED
@@ -20,12 +20,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C3
     logging_dir: ./logs/bertimbau_large/C3
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/.hydra/hydra.yaml
RENAMED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir:
+    dir: inference_output/2025-07-10/01-16-36
   sweep:
     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -110,13 +110,14 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
+    - hydra.run.dir=inference_output/2025-07-10/01-16-36
     - hydra.mode=RUN
     task:
-    - experiments=
+    - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
   job:
     name: run_inference_experiment
     chdir: null
-    override_dirname: experiments=
+    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
     id: ???
     num: ???
     config_name: config
@@ -141,9 +142,9 @@ hydra:
     - path: ''
       schema: structured
       provider: schema
-    output_dir: /workspace/jbcs2025/
+    output_dir: /workspace/jbcs2025/inference_output/2025-07-10/01-16-36
     choices:
-      experiments:
+      experiments: temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
       hydra/env: default
       hydra/callbacks: null
       hydra/job_logging: default
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
+experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
+jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only,2025-07-10 01:16:42,0.24545954800981098,0.09445649614356424,0.39034083613362736,0.29588433999006314,0.18382071874415615,0.12381966213862766,0.2605668201553197,0.13674715801669202,0.2556045233592953,0.1820167330799211,0.33321252550062413,0.15119579242070302
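The run log further below reports these columns as 95% bootstrap confidence intervals over QWK, Macro_F1, and Weighted_F1. A minimal percentile-bootstrap sketch that produces numbers of this shape; the resampling scheme and iteration count are assumptions, not the repo's exact implementation:

    import numpy as np
    from sklearn.metrics import cohen_kappa_score, f1_score

    def bootstrap_ci(y_true, y_pred, metric_fn, n_boot=1000, seed=42):
        """Percentile bootstrap: resample (true, pred) pairs with replacement."""
        rng = np.random.default_rng(seed)
        y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
        stats = []
        for _ in range(n_boot):
            idx = rng.integers(0, len(y_true), len(y_true))
            stats.append(metric_fn(y_true[idx], y_pred[idx]))
        lower, upper = np.percentile(stats, [2.5, 97.5])
        return float(np.mean(stats)), float(lower), float(upper)

    # QWK is Cohen's kappa with quadratic weights; Macro_F1 averages per-class F1.
    qwk = lambda t, p: cohen_kappa_score(t, p, weights="quadratic")
    macro_f1 = lambda t, p: f1_score(t, p, average="macro")

The mean, lower, and upper values map onto the *_mean, *_lower_95ci, and *_upper_95ci columns; each ci_width is simply upper minus lower.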
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/evaluation_results.csv
RENAMED
@@ -1,2 +1,2 @@
-accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.2753623188405797,64.87446070815474,0.2477700693756193,0.13043478260869568,0.17399856052293136,0.2753623188405797,0.25553888306634265,0,137,0,1,9,66,43,20,0,118,2,18,9,74,19,36,19,71,29,19,1,124,7,6,2025-07-10 01:16:42,jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only
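Beyond the headline metrics, each row stores one-vs-rest confusion counts (TP_k, TN_k, FP_k, FN_k) for the six grade bins k = 0..5, which is enough to re-derive Macro_F1. A small illustrative helper (the CSV layout follows the header above; the function itself is not from the repo):

    import csv

    def macro_f1_from_counts(row, n_classes=6):
        """Macro F1 = unweighted mean of per-class one-vs-rest F1 scores."""
        f1s = []
        for k in range(n_classes):
            tp, fp, fn = (float(row[f"{m}_{k}"]) for m in ("TP", "FP", "FN"))
            denom = 2 * tp + fp + fn
            f1s.append(2 * tp / denom if denom else 0.0)
        return sum(f1s) / n_classes

    with open("evaluation_results.csv") as fh:
        row = next(csv.DictReader(fh))
    print(macro_f1_from_counts(row))  # ≈ 0.1740 for the C3 row above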
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only/jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only_inference_results.jsonl}
RENAMED
The diff for this file is too large to render. See raw diff
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only}/run_inference_experiment.log
RENAMED
@@ -1,5 +1,5 @@
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:16:42,095][__main__][INFO] - Starting inference experiment
+[2025-07-10 01:16:42,097][__main__][INFO] - cache_dir: /tmp/
 dataset:
   name: kamel-usp/aes_enem_dataset
   split: JBCS2025
@@ -21,12 +21,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C3
     logging_dir: ./logs/bertimbau_large/C3
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
@@ -41,9 +41,9 @@ experiments:
     gradient_accumulation_steps: 1
     gradient_checkpointing: false

-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:16:42,099][__main__][INFO] - Running inference with fine-tuned HF model
+[2025-07-10 01:16:46,973][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:16:46,974][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -68,20 +68,14 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-[2025-07-01 00:08:13,201][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
-[2025-07-01 00:08:13,201][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
-[2025-07-01 00:08:13,201][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
-[2025-07-01 00:08:13,201][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
-[2025-07-01 00:08:13,201][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
-[2025-07-01 00:08:13,202][transformers.configuration_utils][INFO] - Model config BertConfig {
+[2025-07-10 01:16:47,174][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:16:47,175][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -106,14 +100,20 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
+[2025-07-10 01:16:47,374][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
+[2025-07-10 01:16:47,375][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+[2025-07-10 01:16:47,375][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
+[2025-07-10 01:16:47,375][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
+[2025-07-10 01:16:47,375][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
+[2025-07-10 01:16:47,375][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-07-10 01:16:47,375][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:16:47,376][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForMaskedLM"
   ],
@@ -138,18 +138,73 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-
-
-
-
+[2025-07-10 01:16:47,405][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-10 01:16:47,406][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
+
+[2025-07-10 01:16:47,422][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+[2025-07-10 01:16:47,831][__main__][INFO] -
+Token statistics for 'train' split:
+[2025-07-10 01:16:47,831][__main__][INFO] -   Total examples: 500
+[2025-07-10 01:16:47,831][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:16:47,831][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:16:47,831][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:16:47,831][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:16:47,920][__main__][INFO] -
+Token statistics for 'validation' split:
+[2025-07-10 01:16:47,921][__main__][INFO] -   Total examples: 132
+[2025-07-10 01:16:47,921][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:16:47,921][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:16:47,921][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:16:47,921][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:16:48,015][__main__][INFO] -
+Token statistics for 'test' split:
+[2025-07-10 01:16:48,015][__main__][INFO] -   Total examples: 138
+[2025-07-10 01:16:48,015][__main__][INFO] -   Min tokens: 512
+[2025-07-10 01:16:48,015][__main__][INFO] -   Max tokens: 512
+[2025-07-10 01:16:48,015][__main__][INFO] -   Avg tokens: 512.00
+[2025-07-10 01:16:48,015][__main__][INFO] -   Std tokens: 0.00
+[2025-07-10 01:16:48,015][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+[2025-07-10 01:16:48,015][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
+[2025-07-10 01:16:48,016][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
+[2025-07-10 01:16:48,016][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only
+[2025-07-10 01:16:48,940][__main__][INFO] - Model need ≈ 2.62 GiB to run inference and 6.36 for training
+[2025-07-10 01:16:49,806][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only/snapshots/95506fbe9cc549ab2018d8ddd3a10010ee1c8d61/config.json
+[2025-07-10 01:16:49,807][transformers.configuration_utils][INFO] - Model config BertConfig {
   "architectures": [
     "BertForSequenceClassification"
   ],
@@ -190,37 +245,36 @@ experiments:
   "pooler_size_per_head": 128,
   "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
-  "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.53.
+  "transformers_version": "4.53.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 29794
 }

-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:17:16,434][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only/snapshots/95506fbe9cc549ab2018d8ddd3a10010ee1c8d61/model.safetensors
+[2025-07-10 01:17:16,438][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+[2025-07-10 01:17:16,438][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+[2025-07-10 01:17:17,158][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.

-[2025-07-01
+[2025-07-10 01:17:17,158][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only.
 If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:17:17,177][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-07-10 01:17:17,200][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-07-10 01:17:17,210][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-07-10 01:17:17,245][transformers.trainer][INFO] - Using auto half precision backend
+[2025-07-10 01:17:20,549][__main__][INFO] - Running inference on test dataset
+[2025-07-10 01:17:20,550][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, id, essay_year, supporting_text, reference, essay_text, id_prompt, prompt. If grades, id, essay_year, supporting_text, reference, essay_text, id_prompt, prompt are not expected by `BertForSequenceClassification.forward`,  you can safely ignore this message.
+[2025-07-10 01:17:20,563][transformers.trainer][INFO] -
 ***** Running Prediction *****
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
-[2025-07-01
+[2025-07-10 01:17:20,563][transformers.trainer][INFO] -   Num examples = 138
+[2025-07-10 01:17:20,563][transformers.trainer][INFO] -   Batch size = 16
+[2025-07-10 01:17:22,039][__main__][INFO] - Inference results saved to jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only-encoder_classification-C3-essay_only_inference_results.jsonl
+[2025-07-10 01:17:22,040][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
+[2025-07-10 01:19:27,495][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
+[2025-07-10 01:19:27,497][__main__][INFO] - Bootstrap Confidence Intervals (95%):
+[2025-07-10 01:19:27,497][__main__][INFO] -   QWK: 0.2455 [0.0945, 0.3903]
+[2025-07-10 01:19:27,497][__main__][INFO] -   Macro_F1: 0.1838 [0.1238, 0.2606]
+[2025-07-10 01:19:27,497][__main__][INFO] -   Weighted_F1: 0.2556 [0.1820, 0.3332]
+[2025-07-10 01:19:27,498][__main__][INFO] - Inference results: {'accuracy': 0.2753623188405797, 'RMSE': 64.87446070815474, 'QWK': 0.2477700693756193, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.17399856052293136, 'Micro_F1': 0.2753623188405797, 'Weighted_F1': 0.25553888306634265, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(9), 'TN_1': np.int64(66), 'FP_1': np.int64(43), 'FN_1': np.int64(20), 'TP_2': np.int64(0), 'TN_2': np.int64(118), 'FP_2': np.int64(2), 'FN_2': np.int64(18), 'TP_3': np.int64(9), 'TN_3': np.int64(74), 'FP_3': np.int64(19), 'FN_3': np.int64(36), 'TP_4': np.int64(19), 'TN_4': np.int64(71), 'FP_4': np.int64(29), 'FN_4': np.int64(19), 'TP_5': np.int64(1), 'TN_5': np.int64(124), 'FP_5': np.int64(7), 'FN_5': np.int64(6)}
+[2025-07-10 01:19:27,503][__main__][INFO] - Inference experiment completed
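The log above traces the whole inference path: compose the config, tokenize with padding='longest' and truncation at the 512-token model maximum, load the fine-tuned checkpoint, run prediction over the 138 test essays at batch size 16, then bootstrap the confidence intervals. A condensed sketch of that flow; model and dataset identifiers are taken from the log, while the dataset config name and the minimal Trainer setup are assumptions about the repo's actual script:

    from datasets import load_dataset
    from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                              Trainer, TrainingArguments)

    MODEL = "kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C3-essay_only"

    tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")
    model = AutoModelForSequenceClassification.from_pretrained(MODEL, num_labels=6)

    # Assumption: "JBCS2025" is the dataset config name; the log lists it as the split field.
    test_ds = load_dataset("kamel-usp/aes_enem_dataset", "JBCS2025", split="test")
    test_ds = test_ds.map(
        lambda batch: tokenizer(batch["essay_text"], padding="longest", truncation=True),
        batched=True,  # per-batch "longest" padding explains the uniform 512-token stats
    )

    trainer = Trainer(
        model=model,
        args=TrainingArguments(output_dir="inference_output", per_device_eval_batch_size=16),
    )
    predicted_grades = trainer.predict(test_ds).predictions.argmax(axis=-1)

Trainer drops columns such as grades, id, and essay_text that BertForSequenceClassification.forward does not accept, which is exactly the warning recorded in the log.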
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/.hydra/config.yaml
RENAMED
@@ -20,12 +20,12 @@ post_training_results:
   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
 experiments:
   model:
-    name: kamel-usp/
+    name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
     type: encoder_classification
     num_labels: 6
     output_dir: ./results/bertimbau_large/C4
     logging_dir: ./logs/bertimbau_large/C4
-    best_model_dir:
+    best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
   tokenizer:
     name: neuralmind/bert-large-portuguese-cased
   dataset:
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/.hydra/hydra.yaml
RENAMED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir:
+    dir: inference_output/2025-07-10/01-19-33
   sweep:
     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -110,13 +110,14 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
+    - hydra.run.dir=inference_output/2025-07-10/01-19-33
     - hydra.mode=RUN
     task:
-    - experiments=
+    - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
   job:
     name: run_inference_experiment
     chdir: null
-    override_dirname: experiments=
+    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
     id: ???
     num: ???
     config_name: config
@@ -141,9 +142,9 @@ hydra:
     - path: ''
       schema: structured
       provider: schema
-    output_dir: /workspace/jbcs2025/
+    output_dir: /workspace/jbcs2025/inference_output/2025-07-10/01-19-33
     choices:
-      experiments:
+      experiments: temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
       hydra/env: default
       hydra/callbacks: null
       hydra/job_logging: default
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/bootstrap_confidence_intervals.csv
RENAMED
@@ -1,2 +1,2 @@
 experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
-
+jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only,2025-07-10 01:19:39,0.5942508582936158,0.4700019118242705,0.7064182815355218,0.2364163697112513,0.4619540479839987,0.29942982127027257,0.6409612672040503,0.34153144593377777,0.6059987254912385,0.5248203752695526,0.6834775202623181,0.1586571449927655
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/evaluation_results.csv
RENAMED
@@ -1,2 +1,2 @@
-accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.
+accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+0.5869565217391305,29.29114225312413,0.5985849056603774,0.007246376811594235,0.45502820381025977,0.5869565217391305,0.6030619269196934,0,137,0,1,1,136,1,0,5,114,15,4,42,52,10,34,29,69,23,17,4,125,8,1,2025-07-10 01:19:39,jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only/jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only_inference_results.jsonl}
RENAMED
The diff for this file is too large to render. See raw diff
    	
        runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only}/run_inference_experiment.log
    RENAMED
    
    | 
         @@ -1,5 +1,5 @@ 
     | 
|
| 1 | 
         
            -
            [2025-07-01 
     | 
| 2 | 
         
            -
            [2025-07-01 
     | 
| 3 | 
         
             
            dataset:
         
     | 
| 4 | 
         
             
              name: kamel-usp/aes_enem_dataset
         
     | 
| 5 | 
         
             
              split: JBCS2025
         
     | 
| 
         @@ -21,12 +21,12 @@ post_training_results: 
     | 
|
| 21 | 
         
             
              model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
         
     | 
| 22 | 
         
             
            experiments:
         
     | 
| 23 | 
         
             
              model:
         
     | 
| 24 | 
         
            -
                name: kamel-usp/ 
     | 
| 25 | 
         
             
                type: encoder_classification
         
     | 
| 26 | 
         
             
                num_labels: 6
         
     | 
| 27 | 
         
             
                output_dir: ./results/bertimbau_large/C4
         
     | 
| 28 | 
         
             
                logging_dir: ./logs/bertimbau_large/C4
         
     | 
| 29 | 
         
            -
                best_model_dir:  
     | 
| 30 | 
         
             
              tokenizer:
         
     | 
| 31 | 
         
             
                name: neuralmind/bert-large-portuguese-cased
         
     | 
| 32 | 
         
             
              dataset:
         
     | 
| 
         @@ -41,9 +41,9 @@ experiments: 
     | 
|
| 41 | 
         
             
                gradient_accumulation_steps: 1
         
     | 
| 42 | 
         
             
                gradient_checkpointing: false
         
     | 
| 43 | 
         | 
| 44 | 
         
            -
            [2025-07-01 
     | 
| 45 | 
         
            -
            [2025-07-01 
     | 
| 46 | 
         
            -
            [2025-07-01 
     | 
| 47 | 
         
             
              "architectures": [
         
     | 
| 48 | 
         
             
                "BertForMaskedLM"
         
     | 
| 49 | 
         
             
              ],
         
     | 
| 
         @@ -68,20 +68,14 @@ experiments: 
     | 
|
| 68 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 69 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 70 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 71 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 72 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 73 | 
         
             
              "use_cache": true,
         
     | 
| 74 | 
         
             
              "vocab_size": 29794
         
     | 
| 75 | 
         
             
            }
         
     | 
| 76 | 
         | 
| 77 | 
         
            -
            [2025-07-01 
     | 
| 78 | 
         
            -
            [2025-07-01 
     | 
| 79 | 
         
            -
            [2025-07-01 00:10:47,955][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
         
     | 
| 80 | 
         
            -
            [2025-07-01 00:10:47,955][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
         
     | 
| 81 | 
         
            -
            [2025-07-01 00:10:47,955][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
         
     | 
| 82 | 
         
            -
            [2025-07-01 00:10:47,955][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
         
     | 
| 83 | 
         
            -
            [2025-07-01 00:10:47,955][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 84 | 
         
            -
- [2025-07-01 00:10:47,955][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForMaskedLM"
  ],
@@ -106,14 +100,20 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 …
- [2025-07-01 …
  "architectures": [
    "BertForMaskedLM"
  ],
@@ -138,18 +138,73 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 …
- [2025-07-01 …
- … (×4 removed lines, content cut off by the diff viewer)
  "architectures": [
    "BertForSequenceClassification"
  ],
@@ -190,37 +245,36 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "problem_type": "single_label_classification",
  "torch_dtype": "float32",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 … (×4 truncated removed log lines)

- [2025-07-01 …
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
- [2025-07-01 … (×7 truncated removed log lines)
  ***** Running Prediction *****
- [2025-07-01 … (×11 truncated removed log lines)
+ [2025-07-10 01:19:39,398][__main__][INFO] - Starting inference experiment
+ [2025-07-10 01:19:39,399][__main__][INFO] - cache_dir: /tmp/
  dataset:
    name: kamel-usp/aes_enem_dataset
    split: JBCS2025
…
    model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
  experiments:
    model:
+     name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
      type: encoder_classification
      num_labels: 6
      output_dir: ./results/bertimbau_large/C4
      logging_dir: ./logs/bertimbau_large/C4
+     best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
    tokenizer:
      name: neuralmind/bert-large-portuguese-cased
    dataset:
…
      gradient_accumulation_steps: 1
      gradient_checkpointing: false

+ [2025-07-10 01:19:39,401][__main__][INFO] - Running inference with fine-tuned HF model
+ [2025-07-10 01:19:44,024][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+ [2025-07-10 01:19:44,025][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForMaskedLM"
  ],
…
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
+ "transformers_version": "4.53.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

+ [2025-07-10 01:19:44,243][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+ [2025-07-10 01:19:44,243][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForMaskedLM"
  ],
…
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
+ "transformers_version": "4.53.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
+ [2025-07-10 01:19:44,443][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+ [2025-07-10 01:19:44,443][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+ [2025-07-10 01:19:44,444][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForMaskedLM"
  ],
…
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
+ "transformers_version": "4.53.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

+ [2025-07-10 01:19:44,474][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+ [2025-07-10 01:19:44,475][transformers.configuration_utils][INFO] - Model config BertConfig {
+ "architectures": [
+   "BertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "directionality": "bidi",
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 0,
+ "pooler_fc_size": 768,
+ "pooler_num_attention_heads": 12,
+ "pooler_num_fc_layers": 3,
+ "pooler_size_per_head": 128,
+ "pooler_type": "first_token_transform",
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.53.1",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 29794
+ }
+
+ [2025-07-10 01:19:44,492][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+ [2025-07-10 01:19:44,896][__main__][INFO] -
+ Token statistics for 'train' split:
+ [2025-07-10 01:19:44,896][__main__][INFO] -   Total examples: 500
+ [2025-07-10 01:19:44,897][__main__][INFO] -   Min tokens: 512
+ [2025-07-10 01:19:44,897][__main__][INFO] -   Max tokens: 512
+ [2025-07-10 01:19:44,897][__main__][INFO] -   Avg tokens: 512.00
+ [2025-07-10 01:19:44,897][__main__][INFO] -   Std tokens: 0.00
+ [2025-07-10 01:19:44,985][__main__][INFO] -
+ Token statistics for 'validation' split:
+ [2025-07-10 01:19:44,985][__main__][INFO] -   Total examples: 132
+ [2025-07-10 01:19:44,985][__main__][INFO] -   Min tokens: 512
+ [2025-07-10 01:19:44,985][__main__][INFO] -   Max tokens: 512
+ [2025-07-10 01:19:44,986][__main__][INFO] -   Avg tokens: 512.00
+ [2025-07-10 01:19:44,986][__main__][INFO] -   Std tokens: 0.00
+ [2025-07-10 01:19:45,083][__main__][INFO] -
+ Token statistics for 'test' split:
+ [2025-07-10 01:19:45,083][__main__][INFO] -   Total examples: 138
+ [2025-07-10 01:19:45,083][__main__][INFO] -   Min tokens: 512
+ [2025-07-10 01:19:45,083][__main__][INFO] -   Max tokens: 512
+ [2025-07-10 01:19:45,083][__main__][INFO] -   Avg tokens: 512.00
+ [2025-07-10 01:19:45,083][__main__][INFO] -   Std tokens: 0.00
+ [2025-07-10 01:19:45,083][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+ [2025-07-10 01:19:45,083][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
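The identical min/max/avg counts above are an artifact of batched tokenization, as the log itself warns: padding="longest" within a single batch plus truncation at the 512-token model limit flattens every length to 512. A minimal sketch of how such statistics could be reproduced (not the repository's code; the "essay_text" column name is taken from the trainer's ignored-columns message later in this log, and passing "JBCS2025" as the dataset config name is an assumption):

# Hypothetical sketch: per-split token statistics under padding="longest" + truncation.
import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")
dataset = load_dataset("kamel-usp/aes_enem_dataset", "JBCS2025")  # config name is an assumption

for split in ("train", "validation", "test"):
    # Tokenizing the whole split as one batch: padding="longest" pads everything to the
    # longest (here truncated) sequence, which is why min == max == avg == 512 above.
    enc = tokenizer(dataset[split]["essay_text"], padding="longest", truncation=True)
    lengths = np.array([len(ids) for ids in enc["input_ids"]])
    print(f"{split}: n={len(lengths)}, min={lengths.min()}, max={lengths.max()}, "
          f"avg={lengths.mean():.2f}, std={lengths.std():.2f}")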
+ [2025-07-10 01:19:45,083][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
+ [2025-07-10 01:19:45,084][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only
+ [2025-07-10 01:19:46,216][__main__][INFO] - Model need ≈ 2.62 GiB to run inference and 6.36 for training
+ [2025-07-10 01:19:47,074][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only/snapshots/0322eca1d32dfaf784bb8ad63fa85977ca109d48/config.json
+ [2025-07-10 01:19:47,075][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForSequenceClassification"
  ],
…
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
+ "transformers_version": "4.53.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

+ [2025-07-10 01:20:14,543][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only/snapshots/0322eca1d32dfaf784bb8ad63fa85977ca109d48/model.safetensors
+ [2025-07-10 01:20:14,546][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+ [2025-07-10 01:20:14,546][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+ [2025-07-10 01:20:15,314][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.

+ [2025-07-10 01:20:15,315][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only.
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
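Since the fine-tuned weights are published on the Hub, the prediction step recorded here can be approximated outside the project's Hydra harness. A minimal sketch with plain transformers (not the repository's run_inference_experiment entry point; the example text is a placeholder):

# Hypothetical sketch: score a single essay with the published C4 checkpoint.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only"
tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")
model = AutoModelForSequenceClassification.from_pretrained(repo)  # 6-label head, per the config above
model.eval()

inputs = tokenizer("Texto de exemplo de redação.", truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Label ids 0..5; how they map onto ENEM grade points is defined by the training setup.
print("predicted label id:", logits.argmax(dim=-1).item())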
+ [2025-07-10 01:20:15,333][transformers.training_args][INFO] - PyTorch: setting up devices
+ [2025-07-10 01:20:15,364][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+ [2025-07-10 01:20:15,377][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+ [2025-07-10 01:20:15,409][transformers.trainer][INFO] - Using auto half precision backend
+ [2025-07-10 01:20:18,784][__main__][INFO] - Running inference on test dataset
+ [2025-07-10 01:20:18,785][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, id_prompt, essay_year, prompt, essay_text, supporting_text, grades, id. If reference, id_prompt, essay_year, prompt, essay_text, supporting_text, grades, id are not expected by `BertForSequenceClassification.forward`,  you can safely ignore this message.
+ [2025-07-10 01:20:18,793][transformers.trainer][INFO] -
  ***** Running Prediction *****
+ [2025-07-10 01:20:18,793][transformers.trainer][INFO] -   Num examples = 138
+ [2025-07-10 01:20:18,793][transformers.trainer][INFO] -   Batch size = 16
+ [2025-07-10 01:20:20,322][__main__][INFO] - Inference results saved to jbcs2025_bert-large-portuguese-cased-encoder_classification-C4-essay_only-encoder_classification-C4-essay_only_inference_results.jsonl
+ [2025-07-10 01:20:20,323][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
+ [2025-07-10 01:22:24,410][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
+ [2025-07-10 01:22:24,413][__main__][INFO] - Bootstrap Confidence Intervals (95%):
+ [2025-07-10 01:22:24,413][__main__][INFO] -   QWK: 0.5943 [0.4700, 0.7064]
+ [2025-07-10 01:22:24,413][__main__][INFO] -   Macro_F1: 0.4620 [0.2994, 0.6410]
+ [2025-07-10 01:22:24,413][__main__][INFO] -   Weighted_F1: 0.6060 [0.5248, 0.6835]
+ [2025-07-10 01:22:24,413][__main__][INFO] - Inference results: {'accuracy': 0.5869565217391305, 'RMSE': 29.29114225312413, 'QWK': 0.5985849056603774, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.45502820381025977, 'Micro_F1': 0.5869565217391305, 'Weighted_F1': 0.6030619269196934, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(1), 'TN_1': np.int64(136), 'FP_1': np.int64(1), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(114), 'FP_2': np.int64(15), 'FN_2': np.int64(4), 'TP_3': np.int64(42), 'TN_3': np.int64(52), 'FP_3': np.int64(10), 'FN_3': np.int64(34), 'TP_4': np.int64(29), 'TN_4': np.int64(69), 'FP_4': np.int64(23), 'FN_4': np.int64(17), 'TP_5': np.int64(4), 'TN_5': np.int64(125), 'FP_5': np.int64(8), 'FN_5': np.int64(1)}
+ [2025-07-10 01:22:24,419][__main__][INFO] - Inference experiment completed
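The intervals logged above are consistent with a standard percentile bootstrap over the 138 test predictions. A sketch of that procedure under the usual assumptions (the resample count, seed, and percentile method are assumptions, not read from the repository):

# Hypothetical sketch: 95% percentile-bootstrap CIs for QWK / Macro_F1 / Weighted_F1.
import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score

def bootstrap_ci(y_true, y_pred, metric_fn, n_boot=1000, seed=42):
    rng = np.random.default_rng(seed)
    n = len(y_true)
    scores = np.empty(n_boot)
    for b in range(n_boot):
        idx = rng.integers(0, n, size=n)  # resample test examples with replacement
        scores[b] = metric_fn(y_true[idx], y_pred[idx])
    return scores.mean(), np.percentile(scores, 2.5), np.percentile(scores, 97.5)

metrics = {
    "QWK": lambda t, p: cohen_kappa_score(t, p, weights="quadratic"),
    "Macro_F1": lambda t, p: f1_score(t, p, average="macro"),
    "Weighted_F1": lambda t, p: f1_score(t, p, average="weighted"),
}
# y_true / y_pred: integer label arrays, e.g. read from the *_inference_results.jsonl file.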
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/.hydra/config.yaml RENAMED

@@ -20,12 +20,12 @@ post_training_results:
    model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
  experiments:
    model:
-     name: kamel-usp/…
+     name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
      type: encoder_classification
      num_labels: 6
      output_dir: ./results/bertimbau_large/C5
      logging_dir: ./logs/bertimbau_large/C5
-     best_model_dir: …
+     best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
    tokenizer:
      name: neuralmind/bert-large-portuguese-cased
    dataset:
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/.hydra/hydra.yaml ADDED

@@ -0,0 +1,157 @@
+ hydra:
+   run:
+     dir: inference_output/2025-07-10/01-22-30
+   sweep:
+     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][HYDRA] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     loggers:
+       logging_example:
+         level: DEBUG
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.run.dir=inference_output/2025-07-10/01-22-30
+     - hydra.mode=RUN
+     task:
+     - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
+   job:
+     name: run_inference_experiment
+     chdir: null
+     override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
+     id: ???
+     num: ???
+     config_name: config
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.1'
+     cwd: /workspace/jbcs2025
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /workspace/jbcs2025/configs
+       schema: file
+       provider: main
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /workspace/jbcs2025/inference_output/2025-07-10/01-22-30
+     choices:
+       experiments: temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: default
+       hydra/hydra_logging: default
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
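This hydra.yaml is the record Hydra writes for every run: the single task override selects an entry from the experiments config group, run.dir pins the output directory, and job_logging writes run_inference_experiment.log next to the .hydra/ folder. A minimal entry point consistent with the runtime block above might look like this (the cfg contents come from whatever YAML the override selects; omitting version_base matches the recorded '1.1' default):

# Hypothetical sketch of the kind of Hydra entry point implied by the records above.
import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_path="configs", config_name="config")
def main(cfg: DictConfig) -> None:
    # With the recorded override, cfg.experiments is composed from
    # configs/experiments/temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only.yaml
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    main()

# Invocation matching .hydra/overrides.yaml and hydra.run.dir:
#   python run_inference_experiment.py \
#     experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only \
#     hydra.run.dir=inference_output/2025-07-10/01-22-30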
    	
runs/large_models/bertimbau/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/.hydra/overrides.yaml ADDED

@@ -0,0 +1 @@
+ - experiments=temp_inference/kamel-usp_jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/bootstrap_confidence_intervals.csv RENAMED

@@ -1,2 +1,2 @@
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
- … (old data row, content cut off by the diff viewer)
+ jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only,2025-07-10 01:22:36,0.4570995991774188,0.3260729920596689,0.582680258986133,0.2566072669264641,0.3005407739499739,0.23044185263938957,0.3822276851313438,0.15178583249195424,0.35876023783511307,0.270756957437106,0.44869522570312514,0.17793826826601916
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/evaluation_results.csv RENAMED

@@ -1,2 +1,2 @@
- accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
- 0.…
+ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
+ 0.41304347826086957,59.75796593554388,0.45915406932356084,0.14492753623188404,0.3013676792556103,0.41304347826086957,0.3597310868375336,5,115,1,17,6,94,12,26,14,82,32,10,3,112,1,22,29,71,35,3,0,135,0,3,2025-07-10 01:22:36,jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only
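The per-class TP/TN/FP/FN columns make the aggregate scores reproducible by hand; for instance, Macro_F1 for the new C5 row follows directly from the counts. A quick check, using only values copied from the CSV line above:

# Recompute Macro_F1 for the C5 row from its per-class counts; F1_c = 2*TP / (2*TP + FP + FN).
tp = [5, 6, 14, 3, 29, 0]
fp = [1, 12, 32, 1, 35, 0]
fn = [17, 26, 10, 22, 3, 3]

f1_per_class = [2 * t / (2 * t + p + n) if (2 * t + p + n) > 0 else 0.0
                for t, p, n in zip(tp, fp, fn)]
print(sum(f1_per_class) / len(f1_per_class))  # ≈ 0.3013676792556103, the stored Macro_F1
print(sum(tp) / 138)                          # 0.41304347826086957, the stored accuracy / Micro_F1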
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only_inference_results.jsonl → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only_inference_results.jsonl} RENAMED

The diff for this file is too large to render. See raw diff.
    	
runs/large_models/bertimbau/{jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only → jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only}/run_inference_experiment.log RENAMED

@@ -1,5 +1,5 @@
- [2025-07-01 …
- [2025-07-01 …
  dataset:
    name: kamel-usp/aes_enem_dataset
    split: JBCS2025
@@ -21,12 +21,12 @@ post_training_results:
    model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
  experiments:
    model:
-     name: kamel-usp/…
      type: encoder_classification
      num_labels: 6
      output_dir: ./results/bertimbau_large/C5
      logging_dir: ./logs/bertimbau_large/C5
-     best_model_dir: …
    tokenizer:
      name: neuralmind/bert-large-portuguese-cased
    dataset:
@@ -41,9 +41,9 @@ experiments:
      gradient_accumulation_steps: 1
      gradient_checkpointing: false

- [2025-07-01 …
- [2025-07-01 …
- [2025-07-01 …
  "architectures": [
    "BertForMaskedLM"
  ],
@@ -68,20 +68,14 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 …
- [2025-07-01 …
- [2025-07-01 00:13:22,154][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
- [2025-07-01 00:13:22,154][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
- [2025-07-01 00:13:22,154][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
- [2025-07-01 00:13:22,154][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
- [2025-07-01 00:13:22,154][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
- [2025-07-01 00:13:22,155][transformers.configuration_utils][INFO] - Model config BertConfig {
  "architectures": [
    "BertForMaskedLM"
  ],
@@ -106,14 +100,20 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 …
- [2025-07-01 …
  "architectures": [
    "BertForMaskedLM"
  ],
@@ -138,18 +138,73 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 …
- [2025-07-01 …
- … (×4 removed lines, content cut off by the diff viewer)
  "architectures": [
    "BertForSequenceClassification"
  ],
@@ -190,37 +245,36 @@ experiments:
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
- "problem_type": "single_label_classification",
  "torch_dtype": "float32",
- "transformers_version": "4.53.…
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 29794
}

- [2025-07-01 … (×4 truncated removed log lines)

- [2025-07-01 …
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
- [2025-07-01 … (×7 truncated removed log lines)
  ***** Running Prediction *****
- [2025-07-01 … (×11 truncated removed log lines)

+ [2025-07-10 01:22:36,158][__main__][INFO] - Starting inference experiment
+ [2025-07-10 01:22:36,160][__main__][INFO] - cache_dir: /tmp/
  dataset:
    name: kamel-usp/aes_enem_dataset
    split: JBCS2025
…
    model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
| 22 | 
         
             
            experiments:
         
     | 
| 23 | 
         
             
              model:
         
     | 
| 24 | 
         
            +
                name: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
         
     | 
| 25 | 
         
             
                type: encoder_classification
         
     | 
| 26 | 
         
             
                num_labels: 6
         
     | 
| 27 | 
         
             
                output_dir: ./results/bertimbau_large/C5
         
     | 
| 28 | 
         
             
                logging_dir: ./logs/bertimbau_large/C5
         
     | 
| 29 | 
         
            +
                best_model_dir: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
         
     | 
| 30 | 
         
             
              tokenizer:
         
     | 
| 31 | 
         
             
                name: neuralmind/bert-large-portuguese-cased
         
     | 
| 32 | 
         
             
              dataset:
         
     | 
| 
         | 
|
| 41 | 
         
             
                gradient_accumulation_steps: 1
         
     | 
| 42 | 
         
             
                gradient_checkpointing: false
         
     | 
| 43 | 
         | 
| 44 | 
         
            +
            [2025-07-10 01:22:36,162][__main__][INFO] - Running inference with fine-tuned HF model
         
     | 
| 45 | 
         
            +
            [2025-07-10 01:22:40,872][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 46 | 
         
            +
            [2025-07-10 01:22:40,873][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 47 | 
         
             
              "architectures": [
         
     | 
| 48 | 
         
             
                "BertForMaskedLM"
         
     | 
| 49 | 
         
             
              ],
         
     | 
| 
         | 
|
| 68 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 69 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 70 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 71 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 72 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 73 | 
         
             
              "use_cache": true,
         
     | 
| 74 | 
         
             
              "vocab_size": 29794
         
     | 
| 75 | 
         
             
            }
         
     | 
| 76 | 
         | 
| 77 | 
         
            +
            [2025-07-10 01:22:41,085][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 78 | 
         
            +
            [2025-07-10 01:22:41,086][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 79 | 
         
             
              "architectures": [
         
     | 
| 80 | 
         
             
                "BertForMaskedLM"
         
     | 
| 81 | 
         
             
              ],
         
     | 
| 
         | 
|
| 100 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 101 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 102 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 103 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 104 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 105 | 
         
             
              "use_cache": true,
         
     | 
| 106 | 
         
             
              "vocab_size": 29794
         
     | 
| 107 | 
         
             
            }
         
     | 
| 108 | 
         | 
| 109 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
         
     | 
| 110 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
         
     | 
| 111 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
         
     | 
| 112 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
         
     | 
| 113 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
         
     | 
| 114 | 
         
            +
            [2025-07-10 01:22:41,339][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
         
     | 
| 115 | 
         
            +
            [2025-07-10 01:22:41,340][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 116 | 
         
            +
            [2025-07-10 01:22:41,340][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 117 | 
         
             
              "architectures": [
         
     | 
| 118 | 
         
             
                "BertForMaskedLM"
         
     | 
| 119 | 
         
             
              ],
         
     | 
| 
         | 
|
| 138 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 139 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 140 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 141 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 142 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 143 | 
         
             
              "use_cache": true,
         
     | 
| 144 | 
         
             
              "vocab_size": 29794
         
     | 
| 145 | 
         
             
            }
         
     | 
| 146 | 
         | 
| 147 | 
         
            +
            [2025-07-10 01:22:41,370][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
         
     | 
| 148 | 
         
            +
            [2025-07-10 01:22:41,371][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 149 | 
         
            +
              "architectures": [
         
     | 
| 150 | 
         
            +
                "BertForMaskedLM"
         
     | 
| 151 | 
         
            +
              ],
         
     | 
| 152 | 
         
            +
              "attention_probs_dropout_prob": 0.1,
         
     | 
| 153 | 
         
            +
              "classifier_dropout": null,
         
     | 
| 154 | 
         
            +
              "directionality": "bidi",
         
     | 
| 155 | 
         
            +
              "hidden_act": "gelu",
         
     | 
| 156 | 
         
            +
              "hidden_dropout_prob": 0.1,
         
     | 
| 157 | 
         
            +
              "hidden_size": 1024,
         
     | 
| 158 | 
         
            +
              "initializer_range": 0.02,
         
     | 
| 159 | 
         
            +
              "intermediate_size": 4096,
         
     | 
| 160 | 
         
            +
              "layer_norm_eps": 1e-12,
         
     | 
| 161 | 
         
            +
              "max_position_embeddings": 512,
         
     | 
| 162 | 
         
            +
              "model_type": "bert",
         
     | 
| 163 | 
         
            +
              "num_attention_heads": 16,
         
     | 
| 164 | 
         
            +
              "num_hidden_layers": 24,
         
     | 
| 165 | 
         
            +
              "output_past": true,
         
     | 
| 166 | 
         
            +
              "pad_token_id": 0,
         
     | 
| 167 | 
         
            +
              "pooler_fc_size": 768,
         
     | 
| 168 | 
         
            +
              "pooler_num_attention_heads": 12,
         
     | 
| 169 | 
         
            +
              "pooler_num_fc_layers": 3,
         
     | 
| 170 | 
         
            +
              "pooler_size_per_head": 128,
         
     | 
| 171 | 
         
            +
              "pooler_type": "first_token_transform",
         
     | 
| 172 | 
         
            +
              "position_embedding_type": "absolute",
         
     | 
| 173 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 174 | 
         
            +
              "type_vocab_size": 2,
         
     | 
| 175 | 
         
            +
              "use_cache": true,
         
     | 
| 176 | 
         
            +
              "vocab_size": 29794
         
     | 
| 177 | 
         
            +
            }
         
     | 
| 178 | 
         
            +
             
     | 
| 179 | 
         
            +
            [2025-07-10 01:22:41,388][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
         
     | 
| 180 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] - 
         
     | 
| 181 | 
         
            +
            Token statistics for 'train' split:
         
     | 
| 182 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] -   Total examples: 500
         
     | 
| 183 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] -   Min tokens: 512
         
     | 
| 184 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] -   Max tokens: 512
         
     | 
| 185 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 186 | 
         
            +
            [2025-07-10 01:22:41,797][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 187 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] - 
         
     | 
| 188 | 
         
            +
            Token statistics for 'validation' split:
         
     | 
| 189 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] -   Total examples: 132
         
     | 
| 190 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] -   Min tokens: 512
         
     | 
| 191 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] -   Max tokens: 512
         
     | 
| 192 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 193 | 
         
            +
            [2025-07-10 01:22:41,888][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 194 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] - 
         
     | 
| 195 | 
         
            +
            Token statistics for 'test' split:
         
     | 
| 196 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] -   Total examples: 138
         
     | 
| 197 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] -   Min tokens: 512
         
     | 
| 198 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] -   Max tokens: 512
         
     | 
| 199 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] -   Avg tokens: 512.00
         
     | 
| 200 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] -   Std tokens: 0.00
         
     | 
| 201 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
         
     | 
| 202 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
         
     | 
| 203 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
         
     | 
| 204 | 
         
            +
            [2025-07-10 01:22:41,983][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only
         
     | 
| 205 | 
         
            +
            [2025-07-10 01:22:43,230][__main__][INFO] - Model need ≈ 2.62 GiB to run inference and 6.36 for training 
         
     | 
| 206 | 
         
            +
            [2025-07-10 01:22:44,519][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only/snapshots/96dc152f3c31b3b49ac73d4d82b684487d6e2bf5/config.json
         
     | 
| 207 | 
         
            +
            [2025-07-10 01:22:44,521][transformers.configuration_utils][INFO] - Model config BertConfig {
         
     | 
| 208 | 
         
             
              "architectures": [
         
     | 
| 209 | 
         
             
                "BertForSequenceClassification"
         
     | 
| 210 | 
         
             
              ],
         
     | 
| 
         | 
|
| 245 | 
         
             
              "pooler_size_per_head": 128,
         
     | 
| 246 | 
         
             
              "pooler_type": "first_token_transform",
         
     | 
| 247 | 
         
             
              "position_embedding_type": "absolute",
         
     | 
| 
         | 
|
| 248 | 
         
             
              "torch_dtype": "float32",
         
     | 
| 249 | 
         
            +
              "transformers_version": "4.53.1",
         
     | 
| 250 | 
         
             
              "type_vocab_size": 2,
         
     | 
| 251 | 
         
             
              "use_cache": true,
         
     | 
| 252 | 
         
             
              "vocab_size": 29794
         
     | 
| 253 | 
         
             
            }
         
     | 
| 254 | 
         | 
| 255 | 
         
            +
            [2025-07-10 01:23:10,193][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only/snapshots/96dc152f3c31b3b49ac73d4d82b684487d6e2bf5/model.safetensors
         
     | 
| 256 | 
         
            +
            [2025-07-10 01:23:10,194][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
         
     | 
| 257 | 
         
            +
            [2025-07-10 01:23:10,194][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
         
     | 
| 258 | 
         
            +
            [2025-07-10 01:23:10,938][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
         
     | 
| 259 | 
         | 
| 260 | 
         
            +
            [2025-07-10 01:23:10,939][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only.
         
     | 
| 261 | 
         
             
            If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
         
     | 
| 262 | 
         
            +
            [2025-07-10 01:23:10,952][transformers.training_args][INFO] - PyTorch: setting up devices
         
     | 
| 263 | 
         
            +
            [2025-07-10 01:23:10,975][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
         
     | 
| 264 | 
         
            +
            [2025-07-10 01:23:10,982][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
         
     | 
| 265 | 
         
            +
            [2025-07-10 01:23:11,007][transformers.trainer][INFO] - Using auto half precision backend
         
     | 
| 266 | 
         
            +
            [2025-07-10 01:23:14,298][__main__][INFO] - Running inference on test dataset
         
     | 
| 267 | 
         
            +
            [2025-07-10 01:23:14,299][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, reference, essay_year, grades, id, supporting_text, essay_text. If id_prompt, prompt, reference, essay_year, grades, id, supporting_text, essay_text are not expected by `BertForSequenceClassification.forward`,  you can safely ignore this message.
         
     | 
| 268 | 
         
            +
            [2025-07-10 01:23:14,307][transformers.trainer][INFO] - 
         
     | 
| 269 | 
         
             
            ***** Running Prediction *****
         
     | 
| 270 | 
         
            +
            [2025-07-10 01:23:14,307][transformers.trainer][INFO] -   Num examples = 138
         
     | 
| 271 | 
         
            +
            [2025-07-10 01:23:14,307][transformers.trainer][INFO] -   Batch size = 16
         
     | 
| 272 | 
         
            +
            [2025-07-10 01:23:15,542][__main__][INFO] - Inference results saved to jbcs2025_bert-large-portuguese-cased-encoder_classification-C5-essay_only-encoder_classification-C5-essay_only_inference_results.jsonl
         
     | 
| 273 | 
         
            +
            [2025-07-10 01:23:15,543][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
         
     | 
| 274 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
         
     | 
| 275 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] - Bootstrap Confidence Intervals (95%):
         
     | 
| 276 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] -   QWK: 0.4571 [0.3261, 0.5827]
         
     | 
| 277 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] -   Macro_F1: 0.3005 [0.2304, 0.3822]
         
     | 
| 278 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] -   Weighted_F1: 0.3588 [0.2708, 0.4487]
         
     | 
| 279 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] - Inference results: {'accuracy': 0.41304347826086957, 'RMSE': 59.75796593554388, 'QWK': 0.45915406932356084, 'HDIV': 0.14492753623188404, 'Macro_F1': 0.3013676792556103, 'Micro_F1': 0.41304347826086957, 'Weighted_F1': 0.3597310868375336, 'TP_0': np.int64(5), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(17), 'TP_1': np.int64(6), 'TN_1': np.int64(94), 'FP_1': np.int64(12), 'FN_1': np.int64(26), 'TP_2': np.int64(14), 'TN_2': np.int64(82), 'FP_2': np.int64(32), 'FN_2': np.int64(10), 'TP_3': np.int64(3), 'TN_3': np.int64(112), 'FP_3': np.int64(1), 'FN_3': np.int64(22), 'TP_4': np.int64(29), 'TN_4': np.int64(71), 'FP_4': np.int64(35), 'FN_4': np.int64(3), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)}
         
     | 
| 280 | 
         
            +
            [2025-07-10 01:25:20,892][__main__][INFO] - Inference experiment completed
         
     | 
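The C5 run above ends by reporting 95% bootstrap confidence intervals for QWK, Macro_F1 and Weighted_F1. The repository's own CI code is not part of this diff; the following is a minimal sketch of the usual percentile-bootstrap recipe, assuming scikit-learn metrics (bootstrap_ci and the metric lambdas are illustrative names, not the project's actual API):

import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score

def bootstrap_ci(y_true, y_pred, metric_fn, n_boot=10_000, seed=42):
    """Percentile bootstrap: resample (label, prediction) pairs with replacement."""
    rng = np.random.default_rng(seed)
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    stats = []
    for _ in range(n_boot):
        idx = rng.integers(0, len(y_true), size=len(y_true))
        stats.append(metric_fn(y_true[idx], y_pred[idx]))
    lower, upper = np.percentile(stats, [2.5, 97.5])
    return float(np.mean(stats)), float(lower), float(upper)

# The three metrics named in the log, expressed with scikit-learn:
metrics = {
    "QWK": lambda t, p: cohen_kappa_score(t, p, weights="quadratic"),
    "Macro_F1": lambda t, p: f1_score(t, p, average="macro"),
    "Weighted_F1": lambda t, p: f1_score(t, p, average="weighted"),
}

Resampling the 138 test pairs with replacement and taking the 2.5th/97.5th percentiles of each resampled metric yields intervals of the shape logged above, e.g. QWK: 0.4571 [0.3261, 0.5827].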
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1-essay_only/.hydra/overrides.yaml
DELETED
@@ -1 +0,0 @@
-- experiments=large_models/C1
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2-essay_only/.hydra/overrides.yaml
DELETED
@@ -1 +0,0 @@
-- experiments=large_models/C2
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3-essay_only/.hydra/overrides.yaml
DELETED
@@ -1 +0,0 @@
-- experiments=large_models/C3
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4-essay_only/.hydra/overrides.yaml
DELETED
@@ -1 +0,0 @@
-- experiments=large_models/C4
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/.hydra/hydra.yaml
DELETED
@@ -1,156 +0,0 @@
-hydra:
-  run:
-    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
-  sweep:
-    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params: null
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][HYDRA] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    loggers:
-      logging_example:
-        level: DEBUG
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: RUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=RUN
-    task:
-    - experiments=large_models/C5
-  job:
-    name: run_inference_experiment
-    chdir: null
-    override_dirname: experiments=large_models/C5
-    id: ???
-    num: ???
-    config_name: config
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.1'
-    cwd: /workspace/jbcs2025
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: /workspace/jbcs2025/configs
-      schema: file
-      provider: main
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /workspace/jbcs2025/outputs/2025-07-01/00-13-17
-    choices:
-      experiments: large_models/C5
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: default
-      hydra/hydra_logging: default
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
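The hydra.yaml deleted here is the snapshot Hydra writes into each run's .hydra/ directory alongside config.yaml and overrides.yaml: it records the composed job settings, the logging setup, and the single override (experiments=large_models/C5) the run was launched with. As a minimal sketch, an entry point of the following shape would produce this kind of snapshot, assuming a configs/ tree that defines an experiments config group (the script body is illustrative, not the repository's actual code):

import hydra
from omegaconf import DictConfig, OmegaConf

# Omitting version_base keeps Hydra's legacy behavior, matching the
# "version_base: '1.1'" recorded in the deleted snapshot above.
@hydra.main(config_path="configs", config_name="config")
def main(cfg: DictConfig) -> None:
    # Hydra creates outputs/YYYY-MM-DD/HH-MM-SS (per hydra.run.dir) and
    # writes .hydra/{config,hydra,overrides}.yaml into it before this runs.
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    # Launched as: python run_inference_experiment.py experiments=large_models/C5
    main()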
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/.hydra/overrides.yaml
DELETED
@@ -1 +0,0 @@
-- experiments=large_models/C5
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/bootstrap_confidence_intervals.csv
DELETED
@@ -1,2 +0,0 @@
-experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
-jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only,2025-07-01 00:13:17,0.4765829843065011,0.34979869197784125,0.5987775413884535,0.24897884941061227,0.3185511976163596,0.23313502944056644,0.4147475443192737,0.18161251487870725,0.3518695561256887,0.2709850141597966,0.4369107158200968,0.16592570166030018
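The QWK columns in this CSV are quadratically weighted Cohen's kappa over the six grade levels. For reference, with observed agreement matrix $O$, chance-expected matrix $E$ built from the row and column marginals, and $N = 6$ categories:

$$\kappa_w = 1 - \frac{\sum_{i,j} w_{ij}\, O_{ij}}{\sum_{i,j} w_{ij}\, E_{ij}}, \qquad w_{ij} = \frac{(i-j)^2}{(N-1)^2}$$

Disagreements are penalized by the squared distance between predicted and true grade, which is why QWK rather than plain accuracy is the metric bootstrapped in these files.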
    	
runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only/evaluation_results.csv
DELETED
@@ -1,2 +0,0 @@
-accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.36231884057971014,61.10100926607787,0.4785241515279538,0.14492753623188404,0.3255241258616379,0.36231884057971014,0.3520852841017614,6,112,4,16,10,88,18,22,4,108,6,20,8,85,28,17,21,79,27,11,1,130,5,2,2025-07-01 00:13:17,jbcs2025_bertimbau-large-C5-encoder_classification-C5-essay_only
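Besides the aggregate metrics, evaluation_results.csv flattens one-vs-rest confusion counts (TP_k, TN_k, FP_k, FN_k) for each grade level k = 0..5 into its header. These follow mechanically from the multiclass confusion matrix; a sketch assuming scikit-learn (the helper name is illustrative):

import numpy as np
from sklearn.metrics import confusion_matrix

def per_class_counts(y_true, y_pred, n_labels=6):
    """One-vs-rest TP/TN/FP/FN per grade level, as flattened in evaluation_results.csv."""
    cm = confusion_matrix(y_true, y_pred, labels=list(range(n_labels)))
    counts, total = {}, cm.sum()
    for k in range(n_labels):
        tp = cm[k, k]
        fp = cm[:, k].sum() - tp  # predicted k, true label was something else
        fn = cm[k, :].sum() - tp  # true label k, predicted something else
        counts.update({f"TP_{k}": tp, f"TN_{k}": total - tp - fp - fn,
                       f"FP_{k}": fp, f"FN_{k}": fn})
    return counts

For every class the four counts sum to the test-set size, which gives a quick sanity check on the deleted row above: TP_0 + TN_0 + FP_0 + FN_0 = 6 + 112 + 4 + 16 = 138 essays.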
    	
runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/.hydra/hydra.yaml
CHANGED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir: inference_output/2025-07-
+    dir: inference_output/2025-07-09/19-56-19
   sweep:
     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -110,7 +110,7 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
-    - hydra.run.dir=inference_output/2025-07-
+    - hydra.run.dir=inference_output/2025-07-09/19-56-19
     - hydra.mode=RUN
     task:
     - experiments=temp_inference/kamel-usp_jbcs2025_Llama-3_1-8B-llama31_classification_lora-C1-essay_only-r16
@@ -142,7 +142,7 @@ hydra:
     - path: ''
       schema: structured
      provider: schema
-    output_dir: /workspace/jbcs2025/inference_output/2025-07-
+    output_dir: /workspace/jbcs2025/inference_output/2025-07-09/19-56-19
     choices:
       experiments: temp_inference/kamel-usp_jbcs2025_Llama-3_1-8B-llama31_classification_lora-C1-essay_only-r16
       hydra/env: default
    	
runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/bootstrap_confidence_intervals.csv
CHANGED
@@ -1,2 +1,2 @@
 experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
-jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16,2025-07-
+jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16,2025-07-09 19:56:26,0.6852010393656068,0.5881177044303095,0.7734829594694392,0.1853652550391297,0.5201141818478505,0.40078232785453927,0.6699223338679838,0.26914000601344457,0.6659186862378589,0.5849321458851546,0.7433844647084771,0.15845231882332245
    	
runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/evaluation_results.csv
CHANGED
@@ -1,2 +1,2 @@
 accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
-0.6594202898550725,25.931906372573962,0.6867564182842831,0.007246376811594235,0.48154898864576284,0.6594202898550725,0.6655256851610287,0,137,0,1,0,138,0,0,7,123,5,3,50,58,14,16,28,73,14,23,6,114,14,4,2025-07-
+0.6594202898550725,25.931906372573962,0.6867564182842831,0.007246376811594235,0.48154898864576284,0.6594202898550725,0.6655256851610287,0,137,0,1,0,138,0,0,7,123,5,3,50,58,14,16,28,73,14,23,6,114,14,4,2025-07-09 19:56:26,jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16
    	
runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16_inference_results.jsonl
CHANGED
The diff for this file is too large to render. See raw diff.
    	
        runs/slm_decoder_models/llama-3.1-8b/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16/run_inference_experiment.log
    CHANGED
    
    | 
         @@ -1,5 +1,5 @@ 
     | 
|
| 1 | 
         
            -
            [2025-07- 
     | 
| 2 | 
         
            -
            [2025-07- 
     | 
| 3 | 
         
             
            dataset:
         
     | 
| 4 | 
         
             
              name: kamel-usp/aes_enem_dataset
         
     | 
| 5 | 
         
             
              split: JBCS2025
         
     | 
| 
         @@ -45,22 +45,45 @@ experiments: 
     | 
|
| 45 | 
         
             
                gradient_accumulation_steps: 2
         
     | 
| 46 | 
         
             
                gradient_checkpointing: true
         
     | 
| 47 | 
         | 
| 48 | 
         
            -
            [2025-07- 
     | 
| 49 | 
         
            -
            [2025-07- 
     | 
| 50 | 
         
            -
            [2025-07- 
     | 
| 51 | 
         
            -
            [2025-07- 
     | 
| 52 | 
         
            -
            [2025-07- 
     | 
| 53 | 
         
            -
            [2025-07- 
     | 
| 54 | 
         
            -
            [2025-07- 
     | 
| 55 | 
         
            -
            [2025-07- 
     | 
| 56 | 
         
            -
            [2025-07- 
     | 
| 57 | 
         
            -
            [2025-07- 
     | 
| 58 | 
         
            -
             
     | 
| 59 | 
         
            -
            [2025-07- 
     | 
| 60 | 
         
            -
            [2025-07- 
     | 
| 61 | 
         
            -
            [2025-07- 
     | 
| 62 | 
         
            -
            [2025-07- 
     | 
| 63 | 
         
            -
            [2025-07- 
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 64 | 
         
             
              "architectures": [
         
     | 
| 65 | 
         
             
                "LlamaForCausalLM"
         
     | 
| 66 | 
         
             
              ],
         
     | 
| 
         @@ -107,38 +130,38 @@ experiments: 
     | 
|
| 107 | 
         
             
              "rope_theta": 500000.0,
         
     | 
| 108 | 
         
             
              "tie_word_embeddings": false,
         
     | 
| 109 | 
         
             
              "torch_dtype": "bfloat16",
         
     | 
| 110 | 
         
            -
              "transformers_version": "4.53. 
     | 
| 111 | 
         
             
              "use_cache": true,
         
     | 
| 112 | 
         
             
              "vocab_size": 128256
         
     | 
| 113 | 
         
             
            }
         
     | 
| 114 | 
         | 
| 115 | - [2025-07- |
| 116 | - [2025-07- |
| 117 | - [2025-07- |
| 118 | - [2025-07- |
| 119 |   - This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). |
| 120 |   - This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). |
| 121 | - [2025-07- |
| 122 |   You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. |
| 123 | - [2025-07- |
| 124 | - [2025-07- |
| 125 | - [2025-07- |
| 126 | - [2025-07- |
| 127 | - [2025-07- |
| 128 | - [2025-07- |
| 129 | - [2025-07- |
| 130 | - [2025-07- |
| 131 | - [2025-07- |
| 132 | - [2025-07- |
| 133 |   ***** Running Prediction ***** |
| 134 | - [2025-07- |
| 135 | - [2025-07- |
| 136 | - [2025-07- |
| 137 | - [2025-07- |
| 138 | - [2025-07- |
| 139 | - [2025-07- |
| 140 | - [2025-07- |
| 141 | - [2025-07- |
| 142 | - [2025-07- |
| 143 | - [2025-07- |
| 144 | - [2025-07- |
| 1 | + [2025-07-09 19:56:26,399][__main__][INFO] - Starting inference experiment |
| 2 | + [2025-07-09 19:56:26,400][__main__][INFO] - cache_dir: /tmp/ |
| 3 |   dataset: |
| 4 |     name: kamel-usp/aes_enem_dataset |
| 5 |     split: JBCS2025 |
| ... |
| 45 |     gradient_accumulation_steps: 2 |
| 46 |     gradient_checkpointing: true |
| 47 | |
| 48 | + [2025-07-09 19:56:26,404][__main__][INFO] - Running inference with fine-tuned HF model |
| 49 | + [2025-07-09 19:56:31,012][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json |
| 50 | + [2025-07-09 19:56:31,013][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None |
| 51 | + [2025-07-09 19:56:31,013][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None |
| 52 | + [2025-07-09 19:56:31,013][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json |
| 53 | + [2025-07-09 19:56:31,013][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json |
| 54 | + [2025-07-09 19:56:31,013][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None |
| 55 | + [2025-07-09 19:56:31,557][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. |
| 56 | + [2025-07-09 19:56:31,566][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: False |
| 57 | + [2025-07-09 19:56:33,275][__main__][INFO] - |
| 58 | + Token statistics for 'train' split: |
| 59 | + [2025-07-09 19:56:33,275][__main__][INFO] -   Total examples: 500 |
| 60 | + [2025-07-09 19:56:33,275][__main__][INFO] -   Min tokens: 2479 |
| 61 | + [2025-07-09 19:56:33,275][__main__][INFO] -   Max tokens: 2479 |
| 62 | + [2025-07-09 19:56:33,275][__main__][INFO] -   Avg tokens: 2479.00 |
| 63 | + [2025-07-09 19:56:33,276][__main__][INFO] -   Std tokens: 0.00 |
| 64 | + [2025-07-09 19:56:33,503][__main__][INFO] - |
| 65 | + Token statistics for 'validation' split: |
| 66 | + [2025-07-09 19:56:33,504][__main__][INFO] -   Total examples: 132 |
| 67 | + [2025-07-09 19:56:33,504][__main__][INFO] -   Min tokens: 2193 |
| 68 | + [2025-07-09 19:56:33,504][__main__][INFO] -   Max tokens: 2193 |
| 69 | + [2025-07-09 19:56:33,504][__main__][INFO] -   Avg tokens: 2193.00 |
| 70 | + [2025-07-09 19:56:33,504][__main__][INFO] -   Std tokens: 0.00 |
| 71 | + [2025-07-09 19:56:33,753][__main__][INFO] - |
| 72 | + Token statistics for 'test' split: |
| 73 | + [2025-07-09 19:56:33,753][__main__][INFO] -   Total examples: 138 |
| 74 | + [2025-07-09 19:56:33,753][__main__][INFO] -   Min tokens: 2254 |
| 75 | + [2025-07-09 19:56:33,753][__main__][INFO] -   Max tokens: 2254 |
| 76 | + [2025-07-09 19:56:33,753][__main__][INFO] -   Avg tokens: 2254.00 |
| 77 | + [2025-07-09 19:56:33,753][__main__][INFO] -   Std tokens: 0.00 |
| 78 | + [2025-07-09 19:56:33,753][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding. |
| 79 | + [2025-07-09 19:56:33,753][__main__][INFO] - Model max length: 131072. If it is the same as stats, then there is a high chance that sequences are being truncated. |
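
The identical min/max/avg counts above come from `padding="longest"` applied to whole-split batches, as the log itself notes: every essay is padded to the longest sequence before counting. A minimal sketch of how these statistics could be reproduced, assuming the essays live in an `essay_text` column and that the dataset config name matches the logged split label (both assumptions; only the dataset and tokenizer ids appear in the log):

```python
# Sketch: reproduce the logged per-split token statistics (assumptions noted above).
import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")  # gated repo
if tokenizer.pad_token is None:
    # The run's script adds its own special tokens; reusing EOS is a stand-in here.
    tokenizer.pad_token = tokenizer.eos_token

dataset = load_dataset("kamel-usp/aes_enem_dataset", "JBCS2025")

for split in ("train", "validation", "test"):
    # padding="longest" pads every example to the longest in the batch,
    # which is why min == max == avg in the log above.
    batch = tokenizer(dataset[split]["essay_text"], padding="longest")
    lengths = np.array([len(ids) for ids in batch["input_ids"]])
    print(f"Token statistics for '{split}' split:")
    print(f"  Total examples: {len(lengths)}")
    print(f"  Min tokens: {lengths.min()}  Max tokens: {lengths.max()}")
    print(f"  Avg tokens: {lengths.mean():.2f}  Std tokens: {lengths.std():.2f}")
```
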
| 80 | + [2025-07-09 19:56:33,754][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16 |
| 81 | + [2025-07-09 19:56:33,754][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16 |
| 82 | + [2025-07-09 19:56:36,618][__main__][INFO] - Model need ≈ 46.09 GiB to run inference and 136.77 for training |
| 83 | + [2025-07-09 19:56:36,879][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16 |
| 84 | + [2025-07-09 19:56:36,879][__main__][INFO] - Base model name: meta-llama/Llama-3.1-8B |
| 85 | + [2025-07-09 19:56:37,086][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json |
| 86 | + [2025-07-09 19:56:37,089][transformers.configuration_utils][INFO] - Model config LlamaConfig { |
| 87 |   "architectures": [ |
| 88 |     "LlamaForCausalLM" |
| 89 |   ], |
| ... |
| 130 |   "rope_theta": 500000.0, |
| 131 |   "tie_word_embeddings": false, |
| 132 |   "torch_dtype": "bfloat16", |
| 133 | +   "transformers_version": "4.53.1", |
| 134 |   "use_cache": true, |
| 135 |   "vocab_size": 128256 |
| 136 | } |
| 137 | |
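
The config fields echoed above can be checked without downloading any weights; this small sketch fetches only `config.json` for the base checkpoint:

```python
# Sketch: inspect the base model config fields shown in the log (no weights downloaded).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("meta-llama/Llama-3.1-8B")  # gated repo
print(config.rope_theta)           # 500000.0
print(config.torch_dtype)          # torch.bfloat16
print(config.tie_word_embeddings)  # False
print(config.vocab_size)           # 128256
```
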
| 138 | + [2025-07-09 19:56:37,312][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json |
| 139 | + [2025-07-09 19:56:37,313][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object |
| 140 | + [2025-07-09 19:56:37,313][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16. |
| 141 | + [2025-07-09 19:56:41,901][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: ['lm_head.weight'] |
| 142 |   - This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). |
| 143 |   - This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). |
| 144 | + [2025-07-09 19:56:41,902][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight'] |
| 145 |   You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. |
| 146 | + [2025-07-09 19:56:48,788][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16 |
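
The "not used"/"newly initialized" warnings above are expected for this setup: the base checkpoint is a causal LM, so `lm_head.weight` is dropped and a fresh `score` head is created; loading the LoRA adapter afterwards restores the fine-tuned weights. A minimal sketch of that loading sequence, assuming six grade classes (inferred from the TP_0–TP_5 entries in the final metrics) and a standard PEFT load:

```python
# Sketch: base model + LoRA adapter for sequence classification (assumptions noted above).
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification

adapter_id = "kamel-usp/jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16"

base_model = AutoModelForSequenceClassification.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    num_labels=6,                # assumption: grade classes 0-5
    torch_dtype=torch.bfloat16,  # matches the config above
    device_map="auto",           # requires accelerate; spreads layers across GPUs
)
# The from_pretrained call above triggers the warnings quoted in the log;
# loading the adapter brings back the fine-tuned classification head.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()
```
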
| 147 | + [2025-07-09 19:56:48,794][__main__][INFO] - None |
| 148 | + [2025-07-09 19:56:48,806][transformers.training_args][INFO] - PyTorch: setting up devices |
| 149 | + [2025-07-09 19:56:48,831][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). |
| 150 | + [2025-07-09 19:56:48,845][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. |
| 151 | + [2025-07-09 19:56:48,882][transformers.trainer][INFO] - Using auto half precision backend |
| 152 | + [2025-07-09 19:56:48,883][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead. |
| 153 | + [2025-07-09 19:56:52,225][__main__][INFO] - Running inference on test dataset |
| 154 | + [2025-07-09 19:56:52,226][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: grades, prompt, essay_year, essay_text, supporting_text, id, reference, id_prompt. If grades, prompt, essay_year, essay_text, supporting_text, id, reference, id_prompt are not expected by `PeftModelForSequenceClassification.forward`,  you can safely ignore this message. |
| 155 | + [2025-07-09 19:56:52,252][transformers.trainer][INFO] - |
| 156 |   ***** Running Prediction ***** |
| 157 | + [2025-07-09 19:56:52,252][transformers.trainer][INFO] -   Num examples = 138 |
| 158 | + [2025-07-09 19:56:52,252][transformers.trainer][INFO] -   Batch size = 4 |
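
A minimal sketch of the prediction step logged above (138 test examples, eval batch size 4). `model` and a tokenized test split `tokenized_test` are assumed to come from the previous steps; `output_dir` is arbitrary:

```python
# Sketch: run Trainer.predict as in the log (assumes `model` and `tokenized_test` exist).
import numpy as np
from transformers import Trainer, TrainingArguments

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", per_device_eval_batch_size=4),
)
# Columns that `forward` does not accept (grades, prompt, essay_year, ...)
# are dropped automatically, as the log message above explains.
predictions = trainer.predict(tokenized_test)
y_pred = np.argmax(predictions.predictions, axis=-1)  # logits -> grade class 0-5
```
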
| 159 | + [2025-07-09 19:57:52,735][__main__][INFO] - Inference results saved to jbcs2025_Llama-3.1-8B-llama31_classification_lora-C1-essay_only-r16-llama31_classification_lora-C1-essay_only-r16_inference_results.jsonl |
| 160 | + [2025-07-09 19:57:52,737][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] |
| 161 | + [2025-07-09 20:00:05,966][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv |
| 162 | + [2025-07-09 20:00:05,967][__main__][INFO] - Bootstrap Confidence Intervals (95%): |
| 163 | + [2025-07-09 20:00:05,967][__main__][INFO] -   QWK: 0.6852 [0.5881, 0.7735] |
| 164 | + [2025-07-09 20:00:05,967][__main__][INFO] -   Macro_F1: 0.5201 [0.4008, 0.6699] |
| 165 | + [2025-07-09 20:00:05,967][__main__][INFO] -   Weighted_F1: 0.6659 [0.5849, 0.7434] |
| 166 | + [2025-07-09 20:00:05,967][__main__][INFO] - Inference results: {'accuracy': 0.6594202898550725, 'RMSE': 25.931906372573962, 'QWK': 0.6867564182842831, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.48154898864576284, 'Micro_F1': 0.6594202898550725, 'Weighted_F1': 0.6655256851610287, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(7), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(3), 'TP_3': np.int64(50), 'TN_3': np.int64(58), 'FP_3': np.int64(14), 'FN_3': np.int64(16), 'TP_4': np.int64(28), 'TN_4': np.int64(73), 'FP_4': np.int64(14), 'FN_4': np.int64(23), 'TP_5': np.int64(6), 'TN_5': np.int64(114), 'FP_5': np.int64(14), 'FN_5': np.int64(4)} |
| 167 | + [2025-07-09 20:00:05,972][__main__][INFO] - Inference experiment completed |
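
A minimal sketch of how 95% intervals like those above are commonly obtained: a case-resampling percentile bootstrap over the test predictions. The script's exact resampling scheme and replicate count are not shown in the log, and `y_true`/`y_pred` (integer grade labels) are assumed to come from the prediction step:

```python
# Sketch: percentile-bootstrap 95% CIs for QWK / Macro_F1 / Weighted_F1.
import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score

def bootstrap_ci(y_true, y_pred, metric_fn, n_boot=10_000, seed=42):
    """Resample test cases with replacement; take the 2.5/97.5 percentiles."""
    rng = np.random.default_rng(seed)
    n = len(y_true)
    scores = np.empty(n_boot)
    for b in range(n_boot):
        idx = rng.integers(0, n, size=n)  # sample n cases with replacement
        scores[b] = metric_fn(y_true[idx], y_pred[idx])
    return np.percentile(scores, [2.5, 97.5])

metrics = {
    "QWK": lambda t, p: cohen_kappa_score(t, p, weights="quadratic"),
    "Macro_F1": lambda t, p: f1_score(t, p, average="macro"),
    "Weighted_F1": lambda t, p: f1_score(t, p, average="weighted"),
}
for name, fn in metrics.items():
    lo, hi = bootstrap_ci(np.asarray(y_true), np.asarray(y_pred), fn)
    print(f"{name}: [{lo:.4f}, {hi:.4f}]")
```
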