# Filename: Validate_sudokuCSV_V2.1.py
#
# Description:
# This script validates the 'solutions' column of a sudoku.csv file generated by
# Generate_sudokuCSV_V2.1.py or a similar script. It mathematically checks that each
# solved grid adheres to the rules of Sudoku.
#
# Key Logic:
# 1. **Loads Data:** Reads the 'sudoku.csv' file into a pandas DataFrame.
# 2. **Validates Each Solution:** For every row, it checks the 'solutions' grid to ensure:
#    - Each of the 9 rows contains exactly one of each digit from 1 to 9.
#    - Each of the 9 columns contains exactly one of each digit from 1 to 9.
#    - Each of the nine 3x3 boxes contains exactly one of each digit from 1 to 9.
# 3. **Reports Findings:** Prints a clear summary of how many grids are valid and invalid.
# 4. **Handles Invalid Grids (Colab-Friendly):**
#    - If invalid grids are found, it saves their row indices to a file named
#      `invalid_sudoku_indices.txt`.
#    - It then instructs the user on how to clean the master CSV by changing a
#      single configuration flag (`PERFORM_CLEANUP`) and re-running the script.
# 5. **Performs Cleanup (If Enabled):** If the `PERFORM_CLEANUP` flag is set to True
#    and the index file exists, it will:
#    - Move the invalid rows from 'sudoku.csv' to a new 'invalid_sudokus.csv' file.
#    - Overwrite 'sudoku.csv' with a new, clean version.

import pandas as pd
import numpy as np
import os
import sys

# ==============================================================================
# === CONFIGURATION ===
# ==============================================================================

# --- Primary Action ---
# Set this to True AFTER running the script once and seeing an 'invalid' report.
# This will trigger the cleanup process on the next run.
PERFORM_CLEANUP = False

# --- File Paths ---
INPUT_CSV_PATH = 'sudoku.csv'
INVALID_INDICES_FILE = 'invalid_sudoku_indices.txt'
INVALID_CSV_EXPORT_PATH = 'invalid_sudokus.csv'

# ==============================================================================
# === VALIDATION LOGIC ===
# ==============================================================================

def validate_solution_grid(grid_1d: np.ndarray) -> bool:
    """
    Mathematically validates a 1D numpy array representing a 9x9 Sudoku solution.

    Args:
        grid_1d: A numpy array of 81 integers.

    Returns:
        True if the grid is a valid Sudoku solution, False otherwise.
    """
    # Must be a fully filled 81-cell grid (no zeros / empty cells)
    if grid_1d.shape[0] != 81 or np.any(grid_1d == 0):
        return False

    grid = grid_1d.reshape(9, 9)

    # The set of digits {1, 2, 3, 4, 5, 6, 7, 8, 9}
    required_set = set(range(1, 10))

    # 1. Check all rows
    for i in range(9):
        if set(grid[i, :]) != required_set:
            return False

    # 2. Check all columns
    for j in range(9):
        if set(grid[:, j]) != required_set:
            return False

    # 3. Check all 3x3 boxes
    for box_row_start in range(0, 9, 3):
        for box_col_start in range(0, 9, 3):
            box = grid[box_row_start:box_row_start+3, box_col_start:box_col_start+3]
            if set(box.flatten()) != required_set:
                return False

    # If all checks pass, the grid is valid
    return True
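
# ------------------------------------------------------------------------------
# Illustrative self-check (a minimal sketch, not called anywhere by this script):
# shows how validate_solution_grid() is meant to be used. The 81-digit string
# below is a hand-constructed valid solution chosen for this example, not a row
# taken from sudoku.csv.
def _example_quick_check() -> None:
    """Sanity-check validate_solution_grid() on a known-valid and a broken grid."""
    known_valid = (
        "123456789456789123789123456"
        "234567891567891234891234567"
        "345678912678912345912345678"
    )
    grid = np.array([int(ch) for ch in known_valid])
    assert validate_solution_grid(grid)  # a correct solution passes

    # Swapping two cells in the first row keeps the row (and its box) valid but
    # breaks two columns, so the grid must be rejected.
    broken = list(known_valid)
    broken[0], broken[1] = broken[1], broken[0]
    assert not validate_solution_grid(np.array([int(ch) for ch in broken]))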
# ==============================================================================
# === MAIN EXECUTION SCRIPT ===
# ==============================================================================

def run_validation():
    """Main function to run the validation and reporting process."""
    print("--- Sudoku CSV Validator V2.1 ---")

    if not os.path.exists(INPUT_CSV_PATH):
        print(f"FATAL ERROR: The file '{INPUT_CSV_PATH}' was not found.")
        print("Please make sure you have generated it using 'Generate_sudokuCSV_V2.1.py'.")
        sys.exit(1)

    print(f"Loading data from '{INPUT_CSV_PATH}'...")
    try:
        df = pd.read_csv(INPUT_CSV_PATH)
        # Treat the column as strings and left-pad to 81 characters, so any entry
        # the CSV reader parsed numerically (dropping leading zeros) still has the
        # expected width before it is converted to digits below.
        df['solutions_str'] = df['solutions'].astype(str).str.zfill(81)
    except Exception as e:
        print(f"Error reading CSV file: {e}")
        sys.exit(1)

    print("Validating all solution grids...")
    valid_indices = []
    invalid_indices = []

    for index, row in df.iterrows():
        try:
            # Convert the string of digits into a numpy array of integers
            solution_grid_1d = np.array(list(map(int, row['solutions_str'])))
            if validate_solution_grid(solution_grid_1d):
                valid_indices.append(index)
            else:
                invalid_indices.append(index)
        except (ValueError, TypeError):
            # Handle cases where a row might be malformed
            invalid_indices.append(index)

    # --- Report Findings ---
    num_valid = len(valid_indices)
    num_invalid = len(invalid_indices)
    total_grids = len(df)

    print("\n--- VALIDATION REPORT ---")
    print(f"Total grids scanned: {total_grids}")
    print(f" => Valid solutions:   {num_valid}")
    print(f" => Invalid solutions: {num_invalid}")
    print("-------------------------\n")

    if num_invalid > 0:
        print(f"Found {num_invalid} invalid grids. Saving their indices to '{INVALID_INDICES_FILE}'.")
        # Save indices to the text file, one index per line
        with open(INVALID_INDICES_FILE, 'w') as f:
            for index in invalid_indices:
                f.write(f"{index}\n")

        print("\n*** ACTION REQUIRED ***")
        print(f"To clean your '{INPUT_CSV_PATH}', please follow these steps:")
        print("1. Open this script ('Validate_sudokuCSV_V2.1.py') in the editor.")
        print("2. Change the configuration flag at the top from 'PERFORM_CLEANUP = False' to 'PERFORM_CLEANUP = True'.")
        print("3. Re-run this script.")
        print("This will move the invalid entries to 'invalid_sudokus.csv' and create a clean 'sudoku.csv'.")
    else:
        print("Congratulations! All solution grids in the CSV are valid.")
        # If no invalid grids were found, remove the old index file if it exists
        if os.path.exists(INVALID_INDICES_FILE):
            os.remove(INVALID_INDICES_FILE)


def run_cleanup():
    """Main function to perform the cleanup process."""
    print("--- Sudoku CSV Cleanup Utility ---")
    print(f"PERFORM_CLEANUP is set to True. Attempting to clean '{INPUT_CSV_PATH}'.")

    if not os.path.exists(INVALID_INDICES_FILE):
        print(f"ERROR: The file '{INVALID_INDICES_FILE}' was not found.")
        print("Please run the script with 'PERFORM_CLEANUP = False' first to generate the list of invalid indices.")
        sys.exit(1)

    print(f"Loading master data from '{INPUT_CSV_PATH}'...")
    df = pd.read_csv(INPUT_CSV_PATH)

    print(f"Loading invalid indices from '{INVALID_INDICES_FILE}'...")
    with open(INVALID_INDICES_FILE, 'r') as f:
        invalid_indices = [int(line.strip()) for line in f]
    print(f"Found {len(invalid_indices)} indices to remove.")

    # Separate the DataFrame into invalid and valid parts
    df_invalid = df.loc[invalid_indices]
    df_valid = df.drop(invalid_indices)

    # --- Perform the file operations ---
    # 1. Export invalid puzzles and their solutions
    print(f"Saving {len(df_invalid)} invalid entries to '{INVALID_CSV_EXPORT_PATH}'...")
    df_invalid.to_csv(INVALID_CSV_EXPORT_PATH, index=False)

    # 2. Overwrite the original file with only the valid data
    print(f"Overwriting '{INPUT_CSV_PATH}' with {len(df_valid)} valid entries...")
    df_valid.to_csv(INPUT_CSV_PATH, index=False)

    # 3. Clean up the index file
    os.remove(INVALID_INDICES_FILE)

    print("\n--- Cleanup Complete! ---")
    print(f"Your '{INPUT_CSV_PATH}' is now clean.")
    print(f"The invalid entries have been moved to '{INVALID_CSV_EXPORT_PATH}'.")
    print("Set 'PERFORM_CLEANUP = False' before running validation again.")
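
# ------------------------------------------------------------------------------
# Illustrative sketch (not called by the script): the same pandas partition that
# run_cleanup() performs above, shown on a tiny in-memory DataFrame so the
# .loc / .drop split is easy to follow. The toy 'puzzles'/'solutions' values are
# placeholders for this example; the real column layout comes from
# Generate_sudokuCSV_V2.1.py and may differ.
def _example_cleanup_partition() -> None:
    """Show the .loc / .drop split used by run_cleanup() on a toy DataFrame."""
    demo = pd.DataFrame({
        'puzzles':   ['p0', 'p1', 'p2', 'p3'],
        'solutions': ['s0', 's1', 's2', 's3'],
    })
    invalid_indices = [1, 3]                 # as if read from invalid_sudoku_indices.txt
    df_invalid = demo.loc[invalid_indices]   # rows flagged invalid -> exported
    df_valid = demo.drop(invalid_indices)    # remaining rows -> overwrite the CSV
    print(df_invalid.index.tolist())         # [1, 3]
    print(df_valid.index.tolist())           # [0, 2]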

if __name__ == '__main__':
    if PERFORM_CLEANUP:
        run_cleanup()
    else:
        run_validation()