# USAGE:
# 1. Download raw text data with `wikipedia_download.py`
# 2. Install python==3.7.12 and tensorflow==1.15.5, then run `create_pretraining_data.py`
#    to create TFRecords for a specific part (this will take some time)
#    Command: python3 create_pretraining_data.py --input_file=/path/to/part-00XXX-of-00500 --vocab_file=/path/to/vocab.txt \
#             --output_file=/path/to/output.tfrecord --max_seq_length=512 --max_predictions_per_seq=76
#
# 2.1 For eval: run step 2 with --input_file=/path/to/eval.txt, then
#     Command: python3 pick_eval_samples.py --input_tfrecord=/path/to/eval.tfrecord --output_tfrecord=/path/to/output_eval.tfrecord
# 3. Run `wikipedia.py` to preprocess the data with tinygrad (use Python > 3.7)
#    Command: BASEDIR=/path/to/basedir python3 wikipedia.py pre-train XXX (NOTE: the part number must match the part from step 2)
#    This will output to /path/to/basedir/train/XXX
#
# 3.1 For eval:
#     Command: BASEDIR=/path/to/basedir python3 wikipedia.py pre-eval
#     This will output to /path/to/basedir/eval
# 4. Run this script to verify the correctness of the preprocessing script for a specific part
#    Command: python3 external_test_preprocessing_part.py --preprocessed_part_dir=/path/to/basedir/part --tf_records=/path/to/output.tfrecord
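#
# Expected on-disk layout after steps 2-3 (a sketch, assuming the example paths above;
# the .pkl naming is inferred from the sort key below):
#   /path/to/output.tfrecord          <- reference TFRecords from `create_pretraining_data.py`
#   /path/to/basedir/train/XXX/*.pkl  <- tinygrad-preprocessed train part from `wikipedia.py`
#   /path/to/basedir/eval/*.pkl       <- tinygrad-preprocessed eval samples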
import os, argparse, pickle
from tqdm import tqdm
# This is a workaround for a protobuf version issue
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
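
# Each TFRecord example holds the standard BERT pre-training features:
# input_ids/input_mask/segment_ids describe the padded token sequence,
# the masked_lm_* fields carry the masked-LM prediction targets, and
# next_sentence_labels is the next-sentence-prediction label.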
def _parse_function(proto, max_seq_length, max_predictions_per_seq):
  feature_description = {
    'input_ids': tf.io.FixedLenFeature([max_seq_length], tf.int64),
    'input_mask': tf.io.FixedLenFeature([max_seq_length], tf.int64),
    'segment_ids': tf.io.FixedLenFeature([max_seq_length], tf.int64),
    'masked_lm_positions': tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
    'masked_lm_ids': tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
    'masked_lm_weights': tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
    'next_sentence_labels': tf.io.FixedLenFeature([1], tf.int64),
  }
  return tf.io.parse_single_example(proto, feature_description)
def load_dataset(file_path, max_seq_length=512, max_predictions_per_seq=76):
  dataset = tf.data.TFRecordDataset(file_path)
  parse_function = lambda proto: _parse_function(proto, max_seq_length, max_predictions_per_seq) # noqa: E731
  return dataset.map(parse_function)
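
# Example (a sketch, not executed here): iterate a few parsed records and inspect
# their shapes; eager execution is enabled above, so plain iteration works.
#   for example in load_dataset("/path/to/output.tfrecord").take(2):
#     print({name: feature.shape for name, feature in example.items()})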
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Verify the correctness of the preprocessing script for a specific part",
                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("--preprocessed_part_dir", type=str, default=None,
                      help="Path to dir with preprocessed samples from `wikipedia.py`")
  parser.add_argument("--tf_records", type=str, default=None,
                      help="Path to TFRecords file from `create_pretraining_data.py` (reference implementation)")
  parser.add_argument("--max_seq_length", type=int, default=512, help="Max sequence length. For MLPerf keep it as 512")
  parser.add_argument("--max_predictions_per_seq", type=int, default=76, help="Max predictions per sequence. For MLPerf keep it as 76")
  # NOTE: `type=bool` would treat any non-empty string (even "False") as True, so use a flag instead
  parser.add_argument("--is_eval", action="store_true", help="Whether to run eval or train preprocessing")
  args = parser.parse_args()
  assert os.path.isdir(args.preprocessed_part_dir), f"The specified directory {args.preprocessed_part_dir} does not exist."
  assert os.path.isfile(args.tf_records), f"The specified TFRecords file {args.tf_records} does not exist."
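  # Collect the pickled samples in index order: train files are named like `0_3.pkl`
  # (sorted by the index after the underscore), eval files like `3.pkl`.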
  preprocessed_samples = []
  for file_name in sorted(os.listdir(args.preprocessed_part_dir), key=lambda x: int(x.split("_")[1].split(".")[0]) if not args.is_eval else int(x.split(".")[0])): # 0_3.pkl -> 3 # noqa: E501
    with open(os.path.join(args.preprocessed_part_dir, file_name), 'rb') as f:
      samples = pickle.load(f)
      preprocessed_samples.extend(samples)
  dataset = load_dataset(args.tf_records, args.max_seq_length, args.max_predictions_per_seq)
  tf_record_count = sum(1 for _ in dataset)
  assert tf_record_count == len(preprocessed_samples), f"Samples in reference: {tf_record_count} != Preprocessed samples: {len(preprocessed_samples)}"
  print(f"Total samples in the part: {tf_record_count}")
  feature_keys = ["input_ids", "input_mask", "segment_ids", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights", "next_sentence_labels"]
  for i, (reference_example, preprocessed_sample) in tqdm(enumerate(zip(dataset, preprocessed_samples)), desc="Checking samples", total=len(preprocessed_samples)): # noqa: E501
    for key in feature_keys:
      reference_example_feature = reference_example[key].numpy()
      assert (reference_example_feature == preprocessed_sample[key]).all(), \
        f"{key} is not equal at index {i}\nReference: {reference_example_feature}\nPreprocessed: {preprocessed_sample[key]}"