# Clone the TensorFlow TPU models repository (contains the RetinaNet detection code).
git clone https://github.com/tensorflow/tpu.git
# Tkinter backend for matplotlib visualization (Python 2-era package name — confirm
# python3-tk is not needed on newer distros).
sudo apt-get install -y python-tk
# TF 1.15 is the last 1.x release; the tpu/models detection code targets TF 1.x.
pip install tensorflow-gpu==1.15
# Build/runtime dependencies; --user avoids needing root for the pip installs.
pip install --user Cython matplotlib opencv-python-headless pyyaml Pillow
# pycocotools built from the COCO API repo (PythonAPI subdirectory of the VCS URL).
pip install 'git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI'
Download any pretrained model from the model zoo: https://github.com/tensorflow/tpu/blob/master/models/official/detection/MODEL_ZOO.md
Then perform inference. The label map is a CSV of `id:name` pairs, for example:
  1:person
  2:bicycle
  3:car
Each line has the form `category_id:category_name`. If you want to use different classes, create a CSV file following the format above.
  # Run inference with a downloaded RetinaNet checkpoint.
  # NOTE(review): the original had stray spaces pasted into the checkpoint and
  # label-map paths ("model.  ckpt", "official/  detection"), which made both
  # quoted paths invalid; they are removed here.
  python ~/tpu/models/official/detection/inference.py \
    --model="retinanet" \
    --image_size=640 \
    --checkpoint_path="./detection_retinanet_50/model.ckpt" \
    --label_map_file="./retinanet/tpu/models/official/detection/datasets/coco_label_map.csv" \
    --image_file_pattern="path/to/input/file" \
    --output_html="path/to/output/file" \
    --max_boxes_to_draw=10 \
    --min_score_threshold=0.05
  #!/bin/bash
  # Convert a COCO-format dataset (images + annotation JSON) into sharded
  # TFRecord files using the tf-models create_coco_tf_record.py tool.
  set -euo pipefail

  TRAIN_IMAGE_DIR="path/to/train/images/dir"
  TRAIN_OBJ_ANNOTATIONS_FILE="path/to/train/file"
  OUTPUT_DIR="path/to/output/dir"
  VAL_IMAGE_DIR="path/to/test/images/dir"
  # NOTE(review): this originally pointed at the test *images* directory
  # (copy-paste error); it must be the validation annotation JSON file.
  VAL_OBJ_ANNOTATIONS_FILE="path/to/test/annotations/file"

  # Write the training split as 256 TFRecord shards under ${OUTPUT_DIR}/train*.
  create_train_dataset() {
    python3 create_coco_tf_record.py \
      --logtostderr \
      --include_masks \
      --image_dir="${TRAIN_IMAGE_DIR}" \
      --object_annotations_file="${TRAIN_OBJ_ANNOTATIONS_FILE}" \
      --output_file_prefix="${OUTPUT_DIR}/train" \
      --num_shards=256
  }

  # Write the validation split as 32 TFRecord shards under ${OUTPUT_DIR}/val*.
  create_val_dataset() {
    local script_dir
    script_dir=$(dirname "$(readlink -f "$0")")
    # NOTE(review): PYTHONPATH is assigned but not exported here (as in the
    # original) — confirm whether the python3 child process needs 'export'.
    PYTHONPATH="tf-models:tf-models/research"
    python3 "${script_dir}/create_coco_tf_record.py" \
      --logtostderr \
      --include_masks \
      --image_dir="${VAL_IMAGE_DIR}" \
      --object_annotations_file="${VAL_OBJ_ANNOTATIONS_FILE}" \
      --output_file_prefix="${OUTPUT_DIR}/val" \
      --num_shards=32
  }

  create_train_dataset
  create_val_dataset
 3. Perform learning
  # Paths to fill in before running the training job.
  MODEL_DIR="<path to the directory to store model files>"
  TRAIN_FILE_PATTERN="<path to the TFRecord training data>"
  EVAL_FILE_PATTERN="<path to the TFRecord validation data>"
  VAL_JSON_FILE="<path to the validation annotation JSON file>"
  RESNET_CHECKPOINT="<path to trained model>"
  # Train RetinaNet on CPU/GPU (use_tpu=False), initializing the backbone from
  # the ResNet-50 checkpoint, then evaluate once after training.
  # The ${VAR?} form aborts with an error if the variable is unset.
  # NOTE(review): the YAML values inside --params_override are unquoted; paths
  # containing spaces or ':' would break the YAML — confirm against the
  # tpu/models/official/detection documentation before changing.
  python ~/tpu/models/official/detection/main.py \
    --model="retinanet" \
    --model_dir="${MODEL_DIR?}" \
    --mode=train \
    --eval_after_training=True \
    --use_tpu=False \
    --params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT?}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN?} }, eval: { val_json_file: ${VAL_JSON_FILE?}, eval_file_pattern: ${EVAL_FILE_PATTERN?} }}"
  INFO:tensorflow:examples/sec: 0.622754
  INFO:tensorflow:global_step/sec: 0.078258
  # Run inference again, this time with the freshly trained checkpoint,
  # to compare results against the pretrained model.
  python ~/tpu/models/official/detection/inference.py \
    --model="retinanet" \
    --image_size=640 \
    --checkpoint_path="path/to/input" \
    --label_map_file="path/to/label" \
    --image_file_pattern="path/to/input/file" \
    --output_html="path/to/output/file" \
    --max_boxes_to_draw=10 \
    --min_score_threshold=0.05
- Model before training on the original data (inference results shown below)
- Model after training (inference results shown below)
  # Dump the model's predictions to a file for offline evaluation/comparison.
  # Fixes vs. original: "imput" typo in the checkpoint path; a stray space
  # after the continuation backslash on --params_override (it escaped the
  # space instead of the newline, terminating the command early); and spaces
  # around '=' in --dump_predictions_only, which split one flag into three
  # separate arguments.
  python "${RETINA_ROOT}/evaluate_model.py" \
    --model="retinanet" \
    --checkpoint_path="path/to/input/file" \
    --config_file="${CONFIG_PATH}" \
    --params_override="${PARAMS_PATH}" \
    --dump_predictions_only=True \
    --predictions_path="path/to/output/file"
 
For reference, I compared this against other models built in the past. All runs used batch size 8 and were trained and evaluated on the same original dataset.
Compare the average time spent on 100 iterations.
| model | time |
|---|---|
| retinanet | About 21[min] | 
| ttfnet | About 228[min] | 
Compare the accuracy from the AP and inference results at 2000 iterations.
| model | mAP |
|---|---|
| retinanet | 96.35 | 
| ttfnet | 79.78 | 
Recommended Posts