Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

coco_evaluation: No predictions from the model! #5142

Closed
aymanaboghonim opened this issue Nov 7, 2023 · 1 comment
Closed

coco_evaluation: No predictions from the model! #5142

aymanaboghonim opened this issue Nov 7, 2023 · 1 comment

Comments

@aymanaboghonim
Copy link

aymanaboghonim commented Nov 7, 2023

If you do not know the root cause of the problem, please post according to this template:

Instructions To Reproduce the Issue:

Check https://stackoverflow.com/help/minimal-reproducible-example for how to ask good questions.
Simplify the steps to reproduce the issue using suggestions from the above link, and provide them below:

  1. Full runnable code or full changes you made:
If making changes to the project itself, please use output of the following command:
git rev-parse HEAD; git diff

# check torch version and cuda availability 
import torch, torchvision
print(torch.__version__, torch.cuda.is_available())

# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import cv2
import torch , os

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
#coco_metadata = MetadataCatalog.get("coco_2017_val")
from detectron2.engine import DefaultTrainer

# import PointRend project
from detectron2.projects import point_rend

# import other modules 
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.engine import HookBase
from detectron2.data import build_detection_train_loader
import detectron2.utils.comm as comm
from detectron2.data.datasets import register_coco_instances

# Register the train/val splits (COCO-format JSON + image root) with detectron2.
register_coco_instances("buildings_riyadh-images_google_train", {}, "data/train.json", "images/")
register_coco_instances("buildings_riyadh-images_google_val", {}, "data/val.json", "images/")


# Declare the single class name, then fetch the metadata record.
# BUG FIX: the original chained assignment
#   buildings_metadata = MetadataCatalog.get(...).thing_classes = ["building"]
# bound the *list* ["building"] to buildings_metadata, not the Metadata object.
MetadataCatalog.get("buildings_riyadh-images_google_train").thing_classes = ["building"]
buildings_metadata = MetadataCatalog.get("buildings_riyadh-images_google_train")
print(buildings_metadata)

# Initiate config
cfg = get_cfg()

# Add PointRend-specific config keys (must happen before merge_from_file).
point_rend.add_pointrend_config(cfg)
# Load a config from file
cfg.merge_from_file("pointrend_rcnn_R_50_FPN_3x_coco_config.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # detection confidence threshold at test time
cfg.MODEL.WEIGHTS = "model_0113999.pth"  # fine-tuning checkpoint to start from
# BUG FIX: detectron2 reads MODEL.DEVICE; a top-level cfg.DEVICE key is silently ignored.
cfg.MODEL.DEVICE = "cuda:0"
cfg.SOLVER.BASE_LR = 0.00025
cfg.DATASETS.TRAIN = ("buildings_riyadh-images_google_train",)
cfg.DATASETS.TEST = ("buildings_riyadh-images_google_val",)
cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False  # keep negative (annotation-free) images
cfg.DATALOADER.NUM_WORKERS = 12
cfg.SOLVER.IMS_PER_BATCH = 8
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1   # single "building" class
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 1  # PointRend head must match the ROI head
cfg.SOLVER.CHECKPOINT_PERIOD = 2000   # was set twice in the original; once is enough
cfg.CUDNN_BENCHMARK = True
cfg.SOLVER.MAX_ITER = 40000000
cfg.OUTPUT_DIR = "google_output_22_5_23"
cfg.TEST.EVAL_PERIOD = 2000  # run evaluation every 2000 iterations





# Hook that computes a loss on the validation split after every training step.
class ValidationLoss(HookBase):
    def __init__(self, cfg):
        """Build an (infinite) data loader over the TEST split.

        The train-loader machinery is reused (pointed at DATASETS.TEST) so the
        model receives annotated samples and therefore returns a loss dict.
        """
        super().__init__()
        self.cfg = cfg.clone()
        self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
        self._loader = iter(build_detection_train_loader(self.cfg))

    def after_step(self):
        """Run one validation batch and log the reduced losses to storage."""
        batch = next(self._loader)
        with torch.no_grad():
            loss_dict = self.trainer.model(batch)

            total = sum(loss_dict.values())
            assert torch.isfinite(total).all(), loss_dict

            # Average the losses across workers and detach to plain floats.
            reduced = {
                f"val_{name}": value.item()
                for name, value in comm.reduce_dict(loss_dict).items()
            }
            if comm.is_main_process():
                self.trainer.storage.put_scalars(
                    total_val_loss=sum(reduced.values()), **reduced
                )


val_loss = ValidationLoss(cfg)

# Trainer that plugs a COCO-style evaluator into the periodic eval hook.
class MyTrainer(DefaultTrainer):
    """DefaultTrainer that evaluates with COCOEvaluator every TEST.EVAL_PERIOD iters."""

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Return a COCOEvaluator for *dataset_name*.

        Result files (e.g. coco_instances_results.json) are written to
        *output_folder*, defaulting to <OUTPUT_DIR>/inference.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        # FIX: instantiating COCOEvaluator from a cfg is deprecated (the run log
        # warns about exactly this); pass explicit arguments instead.
        return COCOEvaluator(dataset_name, output_dir=output_folder)


#print(cfg)
# Ensure the output directory exists before the trainer writes logs/checkpoints.
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = MyTrainer(cfg) 
trainer.register_hooks([val_loss])
# Swap the last two hooks so ValidationLoss runs before the PeriodicWriter;
# otherwise the val_* scalars would be written out one step late.
# NOTE(review): this mutates the private `_hooks` list — fragile across
# detectron2 versions; verify the hook order still holds after upgrades.
trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
# resume=False: always start from cfg.MODEL.WEIGHTS, ignoring any
# last_checkpoint file left in OUTPUT_DIR.
trainer.resume_or_load(resume=False)
trainer.train()


  1. What exact command you run: python the_above_code.py
  2. Full logs or other relevant observations:

[11/07 04:37:43 d2.data.datasets.coco]: Loaded 3277 images in COCO format from data/val.json
[11/07 04:37:43 d2.data.dataset_mapper]: [DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]
[11/07 04:37:43 d2.data.common]: Serializing the dataset using: <class 'detectron2.data.common._TorchSerializedList'>
[11/07 04:37:43 d2.data.common]: Serializing 3277 elements to byte tensors and concatenating them all ...
[11/07 04:37:43 d2.data.common]: Serialized dataset takes 7.75 MiB
[11/07 04:37:43 d2.evaluation.coco_evaluation]: Fast COCO eval is not built. Falling back to official COCO eval.
WARNING [11/07 04:37:43 d2.evaluation.coco_evaluation]: COCO Evaluator instantiated using config, this is deprecated behavior. Please pass in explicit arguments instead.
[11/07 04:37:44 d2.evaluation.evaluator]: Start inference on 3277 batches
[11/07 04:37:45 d2.evaluation.evaluator]: Inference done 11/3277. Dataloading: 0.0010 s/iter. Inference: 0.0805 s/iter. Eval: 0.0001 s/iter. Total: 0.0816 s/iter. ETA=0:04:26
[11/07 04:37:50 d2.evaluation.evaluator]: Inference done 72/3277. Dataloading: 0.0011 s/iter. Inference: 0.0819 s/iter. Eval: 0.0001 s/iter. Total: 0.0831 s/iter. ETA=0:04:26
[11/07 04:37:55 d2.evaluation.evaluator]: Inference done 132/3277. Dataloading: 0.0011 s/iter. Inference: 0.0820 s/iter. Eval: 0.0001 s/iter. Total: 0.0833 s/iter. ETA=0:04:21
[11/07 04:38:00 d2.evaluation.evaluator]: Inference done 192/3277. Dataloading: 0.0011 s/iter. Inference: 0.0821 s/iter. Eval: 0.0001 s/iter. Total: 0.0833 s/iter. ETA=0:04:17
[11/07 04:38:05 d2.evaluation.evaluator]: Inference done 252/3277. Dataloading: 0.0012 s/iter. Inference: 0.0821 s/iter. Eval: 0.0001 s/iter. Total: 0.0834 s/iter. ETA=0:04:12
[11/07 04:38:10 d2.evaluation.evaluator]: Inference done 312/3277. Dataloading: 0.0012 s/iter. Inference: 0.0822 s/iter. Eval: 0.0001 s/iter. Total: 0.0835 s/iter. ETA=0:04:07
[11/07 04:38:15 d2.evaluation.evaluator]: Inference done 372/3277. Dataloading: 0.0012 s/iter. Inference: 0.0822 s/iter. Eval: 0.0001 s/iter. Total: 0.0835 s/iter. ETA=0:04:02
[11/07 04:38:20 d2.evaluation.evaluator]: Inference done 432/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0836 s/iter. ETA=0:03:57
[11/07 04:38:25 d2.evaluation.evaluator]: Inference done 492/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0836 s/iter. ETA=0:03:52
[11/07 04:38:30 d2.evaluation.evaluator]: Inference done 552/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0836 s/iter. ETA=0:03:47
[11/07 04:38:35 d2.evaluation.evaluator]: Inference done 612/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:42
[11/07 04:38:41 d2.evaluation.evaluator]: Inference done 672/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:37
[11/07 04:38:46 d2.evaluation.evaluator]: Inference done 732/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:32
[11/07 04:38:51 d2.evaluation.evaluator]: Inference done 792/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:27
[11/07 04:38:56 d2.evaluation.evaluator]: Inference done 852/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:22
[11/07 04:39:01 d2.evaluation.evaluator]: Inference done 912/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:17
[11/07 04:39:06 d2.evaluation.evaluator]: Inference done 972/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:12
[11/07 04:39:11 d2.evaluation.evaluator]: Inference done 1032/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:07
[11/07 04:39:16 d2.evaluation.evaluator]: Inference done 1092/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:03:02
[11/07 04:39:21 d2.evaluation.evaluator]: Inference done 1152/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:57
[11/07 04:39:26 d2.evaluation.evaluator]: Inference done 1212/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:02:52
[11/07 04:39:31 d2.evaluation.evaluator]: Inference done 1272/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:02:47
[11/07 04:39:36 d2.evaluation.evaluator]: Inference done 1332/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:42
[11/07 04:39:41 d2.evaluation.evaluator]: Inference done 1392/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:02:37
[11/07 04:39:46 d2.evaluation.evaluator]: Inference done 1452/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:32
[11/07 04:39:51 d2.evaluation.evaluator]: Inference done 1512/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:27
[11/07 04:39:56 d2.evaluation.evaluator]: Inference done 1572/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:22
[11/07 04:40:01 d2.evaluation.evaluator]: Inference done 1632/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:17
[11/07 04:40:06 d2.evaluation.evaluator]: Inference done 1692/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:12
[11/07 04:40:11 d2.evaluation.evaluator]: Inference done 1752/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:07
[11/07 04:40:16 d2.evaluation.evaluator]: Inference done 1812/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:02:02
[11/07 04:40:21 d2.evaluation.evaluator]: Inference done 1873/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:57
[11/07 04:40:26 d2.evaluation.evaluator]: Inference done 1933/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:52
[11/07 04:40:31 d2.evaluation.evaluator]: Inference done 1993/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:47
[11/07 04:40:36 d2.evaluation.evaluator]: Inference done 2053/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:42
[11/07 04:40:41 d2.evaluation.evaluator]: Inference done 2113/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:37
[11/07 04:40:46 d2.evaluation.evaluator]: Inference done 2173/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:32
[11/07 04:40:51 d2.evaluation.evaluator]: Inference done 2233/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:27
[11/07 04:40:56 d2.evaluation.evaluator]: Inference done 2293/3277. Dataloading: 0.0012 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:22
[11/07 04:41:01 d2.evaluation.evaluator]: Inference done 2353/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:17
[11/07 04:41:06 d2.evaluation.evaluator]: Inference done 2413/3277. Dataloading: 0.0012 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:12
[11/07 04:41:11 d2.evaluation.evaluator]: Inference done 2473/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:01:07
[11/07 04:41:16 d2.evaluation.evaluator]: Inference done 2533/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:01:02
[11/07 04:41:22 d2.evaluation.evaluator]: Inference done 2593/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:57
[11/07 04:41:27 d2.evaluation.evaluator]: Inference done 2653/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:52
[11/07 04:41:32 d2.evaluation.evaluator]: Inference done 2713/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:47
[11/07 04:41:37 d2.evaluation.evaluator]: Inference done 2773/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:42
[11/07 04:41:42 d2.evaluation.evaluator]: Inference done 2833/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:37
[11/07 04:41:47 d2.evaluation.evaluator]: Inference done 2893/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:32
[11/07 04:41:52 d2.evaluation.evaluator]: Inference done 2953/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0838 s/iter. ETA=0:00:27
[11/07 04:41:57 d2.evaluation.evaluator]: Inference done 3013/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:00:22
[11/07 04:42:02 d2.evaluation.evaluator]: Inference done 3074/3277. Dataloading: 0.0013 s/iter. Inference: 0.0824 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:00:16
[11/07 04:42:07 d2.evaluation.evaluator]: Inference done 3135/3277. Dataloading: 0.0013 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:00:11
[11/07 04:42:12 d2.evaluation.evaluator]: Inference done 3196/3277. Dataloading: 0.0013 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:00:06
[11/07 04:42:17 d2.evaluation.evaluator]: Inference done 3256/3277. Dataloading: 0.0013 s/iter. Inference: 0.0823 s/iter. Eval: 0.0001 s/iter. Total: 0.0837 s/iter. ETA=0:00:01
[11/07 04:42:19 d2.evaluation.evaluator]: Total inference time: 0:04:33.946270 (0.083724 s / iter per device, on 1 devices)
[11/07 04:42:19 d2.evaluation.evaluator]: Total inference pure compute time: 0:04:29 (0.082327 s / iter per device, on 1 devices)
[11/07 04:42:19 d2.evaluation.coco_evaluation]: Preparing results for COCO format ...
[11/07 04:42:19 d2.evaluation.coco_evaluation]: Saving results to google_output_22_5_23/inference/coco_instances_results.json
[11/07 04:42:19 d2.evaluation.coco_evaluation]: Evaluating predictions with official COCO API...
WARNING [11/07 04:42:19 d2.evaluation.coco_evaluation]: No predictions from the model!
[11/07 04:42:19 d2.engine.defaults]: Evaluation results for buildings_riyadh-images_google_val in csv format:
[11/07 04:42:19 d2.evaluation.testing]: copypaste: Task: bbox
[11/07 04:42:19 d2.evaluation.testing]: copypaste: AP,AP50,AP75,APs,APm,APl
[11/07 04:42:19 d2.evaluation.testing]: copypaste: nan,nan,nan,nan,nan,nan

## Expected behavior:

If there are no obvious crash in "full logs" provided above,
please tell us the expected behavior.
I've previously used this code to evaluate my model during training,
 and it should display metrics for both bounding boxes and segmentation masks.
 However, it's not showing any metrics because it's not producing any predictions.
 Despite this, the training and validation losses are decreasing as expected. 
Since I'm fine-tuning a custom model that was trained on a very similar dataset, 
it should work even if it hasn't been trained extensively. 
This suggests that there isn't a training issue here.
If you expect a model to converge / work better, we do not help with such issues, unless
a model fails to reproduce the results in detectron2 model zoo, or proves existence of bugs.

## Environment:

Paste the output of the following command:

cuobjdump info : File '/home/jupyter/detectron2/point_rend/detectron2-main/detectron2/_C.cpython-38-x86_64-linux-gnu.so' does not contain device code


sys.platform linux
Python 3.8.18 | packaged by conda-forge | (default, Oct 10 2023, 15:44:36) [GCC 12.3.0]
numpy 1.24.3
detectron2 0.6 @/home/jupyter/detectron2/point_rend/detectron2-main/detectron2
detectron2._C not built correctly: /home/jupyter/detectron2/point_rend/detectron2-main/detectron2/_C.cpython-38-x86_64-linux-gnu.so: undefined symbol: _ZNK2at6Tensor7reshapeEN3c108ArrayRefIlEE
Compiler ($CXX) c++ (Debian 8.3.0-6) 8.3.0
CUDA compiler Build cuda_11.3.r11.3/compiler.29920130_0
detectron2 arch flags /home/jupyter/detectron2/point_rend/detectron2-main/detectron2/_C.cpython-38-x86_64-linux-gnu.so
DETECTRON2_ENV_MODULE
PyTorch 1.13.1+cu116 @/opt/conda/envs/detectron2/lib/python3.8/site-packages/torch
PyTorch debug build False
torch._C._GLIBCXX_USE_CXX11_ABI False
GPU available Yes
GPU 0 Tesla T4 (arch=7.5)
Driver version 510.47.03
CUDA_HOME /usr/local/cuda
Pillow 9.5.0
torchvision 0.14.1+cu116 @/opt/conda/envs/detectron2/lib/python3.8/site-packages/torchvision
torchvision arch flags 3.5, 5.0, 6.0, 7.0, 7.5, 8.0, 8.6
fvcore 0.1.5.post20221221
iopath 0.1.9
cv2 4.8.1


PyTorch built with:

  • GCC 9.3
  • C++ Version: 201402
  • Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
  • Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
  • OpenMP 201511 (a.k.a. OpenMP 4.5)
  • LAPACK is enabled (usually provided by MKL)
  • NNPACK is enabled
  • CPU capability usage: AVX2
  • CUDA Runtime 11.6
  • NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
  • CuDNN 8.2 (built against CUDA 11.3)
    • Built with CuDNN 8.3.2
  • Magma 2.6.1
  • Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.6, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,

If your issue looks like an installation issue / environment issue,
please first check common issues in https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
 
@aymanaboghonim aymanaboghonim changed the title Please read & provide the following coco_evaluation: No predictions from the model! Nov 7, 2023
@wangzitao777
Copy link

@aymanaboghonim Hello, I've got the same problem, how do you solve it?

@github-actions github-actions bot locked as resolved and limited conversation to collaborators Sep 18, 2024
Sign up for free to subscribe to this conversation on GitHub. Already have an account? Sign in.
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants