田运杰 committed 10 months ago
Commit c4a0a465fc
9 files changed, 1588 lines added, 0 lines deleted
  1. tests/__init__.py           +22 −0
  2. tests/conftest.py           +83 −0
  3. tests/test_cli.py           +122 −0
  4. tests/test_cuda.py          +155 −0
  5. tests/test_engine.py        +131 −0
  6. tests/test_exports.py       +216 −0
  7. tests/test_integrations.py  +150 −0
  8. tests/test_python.py        +615 −0
  9. tests/test_solutions.py     +94 −0

+ 22 - 0
tests/__init__.py

@@ -0,0 +1,22 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
+
+# Constants used in tests
+MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
+CFG = "yolo11n.yaml"
+SOURCE = ASSETS / "bus.jpg"
+SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
+TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
+CUDA_IS_AVAILABLE = checks.cuda_is_available()
+CUDA_DEVICE_COUNT = checks.cuda_device_count()
+
+__all__ = (
+    "MODEL",
+    "CFG",
+    "SOURCE",
+    "SOURCES_LIST",
+    "TMP",
+    "CUDA_IS_AVAILABLE",
+    "CUDA_DEVICE_COUNT",
+)
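
As a quick orientation, here is a minimal sketch of how these shared constants are consumed by the test modules in this commit (hypothetical test, not part of the diff; it assumes the yolo11n.pt weights referenced by MODEL are available locally):

    from tests import MODEL, SOURCE

    from ultralytics import YOLO


    def test_inference_smoke():
        results = YOLO(MODEL)(SOURCE, imgsz=32)  # tiny imgsz keeps the smoke test fast
        assert len(results) == 1  # one input image yields one Results object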

+ 83 - 0
tests/conftest.py

@@ -0,0 +1,83 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import shutil
+from pathlib import Path
+
+from tests import TMP
+
+
+def pytest_addoption(parser):
+    """
+    Add custom command-line options to pytest.
+
+    Args:
+        parser (pytest.config.Parser): The pytest parser object for adding custom command-line options.
+
+    Returns:
+        (None)
+    """
+    parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
+
+
+def pytest_collection_modifyitems(config, items):
+    """
+    Modify the list of test items to exclude tests marked as slow if the --slow option is not specified.
+
+    Args:
+        config (pytest.config.Config): The pytest configuration object that provides access to command-line options.
+        items (list): The list of collected pytest item objects to be modified based on the presence of --slow option.
+
+    Returns:
+        (None) The function modifies the 'items' list in place, and does not return a value.
+    """
+    if not config.getoption("--slow"):
+        # Remove the item entirely from the list of test items if it's marked as 'slow'
+        items[:] = [item for item in items if "slow" not in item.keywords]
+
+
+def pytest_sessionstart(session):
+    """
+    Initialize session configurations for pytest.
+
+    This function is automatically called by pytest after the 'Session' object has been created but before performing
+    test collection. It sets the initial seeds and prepares the temporary directory for the test session.
+
+    Args:
+        session (pytest.Session): The pytest session object.
+
+    Returns:
+        (None)
+    """
+    from ultralytics.utils.torch_utils import init_seeds
+
+    init_seeds()
+    shutil.rmtree(TMP, ignore_errors=True)  # delete any existing tests/tmp directory
+    TMP.mkdir(parents=True, exist_ok=True)  # create a new empty directory
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """
+    Cleanup operations after pytest session.
+
+    This function is automatically called by pytest at the end of the entire test session. It removes certain files
+    and directories used during testing.
+
+    Args:
+        terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object used for terminal output.
+        exitstatus (int): The exit status of the test run.
+        config (pytest.config.Config): The pytest config object.
+
+    Returns:
+        (None)
+    """
+    from ultralytics.utils import WEIGHTS_DIR
+
+    # Remove files
+    models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
+    for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
+        Path(file).unlink(missing_ok=True)
+
+    # Remove directories
+    models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
+    for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
+        shutil.rmtree(directory, ignore_errors=True)
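
For context, a sketch of how the --slow gate above pairs with marked tests (hypothetical test, not part of this commit): an item carrying the slow marker is dropped at collection time unless pytest is invoked with --slow. The marker would normally also be registered, e.g. in pyproject.toml, to silence unknown-marker warnings.

    import pytest


    @pytest.mark.slow
    def test_expensive_path():
        ...  # collected and run only with `pytest --slow`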

+ 122 - 0
tests/test_cli.py

@@ -0,0 +1,122 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import subprocess
+
+import pytest
+from PIL import Image
+
+from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
+from ultralytics.utils.torch_utils import TORCH_1_9
+
+# Constants
+TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
+MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]
+
+
+def run(cmd):
+    """Execute a shell command using subprocess."""
+    subprocess.run(cmd.split(), check=True)
+
+
+def test_special_modes():
+    """Test various special command-line modes for YOLO functionality."""
+    run("yolo help")
+    run("yolo checks")
+    run("yolo version")
+    run("yolo settings reset")
+    run("yolo cfg")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_train(task, model, data):
+    """Test YOLO training for different tasks, models, and datasets."""
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_val(task, model, data):
+    """Test YOLO validation process for specified task, model, and data using a shell command."""
+    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_predict(task, model, data):
+    """Test YOLO prediction on provided sample assets for specified task and model."""
+    run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_export(model):
+    """Test exporting a YOLO model to TorchScript format."""
+    run(f"yolo export model={model} format=torchscript imgsz=32")
+
+
+def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
+    """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
+    # Warning: must use imgsz=160 (the stray comma, spaces, and fraction=0.25 args intentionally test CLI parsing and single-image training)
+    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
+    run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+    if TORCH_1_9:
+        weights = WEIGHTS_DIR / "rtdetr-l.pt"
+        run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
+def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
+    """Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
+    source = ASSETS / "bus.jpg"
+
+    run(f"yolo segment val {task} model={model} data={data} imgsz=32")
+    run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
+
+    from ultralytics import FastSAM
+    from ultralytics.models.sam import Predictor
+
+    # Create a FastSAM model
+    sam_model = FastSAM(model)  # or FastSAM-x.pt
+
+    # Run inference on an image
+    for s in (source, Image.open(source)):
+        everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
+
+        # Remove small regions
+        new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
+
+        # Run inference with bboxes and points and texts prompt at the same time
+        sam_model(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")
+
+
+def test_mobilesam():
+    """Test MobileSAM segmentation with point prompts using Ultralytics."""
+    from ultralytics import SAM
+
+    # Load the model
+    model = SAM(WEIGHTS_DIR / "mobile_sam.pt")
+
+    # Source
+    source = ASSETS / "zidane.jpg"
+
+    # Predict a segment based on a 1D point prompt and 1D labels.
+    model.predict(source, points=[900, 370], labels=[1])
+
+    # Predict a segment based on 3D points and 2D labels (multiple points per object).
+    model.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])
+
+    # Predict a segment based on a box prompt
+    model.predict(source, bboxes=[439, 437, 524, 709], save=True)
+
+    # Predict all
+    # model(source)
+
+
+# Slow Tests -----------------------------------------------------------------------------------------------------------
+@pytest.mark.slow
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
+def test_train_gpu(task, model, data):
+    """Test YOLO training on GPU(s) for various tasks and models."""
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU

+ 155 - 0
tests/test_cuda.py

@@ -0,0 +1,155 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from itertools import product
+from pathlib import Path
+
+import pytest
+import torch
+
+from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
+from ultralytics import YOLO
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import ASSETS, WEIGHTS_DIR
+from ultralytics.utils.checks import check_amp
+
+
+def test_checks():
+    """Validate CUDA settings against torch CUDA functions."""
+    assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
+    assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_amp():
+    """Test AMP training checks."""
+    model = YOLO("yolo11n.pt").model.cuda()
+    assert check_amp(model)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        # Note: tests reduced below pending compute availability expansion as GPU CI runner utilization is high
+        # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+        for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_engine_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO model export to TensorRT format for various configurations and run inference."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="engine",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        data=TASK2DATA[task],
+        workspace=1,  # reduce workspace GB for less resource utilization during testing
+        simplify=True,  # use 'onnxslim'
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    Path(file).unlink()  # cleanup
+    Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_train():
+    """Test model training on a minimal dataset using available CUDA devices."""
+    device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
+    YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_predict_multiple_devices():
+    """Validate model prediction consistency across CPU and CUDA devices."""
+    model = YOLO("yolo11n.pt")
+    model = model.cpu()
+    assert str(model.device) == "cpu"
+    _ = model(SOURCE)  # CPU inference
+    assert str(model.device) == "cpu"
+
+    model = model.to("cuda:0")
+    assert str(model.device) == "cuda:0"
+    _ = model(SOURCE)  # CUDA inference
+    assert str(model.device) == "cuda:0"
+
+    model = model.cpu()
+    assert str(model.device) == "cpu"
+    _ = model(SOURCE)  # CPU inference
+    assert str(model.device) == "cpu"
+
+    model = model.cuda()
+    assert str(model.device) == "cuda:0"
+    _ = model(SOURCE)  # CUDA inference
+    assert str(model.device) == "cuda:0"
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_autobatch():
+    """Check optimal batch size for YOLO model training using autobatch utility."""
+    from ultralytics.utils.autobatch import check_train_batch_size
+
+    check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_utils_benchmarks():
+    """Profile YOLO models for performance benchmarks."""
+    from ultralytics.utils.benchmarks import ProfileModels
+
+    # Pre-export a dynamic engine model to use dynamic inference
+    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
+    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_predict_sam():
+    """Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
+    from ultralytics import SAM
+    from ultralytics.models.sam import Predictor as SAMPredictor
+
+    # Load a model
+    model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
+
+    # Display model information (optional)
+    model.info()
+
+    # Run inference
+    model(SOURCE, device=0)
+
+    # Run inference with bboxes prompt
+    model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
+
+    # Run inference with no labels
+    model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
+
+    # Run inference with 1D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
+
+    # Run inference with 2D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=0)
+
+    # Run inference with multiple 2D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=0)
+
+    # Run inference with 3D points and 2D labels (multiple points per object)
+    model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)
+
+    # Create SAMPredictor
+    overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
+    predictor = SAMPredictor(overrides=overrides)
+
+    # Set image
+    predictor.set_image(ASSETS / "zidane.jpg")  # set with image file
+    # predictor(bboxes=[439, 437, 524, 709])
+    # predictor(points=[900, 370], labels=[1])
+
+    # Reset image
+    predictor.reset_image()
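
The parametrization of test_export_engine_matrix builds a full cartesian product and then filters out invalid combinations. The same pattern, distilled into a standalone sketch with illustrative values (the commented-out full matrix above shows the intended scope):

    from itertools import product

    tasks = ["detect", "segment", "classify", "pose", "obb"]  # illustrative task list
    combos = [
        (task, dynamic, int8, half, batch)
        for task, dynamic, int8, half, batch in product(tasks, [True, False], [True, False], [True, False], [1, 2])
        if not (int8 and half)  # INT8 and FP16 quantization are mutually exclusive
    ]
    print(len(combos))  # 60 of the 80 raw combinations survive the filter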

+ 131 - 0
tests/test_engine.py

@@ -0,0 +1,131 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import sys
+from unittest import mock
+
+from tests import MODEL
+from ultralytics import YOLO
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
+from ultralytics.models.yolo import classify, detect, segment
+from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
+
+
+def test_func(*args):  # noqa
+    """Test function callback for evaluating YOLO model performance metrics."""
+    print("callback test passed")
+
+
+def test_export():
+    """Tests the model exporting function by adding a callback and asserting its execution."""
+    exporter = Exporter()
+    exporter.add_callback("on_export_start", test_func)
+    assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
+    f = exporter(model=YOLO("yolo11n.yaml").model)
+    YOLO(f)(ASSETS)  # exported model inference
+
+
+def test_detect():
+    """Test YOLO object detection training, validation, and prediction functionality."""
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "coco8.yaml"
+    cfg.imgsz = 32
+
+    # Trainer
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = detect.DetectionValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)  # validate best.pt
+
+    # Predictor
+    pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    # Confirm there is no issue with sys.argv being empty.
+    with mock.patch.object(sys, "argv", []):
+        result = pred(source=ASSETS, model=MODEL)
+        assert len(result), "predictor test failed"
+
+    overrides["resume"] = trainer.last
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    try:
+        trainer.train()
+    except Exception as e:
+        print(f"Expected exception caught: {e}")
+        return
+
+    Exception("Resume test failed!")
+
+
+def test_segment():
+    """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
+    overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "coco8-seg.yaml"
+    cfg.imgsz = 32
+    # YOLO(CFG_SEG).train(**overrides)  # works
+
+    # Trainer
+    trainer = segment.SegmentationTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = segment.SegmentationValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)  # validate best.pt
+
+    # Predictor
+    pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
+    assert len(result), "predictor test failed"
+
+    # Test resume
+    overrides["resume"] = trainer.last
+    trainer = segment.SegmentationTrainer(overrides=overrides)
+    try:
+        trainer.train()
+    except Exception as e:
+        print(f"Expected exception caught: {e}")
+        return
+
+    Exception("Resume test failed!")
+
+
+def test_classify():
+    """Test image classification including training, validation, and prediction phases."""
+    overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "imagenet10"
+    cfg.imgsz = 32
+    # YOLO(CFG_SEG).train(**overrides)  # works
+
+    # Trainer
+    trainer = classify.ClassificationTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = classify.ClassificationValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)
+
+    # Predictor
+    pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    result = pred(source=ASSETS, model=trainer.best)
+    assert len(result), "predictor test failed"

+ 216 - 0
tests/test_exports.py

@@ -0,0 +1,216 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import shutil
+import uuid
+from itertools import product
+from pathlib import Path
+
+import pytest
+
+from tests import MODEL, SOURCE
+from ultralytics import YOLO
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import (
+    IS_RASPBERRYPI,
+    LINUX,
+    MACOS,
+    WINDOWS,
+    checks,
+)
+from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
+
+
+def test_export_torchscript():
+    """Test YOLO model exporting to TorchScript format for compatibility and correctness."""
+    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+def test_export_onnx():
+    """Test YOLO model export to ONNX format with dynamic axes."""
+    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+def test_export_openvino():
+    """Test YOLO exports to OpenVINO format for model inference compatibility."""
+    file = YOLO(MODEL).export(format="openvino", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_openvino_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO model exports to OpenVINO under various configuration matrix conditions."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="openvino",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        data=TASK2DATA[task],
+    )
+    if WINDOWS:
+        # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
+        # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
+        file = Path(file)
+        file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    shutil.rmtree(file, ignore_errors=True)  # retry in case of potential lingering multi-threaded file usage errors
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch, simplify", product(TASKS, [True, False], [False], [False], [1, 2], [True, False])
+)
+def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify):
+    """Test YOLO exports to ONNX format with various configurations and parameters."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="onnx",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        simplify=simplify,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("task, dynamic, int8, half, batch", product(TASKS, [False], [False], [False], [1, 2]))
+def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
+    """Tests YOLO model exports to TorchScript format under varied configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="torchscript",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * 3, imgsz=64 if dynamic else 32)  # exported model inference at batch=3
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_coreml_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to CoreML format with various parameter configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="coreml",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+    shutil.rmtree(file)  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_tflite_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to TFLite format considering various export configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="tflite",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="CoreML not supported on Raspberry Pi")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+def test_export_coreml():
+    """Test YOLO exports to CoreML format, optimized for macOS only."""
+    if MACOS:
+        file = YOLO(MODEL).export(format="coreml", imgsz=32)
+        YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
+    else:
+        YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
+
+
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+def test_export_tflite():
+    """Test YOLO exports to TFLite format under specific OS and Python version conditions."""
+    model = YOLO(MODEL)
+    file = model.export(format="tflite", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled")
+@pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
+def test_export_pb():
+    """Test YOLO exports to TensorFlow's Protobuf (*.pb) format."""
+    model = YOLO(MODEL)
+    file = model.export(format="pb", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
+def test_export_paddle():
+    """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
+    YOLO(MODEL).export(format="paddle", imgsz=32)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="MNN not supported on Raspberry Pi")
+def test_export_mnn():
+    """Test YOLO exports to MNN format (WARNING: MNN test must precede NCNN test or CI error on Windows)."""
+    file = YOLO(MODEL).export(format="mnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.slow
+def test_export_ncnn():
+    """Test YOLO exports to NCNN format."""
+    file = YOLO(MODEL).export(format="ncnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with tflite export.")
+@pytest.mark.skipif(not LINUX, reason="Skipping test on Windows and macOS")
+def test_export_imx():
+    """Test YOLOv8n exports to IMX format."""
+    model = YOLO("yolov8n.pt")
+    file = model.export(format="imx", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
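
Every test in this file follows the same round-trip: export to a target format, then reload the returned artifact path with YOLO() and run inference to validate it. The pattern, distilled:

    from ultralytics import YOLO
    from ultralytics.utils import ASSETS

    file = YOLO("yolo11n.pt").export(format="onnx", dynamic=True, imgsz=32)  # returns the exported file path
    YOLO(file)(ASSETS / "bus.jpg", imgsz=32)  # successful inference validates the export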

+ 150 - 0
tests/test_integrations.py

@@ -0,0 +1,150 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import contextlib
+import os
+import subprocess
+import time
+from pathlib import Path
+
+import pytest
+
+from tests import MODEL, SOURCE, TMP
+from ultralytics import YOLO, download
+from ultralytics.utils import DATASETS_DIR, SETTINGS
+from ultralytics.utils.checks import check_requirements
+
+
+@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
+def test_model_ray_tune():
+    """Tune YOLO model using Ray for hyperparameter optimization."""
+    YOLO("yolo11n-cls.yaml").tune(
+        use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
+    )
+
+
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow():
+    """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
+    SETTINGS["mlflow"] = True
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+    SETTINGS["mlflow"] = False
+
+
+@pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow_keep_run_active():
+    """Ensure MLflow run status matches MLFLOW_KEEP_RUN_ACTIVE environment variable settings."""
+    import mlflow
+
+    SETTINGS["mlflow"] = True
+    run_name = "Test Run"
+    os.environ["MLFLOW_RUN"] = run_name
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=True
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.active_run().info.status
+    assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
+
+    run_id = mlflow.active_run().info.run_id
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=False
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE not set
+    os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
+    SETTINGS["mlflow"] = False
+
+
+@pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
+def test_triton():
+    """
+    Test NVIDIA Triton Server functionalities with YOLO model.
+
+    See https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver.
+    """
+    check_requirements("tritonclient[all]")
+    from tritonclient.http import InferenceServerClient  # noqa
+
+    # Create variables
+    model_name = "yolo"
+    triton_repo = TMP / "triton_repo"  # Triton repo path
+    triton_model = triton_repo / model_name  # Triton model path
+
+    # Export model to ONNX
+    f = YOLO(MODEL).export(format="onnx", dynamic=True)
+
+    # Prepare Triton repo
+    (triton_model / "1").mkdir(parents=True, exist_ok=True)
+    Path(f).rename(triton_model / "1" / "model.onnx")
+    (triton_model / "config.pbtxt").touch()
+
+    # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
+    tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB
+
+    # Pull the image
+    subprocess.call(f"docker pull {tag}", shell=True)
+
+    # Run the Triton server and capture the container ID
+    container_id = (
+        subprocess.check_output(
+            f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
+            shell=True,
+        )
+        .decode("utf-8")
+        .strip()
+    )
+
+    # Wait for the Triton server to start
+    triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
+
+    # Wait until model is ready
+    for _ in range(10):
+        with contextlib.suppress(Exception):
+            assert triton_client.is_model_ready(model_name)
+            break
+        time.sleep(1)
+
+    # Check Triton inference
+    YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE)  # exported model inference
+
+    # Kill and remove the container at the end of the test
+    subprocess.call(f"docker kill {container_id}", shell=True)
+
+
+@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
+def test_pycocotools():
+    """Validate YOLO model predictions on COCO dataset using pycocotools."""
+    from ultralytics.models.yolo.detect import DetectionValidator
+    from ultralytics.models.yolo.pose import PoseValidator
+    from ultralytics.models.yolo.segment import SegmentationValidator
+
+    # Download the COCO-format annotations after each dataset itself has downloaded
+    url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+
+    args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+    validator = DetectionValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+    validator = SegmentationValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+    validator = PoseValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
+    _ = validator.eval_json(validator.stats)
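
test_triton() above waits for server readiness by retrying a check that may raise while the container boots. That polling idiom, extracted into a reusable sketch (hypothetical helper, not part of this commit):

    import contextlib
    import time


    def wait_until(check, attempts=10, delay=1.0):
        """Retry a possibly-raising check until it returns True or attempts run out."""
        for _ in range(attempts):
            with contextlib.suppress(Exception):
                if check():
                    return True
            time.sleep(delay)
        return False


    # e.g. wait_until(lambda: triton_client.is_model_ready("yolo"))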

+ 615 - 0
tests/test_python.py

@@ -0,0 +1,615 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import contextlib
+import csv
+import urllib
+from copy import copy
+from pathlib import Path
+
+import cv2
+import numpy as np
+import pytest
+import torch
+import yaml
+from PIL import Image
+
+from tests import CFG, MODEL, SOURCE, SOURCES_LIST, TMP
+from ultralytics import RTDETR, YOLO
+from ultralytics.cfg import MODELS, TASK2DATA, TASKS
+from ultralytics.data.build import load_inference_source
+from ultralytics.utils import (
+    ASSETS,
+    DEFAULT_CFG,
+    DEFAULT_CFG_PATH,
+    LOGGER,
+    ONLINE,
+    ROOT,
+    WEIGHTS_DIR,
+    WINDOWS,
+    checks,
+    is_dir_writeable,
+    is_github_action_running,
+)
+from ultralytics.utils.downloads import download
+from ultralytics.utils.torch_utils import TORCH_1_9
+
+IS_TMP_WRITEABLE = is_dir_writeable(TMP)  # WARNING: must run after the session starts, as TMP does not exist at tests/__init__ import time
+
+
+def test_model_forward():
+    """Test the forward pass of the YOLO model."""
+    model = YOLO(CFG)
+    model(source=None, imgsz=32, augment=True)  # also test no source and augment
+
+
+def test_model_methods():
+    """Test various methods and properties of the YOLO model to ensure correct functionality."""
+    model = YOLO(MODEL)
+
+    # Model methods
+    model.info(verbose=True, detailed=True)
+    model = model.reset_weights()
+    model = model.load(MODEL)
+    model.to("cpu")
+    model.fuse()
+    model.clear_callback("on_train_start")
+    model.reset_callbacks()
+
+    # Model properties
+    _ = model.names
+    _ = model.device
+    _ = model.transforms
+    _ = model.task_map
+
+
+def test_model_profile():
+    """Test profiling of the YOLO model with `profile=True` to assess performance and resource usage."""
+    from ultralytics.nn.tasks import DetectionModel
+
+    model = DetectionModel()  # build model
+    im = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
+    _ = model.predict(im, profile=True)
+
+
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_txt():
+    """Tests YOLO predictions with file, directory, and pattern sources listed in a text file."""
+    file = TMP / "sources_multi_row.txt"
+    with open(file, "w") as f:
+        for src in SOURCES_LIST:
+            f.write(f"{src}\n")
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_multi_row():
+    """Tests YOLO predictions with sources listed in multiple rows of a CSV file."""
+    file = TMP / "sources_multi_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(["source"])
+        writer.writerows([[src] for src in SOURCES_LIST])
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_single_row():
+    """Tests YOLO predictions with sources listed in a single row of a CSV file."""
+    file = TMP / "sources_single_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(SOURCES_LIST)
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.parametrize("model_name", MODELS)
+def test_predict_img(model_name):
+    """Test YOLO model predictions on various image input types and sources, including online images."""
+    model = YOLO(WEIGHTS_DIR / model_name)
+    im = cv2.imread(str(SOURCE))  # uint8 numpy array
+    assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1  # PIL
+    assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1  # ndarray
+    assert len(model(torch.rand((2, 3, 32, 32)), imgsz=32)) == 2  # batch-size 2 Tensor, FP32 0.0-1.0 RGB order
+    assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2  # batch
+    assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2  # stream
+    assert len(model(torch.zeros(320, 640, 3).numpy().astype(np.uint8), imgsz=32)) == 1  # tensor to numpy
+    batch = [
+        str(SOURCE),  # filename
+        Path(SOURCE),  # Path
+        "https://github.com/ultralytics/assets/releases/download/v0.0.0/zidane.jpg" if ONLINE else SOURCE,  # URI
+        cv2.imread(str(SOURCE)),  # OpenCV
+        Image.open(SOURCE),  # PIL
+        np.zeros((320, 640, 3), dtype=np.uint8),  # numpy
+    ]
+    assert len(model(batch, imgsz=32)) == len(batch)  # multiple sources in a batch
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_predict_visualize(model):
+    """Test model prediction methods with 'visualize=True' to generate and display prediction visualizations."""
+    YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)
+
+
+def test_predict_grey_and_4ch():
+    """Test YOLO prediction on SOURCE converted to greyscale and 4-channel images with various filenames."""
+    im = Image.open(SOURCE)
+    directory = TMP / "im4"
+    directory.mkdir(parents=True, exist_ok=True)
+
+    source_greyscale = directory / "greyscale.jpg"
+    source_rgba = directory / "4ch.png"
+    source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
+    source_spaces = directory / "image with spaces.jpg"
+
+    im.convert("L").save(source_greyscale)  # greyscale
+    im.convert("RGBA").save(source_rgba)  # 4-ch PNG with alpha
+    im.save(source_non_utf)  # non-UTF characters in filename
+    im.save(source_spaces)  # spaces in filename
+
+    # Inference
+    model = YOLO(MODEL)
+    for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
+        for source in Image.open(f), cv2.imread(str(f)), f:
+            results = model(source, save=True, verbose=True, imgsz=32)
+            assert len(results) == 1  # verify that an image was run
+        f.unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+@pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
+def test_youtube():
+    """Test YOLO model on a YouTube video stream, handling potential network-related errors."""
+    model = YOLO(MODEL)
+    try:
+        model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
+    # Handle internet connection errors and 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+    except (urllib.error.HTTPError, ConnectionError) as e:
+        LOGGER.warning(f"WARNING: YouTube Test Error: {e}")
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_track_stream():
+    """
+    Tests streaming tracking on a short 10 frame video using ByteTrack tracker and different GMC methods.
+
+    Note imgsz=160 required for tracking for higher confidence and better matches.
+    """
+    video_url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/decelera_portrait_min.mov"
+    model = YOLO(MODEL)
+    model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
+    model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also
+
+    # Test Global Motion Compensation (GMC) methods
+    for gmc in "orb", "sift", "ecc":
+        with open(ROOT / "cfg/trackers/botsort.yaml", encoding="utf-8") as f:
+            data = yaml.safe_load(f)
+        tracker = TMP / f"botsort-{gmc}.yaml"
+        data["gmc_method"] = gmc
+        with open(tracker, "w", encoding="utf-8") as f:
+            yaml.safe_dump(data, f)
+        model.track(video_url, imgsz=160, tracker=tracker)
+
+
+def test_val():
+    """Test the validation mode of the YOLO model."""
+    YOLO(MODEL).val(data="coco8.yaml", imgsz=32, save_hybrid=True)
+
+
+def test_train_scratch():
+    """Test training the YOLO model from scratch using the provided configuration."""
+    model = YOLO(CFG)
+    model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
+    model(SOURCE)
+
+
+def test_train_pretrained():
+    """Test training of the YOLO model starting from a pre-trained checkpoint."""
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+    model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
+    model(SOURCE)
+
+
+def test_all_model_yamls():
+    """Test YOLO model creation for all available YAML configurations in the `cfg/models` directory."""
+    for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
+        if "rtdetr" in m.name:
+            if TORCH_1_9:  # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
+                _ = RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
+        else:
+            YOLO(m.name)
+
+
+@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
+def test_workflow():
+    """Test the complete workflow including training, validation, prediction, and exporting."""
+    model = YOLO(MODEL)
+    model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
+    model.val(imgsz=32)
+    model.predict(SOURCE, imgsz=32)
+    model.export(format="torchscript")  # WARNING: Windows slow CI export bug
+
+
+def test_predict_callback_and_setup():
+    """Test callback functionality during YOLO prediction setup and execution."""
+
+    def on_predict_batch_end(predictor):
+        """Callback function that handles operations at the end of a prediction batch."""
+        path, im0s, _ = predictor.batch
+        im0s = im0s if isinstance(im0s, list) else [im0s]
+        bs = [predictor.dataset.bs for _ in range(len(path))]
+        predictor.results = zip(predictor.results, im0s, bs)  # results is List[batch_size]
+
+    model = YOLO(MODEL)
+    model.add_callback("on_predict_batch_end", on_predict_batch_end)
+
+    dataset = load_inference_source(source=SOURCE)
+    bs = dataset.bs  # noqa access predictor properties
+    results = model.predict(dataset, stream=True, imgsz=160)  # source already setup
+    for r, im0, bs in results:
+        print("test_callback", im0.shape)
+        print("test_callback", bs)
+        boxes = r.boxes  # Boxes object for bbox outputs
+        print(boxes)
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_results(model):
+    """Ensure YOLO model predictions can be processed and printed in various formats."""
+    results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
+    for r in results:
+        r = r.cpu().numpy()
+        print(r, len(r), r.path)  # print numpy attributes
+        r = r.to(device="cpu", dtype=torch.float32)
+        r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
+        r.save_crop(save_dir=TMP / "runs/tests/crops/")
+        r.to_json(normalize=True)
+        r.to_df(decimals=3)
+        r.to_csv()
+        r.to_xml()
+        r.plot(pil=True)
+        r.plot(conf=True, boxes=True)
+        print(r, len(r), r.path)  # print after methods
+
+
+def test_labels_and_crops():
+    """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
+    imgs = [SOURCE, ASSETS / "zidane.jpg"]
+    results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+    save_path = Path(results[0].save_dir)
+    for r in results:
+        im_name = Path(r.path).stem
+        cls_idxs = r.boxes.cls.int().tolist()
+        # Check correct detections
+        assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
+        # Check label path
+        labels = save_path / f"labels/{im_name}.txt"
+        assert labels.exists()
+        # Check detections match label count
+        assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
+        # Check crops path and files
+        crop_dirs = list((save_path / "crops").iterdir())
+        crop_files = [f for p in crop_dirs for f in p.glob("*")]
+        # Crop directories match detections
+        assert all(r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs)
+        # Same number of crops as detections
+        assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_data_utils():
+    """Test utility functions in ultralytics/data/utils.py, including dataset stats and auto-splitting."""
+    from ultralytics.data.utils import HUBDatasetStats, autosplit
+    from ultralytics.utils.downloads import zip_directory
+
+    # from ultralytics.utils.files import WorkingDirectory
+    # with WorkingDirectory(ROOT.parent / 'tests'):
+
+    for task in TASKS:
+        file = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
+        download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
+        stats = HUBDatasetStats(TMP / file, task=task)
+        stats.get_json(save=True)
+        stats.process_images()
+
+    autosplit(TMP / "coco8")
+    zip_directory(TMP / "coco8/images/val")  # zip
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_data_converter():
+    """Test dataset conversion functions from COCO to YOLO format and class mappings."""
+    from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
+
+    file = "instances_val2017.json"
+    download(f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{file}", dir=TMP)
+    convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
+    coco80_to_coco91_class()
+
+
+def test_data_annotator():
+    """Automatically annotate data using specified detection and segmentation models."""
+    from ultralytics.data.annotator import auto_annotate
+
+    auto_annotate(
+        ASSETS,
+        det_model=WEIGHTS_DIR / "yolo11n.pt",
+        sam_model=WEIGHTS_DIR / "mobile_sam.pt",
+        output_dir=TMP / "auto_annotate_labels",
+    )
+
+
+def test_events():
+    """Test event sending functionality."""
+    from ultralytics.hub.utils import Events
+
+    events = Events()
+    events.enabled = True
+    cfg = copy(DEFAULT_CFG)  # does not require deepcopy
+    cfg.mode = "test"
+    events(cfg)
+
+
+def test_cfg_init():
+    """Test configuration initialization utilities from the 'ultralytics.cfg' module."""
+    from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
+
+    with contextlib.suppress(SyntaxError):
+        check_dict_alignment({"a": 1}, {"b": 2})
+    copy_default_cfg()
+    (Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)
+    [smart_value(x) for x in ["none", "true", "false"]]
+
+
+def test_utils_init():
+    """Test initialization utilities in the Ultralytics library."""
+    from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
+
+    get_ubuntu_version()
+    is_github_action_running()
+    get_git_origin_url()
+    get_git_branch()
+
+
+def test_utils_checks():
+    """Test various utility checks for filenames, git status, requirements, image sizes, and versions."""
+    checks.check_yolov5u_filename("yolov5n.pt")
+    checks.git_describe(ROOT)
+    checks.check_requirements()  # check requirements.txt
+    checks.check_imgsz([600, 600], max_dim=1)
+    checks.check_imshow(warn=True)
+    checks.check_version("ultralytics", "8.0.0")
+    checks.print_args()
+
+
+@pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
+def test_utils_benchmarks():
+    """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
+    from ultralytics.utils.benchmarks import ProfileModels
+
+    ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+def test_utils_torchutils():
+    """Test Torch utility functions including profiling and FLOP calculations."""
+    from ultralytics.nn.modules.conv import Conv
+    from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
+
+    x = torch.randn(1, 64, 20, 20)
+    m = Conv(64, 64, k=1, s=2)
+
+    profile(x, [m], n=3)
+    get_flops_with_torch_profiler(m)
+    time_sync()
+
+
+def test_utils_ops():
+    """Test utility operations functions for coordinate transformation and normalization."""
+    from ultralytics.utils.ops import (
+        ltwh2xywh,
+        ltwh2xyxy,
+        make_divisible,
+        xywh2ltwh,
+        xywh2xyxy,
+        xywhn2xyxy,
+        xywhr2xyxyxyxy,
+        xyxy2ltwh,
+        xyxy2xywh,
+        xyxy2xywhn,
+        xyxyxyxy2xywhr,
+    )
+
+    make_divisible(17, torch.tensor([8]))
+
+    boxes = torch.rand(10, 4)  # xywh
+    assert torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
+    assert torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
+    assert torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
+    assert torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
+
+    boxes = torch.rand(10, 5)  # xywhr for OBB
+    boxes[:, 4] = torch.randn(10) * 30
+    torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)  # unasserted: large random angles wrap, so exact round-trip recovery is not guaranteed
+
+
+def test_utils_files():
+    """Test file handling utilities including file age, date, and paths with spaces."""
+    from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
+
+    file_age(SOURCE)
+    file_date(SOURCE)
+    get_latest_run(ROOT / "runs")
+
+    path = TMP / "path/with spaces"
+    path.mkdir(parents=True, exist_ok=True)
+    with spaces_in_path(path) as new_path:
+        print(new_path)
+
+
+@pytest.mark.slow
+def test_utils_patches_torch_save():
+    """Test torch_save backoff when _torch_save raises RuntimeError to ensure robustness."""
+    from unittest.mock import MagicMock, patch
+
+    from ultralytics.utils.patches import torch_save
+
+    mock = MagicMock(side_effect=RuntimeError)
+
+    with patch("ultralytics.utils.patches._torch_save", new=mock):
+        with pytest.raises(RuntimeError):
+            torch_save(torch.zeros(1), TMP / "test.pt")
+
+    assert mock.call_count == 4, "torch_save was not attempted the expected number of times"
+
+
+def test_nn_modules_conv():
+    """Test Convolutional Neural Network modules including CBAM, Conv2, and ConvTranspose."""
+    from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
+
+    c1, c2 = 8, 16  # input and output channels
+    x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+    # Run all modules not otherwise covered in tests
+    DWConvTranspose2d(c1, c2)(x)
+    ConvTranspose(c1, c2)(x)
+    Focus(c1, c2)(x)
+    CBAM(c1)(x)
+
+    # Fuse ops
+    m = Conv2(c1, c2)
+    m.fuse_convs()
+    m(x)
+
+
+def test_nn_modules_block():
+    """Test various blocks in neural network modules including C1, C3TR, BottleneckCSP, C3Ghost, and C3x."""
+    from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
+
+    c1, c2 = 8, 16  # input and output channels
+    x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+    # Run all modules not otherwise covered in tests
+    C1(c1, c2)(x)
+    C3x(c1, c2)(x)
+    C3TR(c1, c2)(x)
+    C3Ghost(c1, c2)(x)
+    BottleneckCSP(c1, c2)(x)
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_hub():
+    """Test Ultralytics HUB functionalities (e.g. export formats, logout)."""
+    from ultralytics.hub import export_fmts_hub, logout
+    from ultralytics.hub.utils import smart_request
+
+    export_fmts_hub()
+    logout()
+    smart_request("GET", "https://github.com", progress=True)
+
+
+@pytest.fixture
+def image():
+    """Load and return an image from a predefined source using OpenCV."""
+    return cv2.imread(str(SOURCE))
+
+
+@pytest.mark.parametrize(
+    "auto_augment, erasing, force_color_jitter",
+    [
+        (None, 0.0, False),
+        ("randaugment", 0.5, True),
+        ("augmix", 0.2, False),
+        ("autoaugment", 0.0, True),
+    ],
+)
+def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
+    """Tests classification transforms during training with various augmentations to ensure proper functionality."""
+    from ultralytics.data.augment import classify_augmentations
+
+    transform = classify_augmentations(
+        size=224,
+        mean=(0.5, 0.5, 0.5),
+        std=(0.5, 0.5, 0.5),
+        scale=(0.08, 1.0),
+        ratio=(3.0 / 4.0, 4.0 / 3.0),
+        hflip=0.5,
+        vflip=0.5,
+        auto_augment=auto_augment,
+        hsv_h=0.015,
+        hsv_s=0.4,
+        hsv_v=0.4,
+        force_color_jitter=force_color_jitter,
+        erasing=erasing,
+    )
+
+    transformed_image = transform(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)))
+
+    assert transformed_image.shape == (3, 224, 224)
+    assert torch.is_tensor(transformed_image)
+    assert transformed_image.dtype == torch.float32
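+
+    # Range sketch, assuming Normalize with mean=std=0.5 maps [0, 1] into [-1, 1];
+    # random erasing can paint values outside that range, so only check when it is disabled
+    if erasing == 0.0:
+        assert transformed_image.min() >= -1.01 and transformed_image.max() <= 1.01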
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_model_tune():
+    """Tune YOLO model for performance improvement."""
+    YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+
+
+def test_model_embeddings():
+    """Test YOLO model embeddings."""
+    model_detect = YOLO(MODEL)
+    model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+
+    for batch in ([SOURCE], [SOURCE, SOURCE]):  # test batch size 1 and 2
+        assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
+        assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
+def test_yolo_world():
+    """Tests YOLO world models with CLIP support, including detection and training scenarios."""
+    model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
+    model.set_classes(["tree", "window"])
+    model(SOURCE, conf=0.01)
+
+    model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
+    # Training from a pretrained model. Eval is included at the final stage of training.
+    # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
+    model.train(
+        data="dota8.yaml",
+        epochs=1,
+        imgsz=32,
+        cache="disk",
+        close_mosaic=1,
+    )
+
+    # Test WorldTrainerFromScratch
+    from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+
+    model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
+    model.train(
+        data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
+        epochs=1,
+        imgsz=32,
+        cache="disk",
+        close_mosaic=1,
+        trainer=WorldTrainerFromScratch,
+    )
+
+
+def test_yolov10():
+    """Test YOLOv10 model training, validation, and prediction steps with minimal configurations."""
+    model = YOLO("yolov10n.yaml")
+    # train/val/predict
+    model.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
+    model.val(data="coco8.yaml", imgsz=32)
+    model.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
+    model(SOURCE)

+ 94 - 0
tests/test_solutions.py

@@ -0,0 +1,94 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import cv2
+import pytest
+
+from tests import TMP
+from ultralytics import YOLO, solutions
+from ultralytics.utils import ASSETS_URL, WEIGHTS_DIR
+from ultralytics.utils.downloads import safe_download
+
+DEMO_VIDEO = "solutions_ci_demo.mp4"
+POSE_VIDEO = "solution_ci_pose_demo.mp4"
+
+
+@pytest.mark.slow
+def test_major_solutions():
+    """Test the object counting, heatmap, speed estimation, trackzone and queue management solution."""
+    safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
+    counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False)  # Test object counter
+    heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False)  # Test heatmaps
+    heatmap_count = solutions.Heatmap(
+        colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False, region=region_points
+    )  # Test heatmaps with object counting
+    speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False)  # Test speed estimation
+    queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False)  # Test queue management
+    line_analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False)  # Test line analytics
+    pie_analytics = solutions.Analytics(analytics_type="pie", model="yolo11n.pt", show=False)  # Test pie analytics
+    bar_analytics = solutions.Analytics(analytics_type="bar", model="yolo11n.pt", show=False)  # Test bar analytics
+    area_analytics = solutions.Analytics(analytics_type="area", model="yolo11n.pt", show=False)  # Test area analytics
+    trackzone = solutions.TrackZone(region=region_points, model="yolo11n.pt", show=False)  # Test trackzone
+    frame_count = 0  # Required for analytics
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        frame_count += 1
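+        # Each solution draws on its input frame, so every call below gets a fresh copy to keep results independent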
+        original_im0 = im0.copy()
+        _ = counter.count(original_im0.copy())
+        _ = heatmap.generate_heatmap(original_im0.copy())
+        _ = heatmap_count.generate_heatmap(original_im0.copy())
+        _ = speed.estimate_speed(original_im0.copy())
+        _ = queue.process_queue(original_im0.copy())
+        _ = line_analytics.process_data(original_im0.copy(), frame_count)
+        _ = pie_analytics.process_data(original_im0.copy(), frame_count)
+        _ = bar_analytics.process_data(original_im0.copy(), frame_count)
+        _ = area_analytics.process_data(original_im0.copy(), frame_count)
+        _ = trackzone.trackzone(original_im0.copy())
+    cap.release()
+
+    # Test workouts monitoring
+    safe_download(url=f"{ASSETS_URL}/{POSE_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / POSE_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
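+    # kpts [5, 11, 13]: left shoulder, hip, and knee in COCO keypoint order (assumed here to track a squat-style angle)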
+    gym = solutions.AIGym(kpts=[5, 11, 13], show=False)
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        _ = gym.monitor(im0)
+    cap.release()
+
+
+@pytest.mark.slow
+def test_instance_segmentation():
+    """Test the instance segmentation solution."""
+    from ultralytics.utils.plotting import Annotator, colors
+
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+    names = model.names
+    safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)  # ensure the demo video exists when this test runs alone
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        results = model.predict(im0)
+        annotator = Annotator(im0, line_width=2)
+        if results[0].masks is not None:
+            clss = results[0].boxes.cls.cpu().tolist()
+            masks = results[0].masks.xy
+            for mask, cls in zip(masks, clss):
+                color = colors(int(cls), True)
+                annotator.seg_bbox(mask=mask, mask_color=color, label=names[int(cls)])
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+@pytest.mark.slow
+def test_streamlit_predict():
+    """Test streamlit predict live inference solution."""
+    solutions.Inference().inference()