increase acc (#2039)

* increase acc
* try 0.45
* @pytest
* @pytest
* try .50
* duration
* pytest
parent b4eb6ef5a1
commit c438d0dd90

@@ -21,7 +21,7 @@ references:
 name: Testing
 command: |
 python --version ; pip --version ; pip list
-py.test pytorch_lightning tests -v --doctest-modules --junitxml=test-reports/pytest_junit.xml
+py.test pytorch_lightning tests -v --junitxml=test-reports/pytest_junit.xml
 no_output_timeout: 15m

 examples: &examples

@@ -30,7 +30,7 @@ references:
 command: |
 pip install -r ./pl_examples/requirements.txt --user
 python --version ; pip --version ; pip list
-py.test pl_examples -v --doctest-modules --junitxml=test-reports/pytest_junit.xml
+py.test pl_examples -v --junitxml=test-reports/pytest_junit.xml
 no_output_timeout: 20m

 install_pkg: &install_pkg

@@ -38,7 +38,7 @@ steps:
 #- pip install -r ./docs/requirements.txt --user -q
 - pip list
 - python -c "import torch ; print(' & '.join([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]) if torch.cuda.is_available() else 'only CPU')"
-- coverage run --source pytorch_lightning -m py.test pytorch_lightning tests benchmarks -v --doctest-modules # --flake8
+- coverage run --source pytorch_lightning -m py.test pytorch_lightning tests benchmarks -v # --flake8
 #- cd docs; make doctest; make coverage
 - coverage report
 - codecov --token $CODECOV_TOKEN # --pr $DRONE_PULL_REQUEST --build $DRONE_BUILD_NUMBER --branch $DRONE_BRANCH --commit $DRONE_COMMIT --tag $DRONE_TAG

@@ -112,7 +112,7 @@ jobs:
 run: |
 # tox --sitepackages
 # flake8 .
-coverage run --source pytorch_lightning -m py.test pytorch_lightning tests -v --doctest-modules --junitxml=junit/test-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml
+coverage run --source pytorch_lightning -m py.test pytorch_lightning tests -v --junitxml=junit/test-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml
 coverage report

 - name: Upload pytest test results

@@ -12,7 +12,7 @@ rm -rf ./tests/cometruns*
 rm -rf ./tests/wandb*
 rm -rf ./tests/tests/*
 rm -rf ./lightning_logs
-python -m coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --doctest-modules --flake8 --durations=0
+python -m coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --flake8
 python -m coverage report -m

 # specific file

@@ -6,12 +6,16 @@ norecursedirs =
 python_files =
     test_*.py
 # doctest_plus = disabled
-addopts = --strict
+addopts =
+    --strict
+    --doctest-modules
+    --durations=0
 markers =
     slow
     remote_data
     filterwarnings
     gpus_param_tests
+junit_duration_report = call

 [coverage:report]
 exclude_lines =
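
With `--doctest-modules` moved into `addopts`, a plain `py.test` run now collects docstring examples from every module by default, `--durations=0` prints the duration of every test, and `junit_duration_report = call` restricts the timing written to the JUnit XML to the call phase (excluding setup and teardown). A minimal sketch of the kind of docstring example this picks up; the function below is illustrative and not part of the repository:

def scale_batch_size(batch_size: int, factor: int = 2) -> int:
    """Scale a batch size by an integer factor.

    >>> scale_batch_size(32)
    64
    >>> scale_batch_size(8, factor=4)
    32
    """
    return batch_size * factor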

@@ -40,7 +40,7 @@ Make sure to run coverage on a GPU machine with at least 2 GPUs and NVIDIA apex
 cd pytorch-lightning

 # generate coverage (coverage is also installed as part of dev dependencies under tests/requirements-devel.txt)
-coverage run --source pytorch_lightning -m py.test pytorch_lightning tests examples -v --doctest-modules
+coverage run --source pytorch_lightning -m py.test pytorch_lightning tests examples -v

 # print coverage stats
 coverage report -m

@@ -25,7 +25,7 @@ def assert_speed_parity(pl_times, pt_times, num_epochs):
         f"lightning was slower than PT (threshold {max_diff_per_epoch})"


-def run_model_test_without_loggers(trainer_options, model, min_acc=0.30):
+def run_model_test_without_loggers(trainer_options, model, min_acc=0.50):
     reset_seed()

     # fit model

@@ -155,7 +155,7 @@ def load_model_from_checkpoint(root_weights_dir, module_class=EvalModelTemplate):
     return trained_model


-def run_prediction(dataloader, trained_model, dp=False, min_acc=0.3):
+def run_prediction(dataloader, trained_model, dp=False, min_acc=0.50):
     # run prediction on 1 batch
     for batch in dataloader:
         break
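
Both helper defaults rise from 0.30 to 0.50, tightening the minimum accuracy the trained test models must reach before the shared helpers pass. A minimal sketch of how such a threshold check typically looks; the function name and the argmax-based accuracy below are illustrative, not the repository's exact implementation:

import torch


def check_min_accuracy(logits: torch.Tensor, targets: torch.Tensor, min_acc: float = 0.50) -> float:
    """Assert that argmax predictions reach at least ``min_acc`` accuracy."""
    preds = torch.argmax(logits, dim=1)
    acc = (preds == targets).float().mean().item()
    assert acc >= min_acc, f"accuracy {acc:.3f} is below the required minimum {min_acc}"
    return acc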

@@ -248,9 +248,8 @@ def test_mixing_of_dataloader_options(tmpdir):
         f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'


+@pytest.mark.skip('TODO: speed up this test')
 def test_train_inf_dataloader_error(tmpdir):
-    pytest.skip('TODO: fix speed of this test')
-
     """Test inf train data loader (e.g. IterableDataset)"""
     model = EvalModelTemplate()
     model.train_dataloader = model.train_dataloader__infinite
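
This hunk (and the ones that follow) replaces an in-body `pytest.skip(...)` call with a `@pytest.mark.skip(...)` decorator. The mark is attached to the function object, so pytest reports the test as skipped without ever executing its body, whereas the in-body call only skips once the test has already started running. A minimal sketch of the two styles; the placeholder tests below are illustrative:

import pytest


def test_slow_thing_old_style(tmpdir):
    pytest.skip('TODO: fix speed of this test')
    # anything below the skip call never runs, but the test body is still entered


@pytest.mark.skip('TODO: speed up this test')
def test_slow_thing_new_style(tmpdir):
    # marked as skipped up front; the body is never entered
    ...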

@@ -261,9 +260,8 @@ def test_train_inf_dataloader_error(tmpdir):
     trainer.fit(model)


+@pytest.mark.skip('TODO: speed up this test')
 def test_val_inf_dataloader_error(tmpdir):
-    pytest.skip('TODO: fix speed of this test')
-
     """Test inf train data loader (e.g. IterableDataset)"""
     model = EvalModelTemplate()
     model.val_dataloader = model.val_dataloader__infinite

@@ -274,9 +272,8 @@ def test_val_inf_dataloader_error(tmpdir):
     trainer.fit(model)


+@pytest.mark.skip('TODO: speed up this test')
 def test_test_inf_dataloader_error(tmpdir):
-    pytest.skip('TODO: fix speed of this test')
-
     """Test inf train data loader (e.g. IterableDataset)"""
     model = EvalModelTemplate()
     model.test_dataloader = model.test_dataloader__infinite

@@ -288,9 +285,8 @@ def test_test_inf_dataloader_error(tmpdir):


 @pytest.mark.parametrize('check_interval', [50, 1.0])
+@pytest.mark.skip('TODO: speed up this test')
 def test_inf_train_dataloader(tmpdir, check_interval):
-    pytest.skip('TODO: fix speed of this test')
-
     """Test inf train data loader (e.g. IterableDataset)"""

     model = EvalModelTemplate()

@@ -307,9 +303,8 @@ def test_inf_train_dataloader(tmpdir, check_interval):


 @pytest.mark.parametrize('check_interval', [1.0])
+@pytest.mark.skip('TODO: speed up this test')
 def test_inf_val_dataloader(tmpdir, check_interval):
-    pytest.skip('TODO: fix speed of this test')
-
     """Test inf val data loader (e.g. IterableDataset)"""

     model = EvalModelTemplate()

@@ -134,9 +134,8 @@ def test_call_to_trainer_method(tmpdir):
         'Learning rate was not altered after running learning rate finder'


+@pytest.mark.skip('TODO: speed up this test')
 def test_accumulation_and_early_stopping(tmpdir):
-    pytest.skip('TODO: speed up this test')
-
     """ Test that early stopping of learning rate finder works, and that
     accumulation also works for this feature """