From 50893251a60502a011f8e9ebc772908261556118 Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Thu, 26 Aug 2021 22:02:14 +0000 Subject: [PATCH 01/22] Run tests with appropriate dependency subset --- .../workflows/actions/run-tests/action.yml | 16 +++++++--- tests/README.md | 5 +++ tox.ini | 32 +++++++++++++++---- 3 files changed, 41 insertions(+), 12 deletions(-) diff --git a/.github/workflows/actions/run-tests/action.yml b/.github/workflows/actions/run-tests/action.yml index c32f093d71..63c2398a49 100644 --- a/.github/workflows/actions/run-tests/action.yml +++ b/.github/workflows/actions/run-tests/action.yml @@ -7,21 +7,27 @@ # - uses: actions/checkout@v2 # - uses: ./.github/workflows/actions/run-tests # with: +# tox-env: 'spark' # test-kind: 'unit' # test-marker: 'spark and notebooks' name: 'Run Python tests' description: 'Specify parameters to configure test subsets to run and collect test report for.' inputs: + tox-env: + description: "Name of the tox env. EX) cpu|gpu|spark + See tox.ini at root level for more info" + required: true + # Options are "cpu|gpu|spark" and any default tox env. + # For more info on default tox env, see https://tox.readthedocs.io/en/latest/config.html#tox-environments + default: 'py' test-kind: - description: - "The kinds of tests to run. EX) unit|integration|smoke + description: "The kinds of tests to run. EX) unit|integration|smoke This maps to those in the 'tests/' folder" required: true default: 'unit' test-marker: - description: - "Finer filter for selecting the tests to run with pytest markers. + description: "Finer filter for selecting the tests to run with pytest markers. See https://docs.pytest.org/en/6.2.x/example/markers.html" default: 'not gpu and not notebooks and not spark' outputs: @@ -49,7 +55,7 @@ runs: # 3. run all of the specified test environment (i.e. run tests in different py-versions, etc) # 4. show test reports run: | - tox -e py -- tests/${{ inputs.test-kind }} -m '${{ inputs.test-marker }}' + tox -e ${{ inputs.tox-env }} -- tests/${{ inputs.test-kind }} -m '${{ inputs.test-marker }}' - name: Prepare Code Coverage Report id: rename-cov-report diff --git a/tests/README.md b/tests/README.md index e69c7a56b5..70f350708d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -185,6 +185,11 @@ For executing this test, first make sure you are in the correct environment as d pytest tests/unit/test_notebooks_python.py::test_sar_single_node_runs ``` +## Test execution with tox + +......... + + ### Developing smoke and integration tests with Papermill A more advanced option is used in the smoke and integration tests, where we not only execute the notebook, but inject parameters and recover the computed metrics. 
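For readers unfamiliar with pytest markers: the `test-marker` input above is a plain pytest `-m` expression evaluated against markers declared on each test. As a minimal sketch (the test name and body below are hypothetical; `gpu`, `spark`, and `notebooks` are the marker names used in the expressions above):

```python
import pytest


@pytest.mark.gpu
@pytest.mark.notebooks
def test_example_notebook_needs_gpu():
    # Selected by:    pytest -m "gpu and notebooks"
    # Deselected by:  pytest -m "not gpu and not notebooks and not spark"
    assert True
```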
diff --git a/tox.ini b/tox.ini
index 4fd2246c20..ba06aa07bc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,13 +2,11 @@
 # py will use whatever the basepython `python` maps to from PATH
 # you can use py38, for example, to choose a different version
 # See https://tox.readthedocs.io/en/latest/config.html#tox-environments
-envlist = py
+envlist = py, cpu, gpu, spark, all

+# Default env settings
 [testenv]
-# Reading additional dependencies to run the test
-# https://tox.readthedocs.io/en/latest/example/basic.html#depending-on-requirements-txt-or-defining-constraints
-; deps = -rrequirements-dev.txt
 # similar to 'pip install recommenders-*.whl[test]'
 extras = all
 commands =
@@ -20,6 +18,29 @@ commands =
     # See https://tox.readthedocs.io/en/latest/example/general.html for more details
     pytest {posargs}

+[testenv:cpu]
+# i.e: 'pip install recommenders-*.whl[dev]'
+# with this dependency subset, we should be able to run the test markers:
+# 1. "not notebooks and not spark and not gpu" (tests for general sdk utilities)
+# 2. "notebooks and not spark and not gpu" (tests for notebook examples without extra dependencies)
+extras = dev,examples
+
+[testenv:gpu]
+# with this dependency subset, we should be able to run the test markers:
+# 1. "gpu and not notebooks and not spark" (tests for gpu utilities)
+# 2. "gpu and notebooks and not spark" (tests for notebooks needing gpu resources)
+extras = dev,gpu,examples
+
+[testenv:spark]
+# with this dependency subset, we should be able to run the test markers:
+# 1. "spark and not notebooks and not gpu" (tests for spark utilities)
+# 2. "spark and notebooks and not gpu" (tests for notebooks using spark)
+extras = dev,spark,examples
+
+[testenv:all]
+# i.e: 'pip install recommenders-*.whl[all]'
+# with this, we should be able to run ANY tests
+extras = all

 [testenv:flake8]
 deps = flake8
@@ -55,9 +76,6 @@ addopts =

 [flake8]
-; # Configs for flake8-import-order, see https://pypi.org/project/flake8-import-order/ for more info.
-; import-order-style=edited
-; application-import-names=recommenders, tests
 # Native flake8 configs
 max-line-length = 140
 exclude =

From 312c8ad0ed5b8894f9a010c222876d89813f1aa3 Mon Sep 17 00:00:00 2001
From: Jianjie Liu
Date: Fri, 27 Aug 2021 19:10:44 +0000
Subject: [PATCH 02/22] Update doc on test execution via tox

---
 tests/README.md | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/tests/README.md b/tests/README.md
index 70f350708d..22893f619b 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -187,7 +187,24 @@ pytest tests/unit/test_notebooks_python.py::test_sar_single_node_runs

 ## Test execution with tox

-.........
+Tox is a great virtual environment management tool and test command tools that acts like a front-end for our CI workflows. Our existing CI pipelines in GitHub is leveraging tox to orchestrate the build. This way we can provide a parity in the local and remote execution environment if both run tox. No more panic when "tests run fine in my dev box but fail in remote build pipeline"!
+
+1. If you haven't, `pip install tox`
+2. To run static analysis: `tox -e flake8`
+3. To run any of our test suites:
+   `tox -e {TOX_ENV} -- {PYTEST_PARAM}`
+
+   where
+   - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency for `recommenders`, for example recommend[gpu], and recommend[spark].
+   - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli executing particular tests.
+
+   For example:
+
+   1. 
`tox -e cpu -- tests/unit -m "not notebook and not spark and not gpu` (runs the unit tests without extra dependency) + 2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests) + 3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests) + 4. `tox -e all -- tests/unit` (to run all of the unit tests) + ### Developing smoke and integration tests with Papermill From 4b4726f964c2fab808c12b2d433f88c22fa69837 Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Wed, 1 Sep 2021 19:25:58 +0000 Subject: [PATCH 03/22] Add env wild card --- .gitignore | 4 ++-- tests/README.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 96c1146f0c..a163274a80 100644 --- a/.gitignore +++ b/.gitignore @@ -87,8 +87,8 @@ celerybeat-schedule *.sage.py # Environments -.env -.venv +.env* +.venv* env/ venv/ ENV/ diff --git a/tests/README.md b/tests/README.md index 22893f619b..7ded1778ed 100644 --- a/tests/README.md +++ b/tests/README.md @@ -185,7 +185,7 @@ For executing this test, first make sure you are in the correct environment as d pytest tests/unit/test_notebooks_python.py::test_sar_single_node_runs ``` -## Test execution with tox +### Test execution with tox Tox is a great virtual environment management tool and test command tools that acts like a front-end for our CI workflows. Our existing CI pipelines in GitHub is leveraging tox to orchestrate the build. This way we can provide a parity in the local and remote execution environment if both run tox. No more panic when "tests run fine in my dev box but fail in remote build pipeline"! From 07b9399bd7c90e2e27f953dbf7960d11578a3b0d Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Wed, 1 Sep 2021 20:42:27 +0000 Subject: [PATCH 04/22] Update test-running documentation --- tests/README.md | 5 +++-- tox.ini | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/README.md b/tests/README.md index 7ded1778ed..2014574e26 100644 --- a/tests/README.md +++ b/tests/README.md @@ -195,7 +195,7 @@ Tox is a great virtual environment management tool and test command tools that a `tox -e {TOX_ENV} -- {PYTEST_PARAM}` where - - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency for `recommenders`, for example recommend[gpu], and recommend[spark]. + - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency for `recommenders`, for example recommend[gpu], and recommend[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): "py|pyNM" - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli executing particular tests. For example: @@ -204,7 +204,8 @@ Tox is a great virtual environment management tool and test command tools that a 2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests) 3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests) 4. `tox -e all -- tests/unit` (to run all of the unit tests) - + 5. `tox -e py -- test/unit` (runs the unit tests under the default python interpreter) + 6. 
`tox -e py37 -- test/unit` (runs the unit tests under Python3.7) ### Developing smoke and integration tests with Papermill diff --git a/tox.ini b/tox.ini index ba06aa07bc..815e06dc14 100644 --- a/tox.ini +++ b/tox.ini @@ -75,6 +75,10 @@ addopts = --cov-append --cov=recommenders --cov-report=term-missing --cov-report=xml --junitxml=junit/test-results.xml +[coverage:report] +skip_empty = true + + [flake8] # Native flake8 configs max-line-length = 140 From 6c892600f38ddf1f239c7cdcad446e85be0e0856 Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Wed, 1 Sep 2021 20:53:00 +0000 Subject: [PATCH 05/22] Parametrize with tox-env --- .github/workflows/actions/run-tests/action.yml | 12 ++++++------ .github/workflows/nightly.yml | 3 +++ .github/workflows/pr-gate.yml | 3 +++ tests/README.md | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.github/workflows/actions/run-tests/action.yml b/.github/workflows/actions/run-tests/action.yml index 63c2398a49..86b791fd99 100644 --- a/.github/workflows/actions/run-tests/action.yml +++ b/.github/workflows/actions/run-tests/action.yml @@ -18,9 +18,9 @@ inputs: description: "Name of the tox env. EX) cpu|gpu|spark See tox.ini at root level for more info" required: true - # Options are "cpu|gpu|spark" and any default tox env. + # Options are "cpu|gpu|spark|all" and any default tox env. # For more info on default tox env, see https://tox.readthedocs.io/en/latest/config.html#tox-environments - default: 'py' + default: 'all' test-kind: description: "The kinds of tests to run. EX) unit|integration|smoke This maps to those in the 'tests/' folder" @@ -50,12 +50,12 @@ runs: # '-e py' will use the default 'python' executable found in system path # for why using tox, see: https://tox.readthedocs.io/en/latest/index.html # tox will do: - # 1. build and install source distribution (sdist) - # 2. run static analysis on the code (not implemented yet) - # 3. run all of the specified test environment (i.e. run tests in different py-versions, etc) + # 1. create a virtual env + # 2. build and install source distribution (sdist) + # 3. run the specified tests # 4. 
show test reports run: | - tox -e ${{ inputs.tox-env }} -- tests/${{ inputs.test-kind }} -m '${{ inputs.test-marker }}' + tox -ve ${{ inputs.tox-env }} -- tests/${{ inputs.test-kind }} -m '${{ inputs.test-marker }}' - name: Prepare Code Coverage Report id: rename-cov-report diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 15ef7ba159..fe9cc4fb10 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -114,6 +114,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'cpu' # run the gpu tests with the 'recommenders[dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} # Currently GitHub workflow cannot call an action from another action @@ -167,6 +168,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'spark' # run the gpu tests with the 'recommenders[spark,examples,dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} @@ -202,6 +204,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'gpu' # run the gpu tests with the 'recommenders[gpu,examples,dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} diff --git a/.github/workflows/pr-gate.yml b/.github/workflows/pr-gate.yml index 5f0320ba7a..423844933d 100644 --- a/.github/workflows/pr-gate.yml +++ b/.github/workflows/pr-gate.yml @@ -93,6 +93,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'cpu' # run the cpu tests with the 'recommenders[dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} # Currently GitHub workflow cannot call an action from another action @@ -138,6 +139,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'spark' # run the spark tests with the 'recommenders[spark,examples,dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} @@ -173,6 +175,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: + tox-env: 'gpu' # run the gpu tests with the 'recommenders[gpu,examples,dev]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} diff --git a/tests/README.md b/tests/README.md index 2014574e26..4b8d6bfca2 100644 --- a/tests/README.md +++ b/tests/README.md @@ -195,7 +195,7 @@ Tox is a great virtual environment management tool and test command tools that a `tox -e {TOX_ENV} -- {PYTEST_PARAM}` where - - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency for `recommenders`, for example recommend[gpu], and recommend[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): "py|pyNM" + - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency, for example recommenders[gpu], and recommenders[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): "py|pyNM" - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli executing particular tests. 
For example: From 318885afd8a9f23afe4f48b50f06b59b78f7c489 Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Wed, 1 Sep 2021 21:58:09 +0000 Subject: [PATCH 06/22] Correct typos --- .github/workflows/nightly.yml | 2 +- .github/workflows/pr-gate.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index fe9cc4fb10..7f87ab1220 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -114,7 +114,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: - tox-env: 'cpu' # run the gpu tests with the 'recommenders[dev]' dependencies + tox-env: 'cpu' # run the cpu tests with the 'recommenders[dev,examples]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} # Currently GitHub workflow cannot call an action from another action diff --git a/.github/workflows/pr-gate.yml b/.github/workflows/pr-gate.yml index 423844933d..16ea4111e7 100644 --- a/.github/workflows/pr-gate.yml +++ b/.github/workflows/pr-gate.yml @@ -93,7 +93,7 @@ jobs: - name: Run ${{ matrix.test-kind }} tests ('${{ matrix.test-marker }}') uses: ./.github/workflows/actions/run-tests with: - tox-env: 'cpu' # run the cpu tests with the 'recommenders[dev]' dependencies + tox-env: 'cpu' # run the cpu tests with the 'recommenders[dev,examples]' dependencies test-kind: ${{ matrix.test-kind }} test-marker: ${{ matrix.test-marker }} # Currently GitHub workflow cannot call an action from another action From 1e652115bc8e76b3c5ec1c7c33e5bd935376ff8f Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Thu, 2 Sep 2021 15:30:00 +0000 Subject: [PATCH 07/22] Update dev documentation --- tests/README.md | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/tests/README.md b/tests/README.md index 4b8d6bfca2..a16a79d61f 100644 --- a/tests/README.md +++ b/tests/README.md @@ -157,6 +157,28 @@ Example: +### Test execution with tox + +[Tox](https://tox.readthedocs.io/en/latest/) is a great tool for both virtual environment management and test execution. Tox acts like a front-end for our CI workflows. Our existing [CI pipelines](https://github.com/microsoft/recommenders/actions) in GitHub is leveraging it to orchestrate the build. This way we can provide a parity in both local and remote execution environments if both run tox. Run tox and no more "tests run fine in my dev box but fail in the remote build"! + +1. If you haven't, `pip install tox` +2. To run static analysis: `tox -e flake8` +3. To run any of our test suites: + `tox -e {TOX_ENV} -- {PYTEST_PARAM}` + + where + - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency, for example recommenders[gpu], and recommenders[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): "py|pyNM" + - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli executing particular tests. + + For example: + + 1. `tox -e cpu -- tests/unit -m "not notebook and not spark and not gpu` (runs the unit tests without extra dependency) + 2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests) + 3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests) + 4. `tox -e all -- tests/unit` (to run all of the unit tests) + 5. `tox -e py -- test/unit` (runs the unit tests under the default python interpreter) + 6. 
`tox -e py37 -- test/unit` (runs the unit tests under Python3.7)

 ## How to create tests on notebooks with Papermill

 In the notebooks of this repo, we use [Papermill](https://github.com/nteract/papermill) in unit, smoke and integration tests. Papermill is a tool that enables you to parameterize notebooks, execute and collect metrics across the notebooks, and summarize collections of notebooks.
@@ -185,29 +207,6 @@ For executing this test, first make sure you are in the correct environment as d
 pytest tests/unit/test_notebooks_python.py::test_sar_single_node_runs
 ```

-### Test execution with tox
-
-Tox is a great virtual environment management tool and test command tools that acts like a front-end for our CI workflows. Our existing CI pipelines in GitHub is leveraging tox to orchestrate the build. This way we can provide a parity in the local and remote execution environment if both run tox. No more panic when "tests run fine in my dev box but fail in remote build pipeline"!
-
-1. If you haven't, `pip install tox`
-2. To run static analysis: `tox -e flake8`
-3. To run any of our test suites:
-   `tox -e {TOX_ENV} -- {PYTEST_PARAM}`
-
-   where
-   - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency, for example recommenders[gpu], and recommenders[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): "py|pyNM"
-   - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli executing particular tests.
-
-   For example:
-
-   1. `tox -e cpu -- tests/unit -m "not notebook and not spark and not gpu` (runs the unit tests without extra dependency)
-   2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests)
-   3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests)
-   4. `tox -e all -- tests/unit` (to run all of the unit tests)
-   5. `tox -e py -- test/unit` (runs the unit tests under the default python interpreter)
-   6. `tox -e py37 -- test/unit` (runs the unit tests under Python3.7)
-
 ### Developing smoke and integration tests with Papermill

 A more advanced option is used in the smoke and integration tests, where we not only execute the notebook, but inject parameters and recover the computed metrics.

From 983b92d2fa7b636a68a539bbdb196bb2d44c511b Mon Sep 17 00:00:00 2001
From: Jianjie Liu
Date: Thu, 2 Sep 2021 17:55:16 +0000
Subject: [PATCH 08/22] Update test execution docs

---
 tests/README.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/README.md b/tests/README.md
index a16a79d61f..daa555beee 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -159,7 +159,7 @@ Example:

### Test execution with tox

[Tox](https://tox.readthedocs.io/en/latest/) is a great tool for both virtual environment management and test execution. Tox acts like a front-end for our CI workflows. Our existing [CI pipelines](https://github.com/microsoft/recommenders/actions) in GitHub are leveraging it to orchestrate the build. 
This way we can provide **parity** between local and remote execution environments when both run tox. Run tox and no more **"tests run fine in my dev box but fail in the remote build"**!

1. If you haven't, `pip install tox`
2. To run static analysis: `tox -e flake8`
3. To run any of our test suites:
   `tox -e {TOX_ENV} -- {PYTEST_PARAM}`

   where
   - `TOX_ENV` can be `cpu|gpu|spark|all`, each env maps to the "extra" dependency, for example recommenders[gpu] and recommenders[spark]. It can also be any of the [default envs](https://tox.readthedocs.io/en/latest/config.html#tox-environments): `py|pyNM`
   - `PYTEST_PARAM` are any standard parameters to supply to `pytest` cli.

   For example:

   1. `tox -e cpu -- tests/unit -m "not notebook and not spark and not gpu"` (runs the unit tests with `recommenders[dev,example]` dependencies)
   2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests with `recommenders[dev,example,gpu]` dependencies)
   3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests with `recommenders[dev,example,spark]` dependencies)
   4. `tox -e all -- tests/unit` (to run all of the unit tests with `recommenders[all]` dependencies)
   5. `tox -e py -- test/unit` (runs the unit tests under the default python interpreter with `recommenders[all]`)
   6. `tox -e py37 -- test/unit` (runs the unit tests under Python3.7 with `recommenders[all]` )

## How to create tests on notebooks with Papermill

From 9b094e32550adbb326f40b02a7056e179a3e3da0 Mon Sep 17 00:00:00 2001
From: YanZhangADS
Date: Fri, 10 Sep 2021 02:59:30 +0000
Subject: [PATCH 09/22] addition of item similarity measure - python version

---
 recommenders/evaluation/python_evaluation.py |  72 ++++++++-
 .../evaluation/test_python_evaluation.py     | 145 ++++++++++++++++--
 2 files changed, 206 insertions(+), 11 deletions(-)

diff --git a/recommenders/evaluation/python_evaluation.py b/recommenders/evaluation/python_evaluation.py
index f639930bad..b47c58e962 100644
--- a/recommenders/evaluation/python_evaluation.py
+++ b/recommenders/evaluation/python_evaluation.py
@@ -20,6 +20,8 @@
     DEFAULT_PREDICTION_COL,
     DEFAULT_RELEVANCE_COL,
     DEFAULT_SIMILARITY_COL,
+    DEFAULT_ITEM_FEATURES_COL,
+    DEFAULT_ITEM_SIM_MEASURE,
     DEFAULT_K,
     DEFAULT_THRESHOLD,
 )
@@ -703,6 +705,8 @@
     def __init__(
         self,
         train_df,
         reco_df,
+        item_feature_df=None,
+        item_sim_measure=DEFAULT_ITEM_SIM_MEASURE,
         col_user=DEFAULT_USER_COL,
         col_item=DEFAULT_ITEM_COL,
         col_relevance=None,
@@ -743,6 +747,8 @@
             Interaction here follows the *item choice model* from Castells et al. 
reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. col_user (str): User id column name. col_item (str): Item id column name. col_relevance (str): This column indicates whether the recommended item is actually @@ -762,6 +768,8 @@ def __init__( self.df_intralist_similarity = None self.df_user_diversity = None self.avg_diversity = None + self.item_feature_df = item_feature_df + self.item_sim_measure = item_sim_measure if col_relevance is None: self.col_relevance = DEFAULT_RELEVANCE_COL @@ -773,6 +781,26 @@ def __init__( self.reco_df = reco_df[[col_user, col_item, col_relevance]].astype( {col_relevance: np.float16} ) + if self.item_sim_measure == "item_feature_vector": + self.col_item_features = DEFAULT_ITEM_FEATURES_COL + required_columns = [self.col_item, self.col_item_features] + # print(str(required_columns)) + # print(str(item_feature_df.columns.values.tolist())) + if self.item_feature_df is not None: + + if str(required_columns) != str( + item_feature_df.columns.values.tolist() + ): + raise Exception( + "Incorrect column names! item_feature_df should have columns:" + + str(required_columns) + ) + else: + raise Exception( + "item_feature_df not specified! item_feature_df must be provided if choosing to use item_feature_vector to calculate item similarity. item_feature_df should have columns:" + + str(required_columns) + ) + # check if reco_df contains any user_item pairs that are already shown in train_df count_intersection = pd.merge( self.train_df, self.reco_df, how="inner", on=[self.col_user, self.col_item] @@ -799,7 +827,21 @@ def _get_pairwise_items(self, df): ].reset_index(drop=True) return df_pairwise_items - def _get_cosine_similarity(self, n_partitions=200): + def _get_cosine_similarity(self): + + if self.item_sim_measure == "item_cooccurrence_count": + # calculate item-item similarity based on item co-occurrence count + self._get_cooccurrence_similarity() + elif self.item_sim_measure == "item_feature_vector": + # calculate item-item similarity based on item feature vectors + self._get_item_feature_similarity() + else: + raise Exception( + "item_sim_measure not recognized! The available options include 'item_cooccurrence_count' and 'item_feature_vector'." + ) + return self.df_cosine_similarity + + def _get_cooccurrence_similarity(self): """Cosine similarity metric from :Citation: @@ -850,6 +892,34 @@ def _get_cosine_similarity(self, n_partitions=200): return self.df_cosine_similarity + def _get_item_feature_similarity(self): + """Cosine similarity metric based on item feature vectors + + The item indexes in the result are such that i1 <= i2. 
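+
+        For intuition (toy numbers): with f1 = [0, 1, 1, 0, 0] and
+        f2 = [0, 1, 0, 1, 0], sim = f1.f2 / (||f1|| * ||f2||)
+        = 1 / (sqrt(2) * sqrt(2)) = 0.5.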
+ """ + if self.df_cosine_similarity is None: + df1 = self.item_feature_df[[self.col_item, self.col_item_features]] + df1.columns = ["i1", "f1"] + df1["key"] = 0 + df2 = self.item_feature_df[[self.col_item, self.col_item_features]] + df2.columns = ["i2", "f2"] + df2["key"] = 0 + + df = pd.merge(df1, df2, on="key", how="outer").drop("key", axis=1) + df_item_feature_pair = df[(df["i1"] <= df["i2"])].reset_index(drop=True) + + df_item_feature_pair["sim"] = df_item_feature_pair.apply( + lambda x: float(x.f1.dot(x.f2)) + / float(np.linalg.norm(x.f1, 2) * np.linalg.norm(x.f2, 2)), + axis=1, + ) + + self.df_cosine_similarity = df_item_feature_pair[ + ["i1", "i2", "sim"] + ].sort_values(["i1", "i2"]) + + return self.df_cosine_similarity + # Diversity metrics def _get_intralist_similarity(self, df): """Intra-list similarity from diff --git a/tests/unit/recommenders/evaluation/test_python_evaluation.py b/tests/unit/recommenders/evaluation/test_python_evaluation.py index 8586131f15..650ec98ef7 100644 --- a/tests/unit/recommenders/evaluation/test_python_evaluation.py +++ b/tests/unit/recommenders/evaluation/test_python_evaluation.py @@ -392,6 +392,11 @@ def target_metrics(): "user_diversity": pd.DataFrame( dict(UserId=[1, 2, 3], user_diversity=[0.29289, 1.0, 0.0]) ), + # diversity values when using item features to calculate item similarity + "diversity_item_feature_vector": pytest.approx(0.5000, TOL), + "user_diversity_item_feature_vector": pd.DataFrame( + dict(UserId=[1, 2, 3], user_diversity=[0.5000, 0.5000, 0.5000]) + ), "user_item_serendipity": pd.DataFrame( dict( UserId=[1, 1, 2, 2, 3, 3], @@ -410,6 +415,25 @@ def target_metrics(): dict(UserId=[1, 2, 3], user_serendipity=[0.363915, 0.53455, 0.403775]) ), "serendipity": pytest.approx(0.43408, TOL), + # serendipity values when using item features to calculate item similarity + "user_item_serendipity_item_feature_vector": pd.DataFrame( + dict( + UserId=[1, 1, 2, 2, 3, 3], + ItemId=[3, 5, 2, 5, 1, 2], + user_item_serendipity=[ + 0.5000, + 0.0, + 0.75, + 0.5000, + 0.6667, + 0.0, + ], + ) + ), + "user_serendipity_item_feature_vector": pd.DataFrame( + dict(UserId=[1, 2, 3], user_serendipity=[0.2500, 0.625, 0.3333]) + ), + "serendipity_item_feature_vector": pytest.approx(0.4028, TOL), } @@ -426,11 +450,24 @@ def python_diversity_data(): "Relevance": [1, 0, 1, 1, 1, 0], } ) - return train_df, reco_df + + item_feature_df = pd.DataFrame( + { + "ItemId": [1, 2, 3, 4, 5], + "features": [ + np.array([0.0, 1.0, 1.0, 0.0, 0.0], dtype=float), + np.array([0.0, 1.0, 0.0, 1.0, 0.0], dtype=float), + np.array([0.0, 0.0, 1.0, 1.0, 0.0], dtype=float), + np.array([0.0, 0.0, 1.0, 0.0, 1.0], dtype=float), + np.array([0.0, 0.0, 0.0, 1.0, 1.0], dtype=float), + ], + } + ) + return train_df, reco_df, item_feature_df def test_catalog_coverage(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -439,7 +476,7 @@ def test_catalog_coverage(python_diversity_data, target_metrics): def test_distributional_coverage(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -448,7 +485,7 @@ def test_distributional_coverage(python_diversity_data, target_metrics): def test_item_novelty(python_diversity_data, 
target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -467,7 +504,7 @@ def test_item_novelty(python_diversity_data, target_metrics): def test_novelty(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -484,7 +521,7 @@ def test_novelty(python_diversity_data, target_metrics): def test_user_diversity(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -498,7 +535,7 @@ def test_user_diversity(python_diversity_data, target_metrics): def test_diversity(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) @@ -506,7 +543,7 @@ def test_diversity(python_diversity_data, target_metrics): def test_user_item_serendipity(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, @@ -524,7 +561,7 @@ def test_user_item_serendipity(python_diversity_data, target_metrics): def test_user_serendipity(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, @@ -542,7 +579,7 @@ def test_user_serendipity(python_diversity_data, target_metrics): def test_serendipity(python_diversity_data, target_metrics): - train_df, reco_df = python_diversity_data + train_df, reco_df, _ = python_diversity_data evaluator = PythonDiversityEvaluation( train_df=train_df, reco_df=reco_df, @@ -551,3 +588,91 @@ def test_serendipity(python_diversity_data, target_metrics): col_relevance="Relevance", ) assert target_metrics["serendipity"] == evaluator.serendipity() + + +def test_user_diversity_item_feature_vector(python_diversity_data, target_metrics): + train_df, reco_df, item_feature_df = python_diversity_data + evaluator = PythonDiversityEvaluation( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure="item_feature_vector", + col_user="UserId", + col_item="ItemId", + ) + actual = evaluator.user_diversity() + assert_frame_equal( + target_metrics["user_diversity_item_feature_vector"], + actual, + check_exact=False, + check_less_precise=4, + ) + + +def test_diversity_item_feature_vector(python_diversity_data, target_metrics): + train_df, reco_df, item_feature_df = python_diversity_data + evaluator = PythonDiversityEvaluation( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure="item_feature_vector", + col_user="UserId", + col_item="ItemId", + ) + assert target_metrics["diversity_item_feature_vector"] == evaluator.diversity() + + +def test_user_item_serendipity_item_feature_vector( + python_diversity_data, target_metrics +): + train_df, reco_df, item_feature_df = python_diversity_data + evaluator = PythonDiversityEvaluation( + 
train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure="item_feature_vector", + col_user="UserId", + col_item="ItemId", + col_relevance="Relevance", + ) + actual = evaluator.user_item_serendipity() + assert_frame_equal( + target_metrics["user_item_serendipity_item_feature_vector"], + actual, + check_exact=False, + check_less_precise=4, + ) + + +def test_user_serendipity_item_feature_vector(python_diversity_data, target_metrics): + train_df, reco_df, item_feature_df = python_diversity_data + evaluator = PythonDiversityEvaluation( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure="item_feature_vector", + col_user="UserId", + col_item="ItemId", + col_relevance="Relevance", + ) + actual = evaluator.user_serendipity() + assert_frame_equal( + target_metrics["user_serendipity_item_feature_vector"], + actual, + check_exact=False, + check_less_precise=4, + ) + + +def test_serendipity_item_feature_vector(python_diversity_data, target_metrics): + train_df, reco_df, item_feature_df = python_diversity_data + evaluator = PythonDiversityEvaluation( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure="item_feature_vector", + col_user="UserId", + col_item="ItemId", + col_relevance="Relevance", + ) + assert target_metrics["serendipity_item_feature_vector"] == evaluator.serendipity() From 09400ea7b0535d25e9242d9cc8e6457f8e36595e Mon Sep 17 00:00:00 2001 From: Jianjie Liu Date: Tue, 14 Sep 2021 16:06:44 +0000 Subject: [PATCH 10/22] Hotfix typos --- tests/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/README.md b/tests/README.md index daa555beee..e6a5cab670 100644 --- a/tests/README.md +++ b/tests/README.md @@ -176,8 +176,8 @@ Example: 2. `tox -e gpu -- tests/unit -m "gpu and notebook"` (runs the gpu notebook tests with `recommenders[dev,example,gpu]` dependencies) 3. `tox -e spark -- tests/unit -m "spark and notebook"` (runs the spark notebook tests with `recommenders[dev,example,spark]` dependencies) 4. `tox -e all -- tests/unit` (to run all of the unit tests with `recommenders[all]` dependencies) - 5. `tox -e py -- test/unit` (runs the unit tests under the default python interpreter with `recommenders[all]`) - 6. `tox -e py37 -- test/unit` (runs the unit tests under Python3.7 with `recommenders[all]` ) + 5. `tox -e py -- tests/unit` (runs the unit tests under the default python interpreter with `recommenders[all]`) + 6. 
`tox -e py37 -- tests/unit` (runs the unit tests under Python3.7 with `recommenders[all]` ) ## How to create tests on notebooks with Papermill From 20b0b53f74ca1e4d32d77e681bc3b44cc3e79794 Mon Sep 17 00:00:00 2001 From: YanZhangADS Date: Wed, 15 Sep 2021 01:54:40 +0000 Subject: [PATCH 11/22] replace diversity class into functions --- recommenders/evaluation/python_evaluation.py | 1027 ++++++++++------- .../evaluation/test_python_evaluation.py | 104 +- 2 files changed, 683 insertions(+), 448 deletions(-) diff --git a/recommenders/evaluation/python_evaluation.py b/recommenders/evaluation/python_evaluation.py index b47c58e962..a04fff5450 100644 --- a/recommenders/evaluation/python_evaluation.py +++ b/recommenders/evaluation/python_evaluation.py @@ -698,48 +698,131 @@ def get_top_k_items( } # diversity metrics -class PythonDiversityEvaluation: - """Python Diversity Evaluator""" +def check_column_dtypes_diversity_serendipity(func): + """Checks columns of DataFrame inputs + + This includes the checks on: + + * whether the input columns exist in the input DataFrames + * whether the data types of col_user as well as col_item are matched in the two input DataFrames. + * whether reco_df contains any user_item pairs that are already shown in train_df + * check relevance column in reco_df + * check column names in item_feature_df - def __init__( - self, + Args: + func (function): function that will be wrapped + + Returns: + function: Wrapper function for checking dtypes. + """ + + @wraps(func) + def check_column_dtypes_diversity_serendipity_wrapper( train_df, reco_df, item_feature_df=None, item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, col_relevance=None, + *args, + **kwargs ): - """Initializer. + """Check columns of DataFrame inputs + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. 
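
        Returns:
            The return value of the wrapped metric function, evaluated on the
            validated inputs.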
+ """ + + if not has_columns(train_df, [col_user, col_item]): + raise ValueError("Missing columns in train_df DataFrame") + if not has_columns(reco_df, [col_user, col_item]): + raise ValueError("Missing columns in reco_df DataFrame") + if not has_same_base_dtype(train_df, reco_df, columns=[col_user, col_item]): + raise ValueError("Columns in provided DataFrames are not the same datatype") + if col_relevance is None: + col_relevance = DEFAULT_RELEVANCE_COL + # relevance term, default is 1 (relevant) for all + reco_df = reco_df[[col_user, col_item]] + reco_df[col_relevance] = 1.0 + else: + col_relevance = col_relevance + reco_df = reco_df[[col_user, col_item, col_relevance]].astype( + {col_relevance: np.float16} + ) + if item_sim_measure == "item_feature_vector": + required_columns = [col_item, col_item_features] + if item_feature_df is not None: + if not has_columns(item_feature_df, required_columns): + raise ValueError("Missing columns in item_feature_df DataFrame") + else: + raise Exception( + "item_feature_df not specified! item_feature_df must be provided if choosing to use item_feature_vector to calculate item similarity. item_feature_df should have columns:" + + str(required_columns) + ) + # check if reco_df contains any user_item pairs that are already shown in train_df + count_intersection = pd.merge( + train_df, reco_df, how="inner", on=[col_user, col_item] + ).shape[0] + if count_intersection != 0: + raise Exception( + "reco_df should not contain any user_item pairs that are already shown in train_df" + ) - This is the Python version of diversity metrics evaluator. - The methods of this class calculate the following diversity metrics: + return func( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure=item_sim_measure, + col_user=col_user, + col_item=col_item, + col_sim=col_sim, + col_relevance=col_relevance, + *args, + **kwargs + ) - * Coverage - it includes two metrics: - 1. catalog_coverage, which measures the proportion of items that get recommended from the item catalog; - 2. distributional_coverage, which measures how unequally different items are recommended in the - recommendations to all users. + return check_column_dtypes_diversity_serendipity_wrapper - * Novelty - A more novel item indicates it is less popular, i.e. it gets recommended less frequently. - * Diversity - The dissimilarity of items being recommended. - * Serendipity - The "unusualness" or "surprise" of recommendations to a user. When 'col_relevance' is used, it indicates how "pleasant surprise" of recommendations is to a user. - The metric definitions/formulations are based on the following references with modification: +def check_column_dtypes_novelty_coverage(func): + """Checks columns of DataFrame inputs - :Citation: + This includes the checks on: - G. Shani and A. Gunawardana, Evaluating Recommendation Systems, - Recommender Systems Handbook pp. 257-297, 2010. + * whether the input columns exist in the input DataFrames + * whether the data types of col_user as well as col_item are matched in the two input DataFrames. + * whether reco_df contains any user_item pairs that are already shown in train_df - Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: introducing - serendipity into music recommendation, WSDM 2012 + Args: + func (function): function that will be wrapped - P. Castells, S. Vargas, and J. 
Wang, Novelty and diversity metrics for recommender systems: - choice, discovery and relevance, ECIR 2011 + Returns: + function: Wrapper function for checking dtypes. + """ - Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, - eugeneyan.com, April 2020 + @wraps(func) + def check_column_dtypes_novelty_coverage_wrapper( + train_df, + reco_df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + *args, + **kwargs + ): + """Check columns of DataFrame inputs Args: train_df (pandas.DataFrame): Data set with historical data for users and items they @@ -747,458 +830,582 @@ def __init__( Interaction here follows the *item choice model* from Castells et al. reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). Assumed to not contain any duplicate user-item pairs. - item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). - item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. col_user (str): User id column name. col_item (str): Item id column name. - col_relevance (str): This column indicates whether the recommended item is actually - relevant to the user or not. - """ - self.train_df = train_df[[col_user, col_item]] - self.col_user = col_user - self.col_item = col_item - self.sim_col = DEFAULT_SIMILARITY_COL - self.df_cosine_similarity = None - self.df_user_item_serendipity = None - self.df_user_serendipity = None - self.avg_serendipity = None - self.df_item_novelty = None - self.avg_novelty = None - self.df_intralist_similarity = None - self.df_user_diversity = None - self.avg_diversity = None - self.item_feature_df = item_feature_df - self.item_sim_measure = item_sim_measure + """ - if col_relevance is None: - self.col_relevance = DEFAULT_RELEVANCE_COL - # relevance term, default is 1 (relevant) for all - self.reco_df = reco_df[[col_user, col_item]] - self.reco_df[self.col_relevance] = 1.0 - else: - self.col_relevance = col_relevance - self.reco_df = reco_df[[col_user, col_item, col_relevance]].astype( - {col_relevance: np.float16} - ) - if self.item_sim_measure == "item_feature_vector": - self.col_item_features = DEFAULT_ITEM_FEATURES_COL - required_columns = [self.col_item, self.col_item_features] - # print(str(required_columns)) - # print(str(item_feature_df.columns.values.tolist())) - if self.item_feature_df is not None: - - if str(required_columns) != str( - item_feature_df.columns.values.tolist() - ): - raise Exception( - "Incorrect column names! item_feature_df should have columns:" - + str(required_columns) - ) - else: - raise Exception( - "item_feature_df not specified! item_feature_df must be provided if choosing to use item_feature_vector to calculate item similarity. 
item_feature_df should have columns:"
                    + str(required_columns)
                )

        count_intersection = pd.merge(
            train_df, reco_df, how="inner", on=[col_user, col_item]
        ).shape[0]
        if count_intersection != 0:
            raise Exception(
                "reco_df should not contain any user_item pairs that are already shown in train_df"
            )

        return func(
            train_df=train_df,
            reco_df=reco_df,
            col_user=col_user,
            col_item=col_item,
            *args,
            **kwargs
        )

    return check_column_dtypes_novelty_coverage_wrapper


@lru_cache_df(maxsize=1)
def _get_pairwise_items(
    df,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
):
    """Get pairwise combinations of items per user (ignoring duplicate pairs [1,2] == [2,1])"""
    df_user_i1 = df[[col_user, col_item]]
    df_user_i1.columns = [col_user, "i1"]

    df_user_i2 = df[[col_user, col_item]]
    df_user_i2.columns = [col_user, "i2"]

    df_user_i1_i2 = pd.merge(df_user_i1, df_user_i2, how="inner", on=[col_user])

    df_pairwise_items = df_user_i1_i2[(df_user_i1_i2["i1"] <= df_user_i1_i2["i2"])][
        [col_user, "i1", "i2"]
    ].reset_index(drop=True)
    return df_pairwise_items


@lru_cache_df(maxsize=1)
def _get_cosine_similarity(
    train_df,
    item_feature_df=None,
    item_sim_measure=DEFAULT_ITEM_SIM_MEASURE,
    col_item_features=DEFAULT_ITEM_FEATURES_COL,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_sim=DEFAULT_SIMILARITY_COL,
):

    if item_sim_measure == "item_cooccurrence_count":
        # calculate item-item similarity based on item co-occurrence count
        df_cosine_similarity = _get_cooccurrence_similarity(
            train_df, col_user, col_item, col_sim
        )
    elif item_sim_measure == "item_feature_vector":
        # calculate item-item similarity based on item feature vectors
        df_cosine_similarity = _get_item_feature_similarity(
            item_feature_df, col_item_features, col_user, col_item, col_sim
        )
    else:
        raise Exception(
            "item_sim_measure not recognized! The available options include 'item_cooccurrence_count' and 'item_feature_vector'."
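            # This branch is reached only for an explicitly unsupported value:
            # per the docstrings above, the default measure is
            # "item_cooccurrence_count".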
+ ) + return df_cosine_similarity - df_pairwise_items = df_user_i1_i2[(df_user_i1_i2["i1"] <= df_user_i1_i2["i2"])][ - [self.col_user, "i1", "i2"] - ].reset_index(drop=True) - return df_pairwise_items - def _get_cosine_similarity(self): +@lru_cache_df(maxsize=1) +def _get_cooccurrence_similarity( + train_df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, +): + """Cosine similarity metric from - if self.item_sim_measure == "item_cooccurrence_count": - # calculate item-item similarity based on item co-occurrence count - self._get_cooccurrence_similarity() - elif self.item_sim_measure == "item_feature_vector": - # calculate item-item similarity based on item feature vectors - self._get_item_feature_similarity() - else: - raise Exception( - "item_sim_measure not recognized! The available options include 'item_cooccurrence_count' and 'item_feature_vector'." - ) - return self.df_cosine_similarity + :Citation: - def _get_cooccurrence_similarity(self): - """Cosine similarity metric from + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 - :Citation: + The item indexes in the result are such that i1 <= i2. + """ + pairs = _get_pairwise_items(train_df, col_user, col_item) + pairs_count = pd.DataFrame( + {"count": pairs.groupby(["i1", "i2"]).size()} + ).reset_index() + item_count = pd.DataFrame( + {"count": train_df.groupby([col_item]).size()} + ).reset_index() + item_count["item_sqrt_count"] = item_count["count"] ** 0.5 + item_co_occur = pairs_count.merge( + item_count[[col_item, "item_sqrt_count"]], + left_on=["i1"], + right_on=[col_item], + ).drop(columns=[col_item]) + + item_co_occur.columns = ["i1", "i2", "count", "i1_sqrt_count"] + + item_co_occur = item_co_occur.merge( + item_count[[col_item, "item_sqrt_count"]], + left_on=["i2"], + right_on=[col_item], + ).drop(columns=[col_item]) + item_co_occur.columns = [ + "i1", + "i2", + "count", + "i1_sqrt_count", + "i2_sqrt_count", + ] - Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: - introducing serendipity into music recommendation, WSDM 2012 + item_co_occur[col_sim] = item_co_occur["count"] / ( + item_co_occur["i1_sqrt_count"] * item_co_occur["i2_sqrt_count"] + ) + df_cosine_similarity = ( + item_co_occur[["i1", "i2", col_sim]] + .sort_values(["i1", "i2"]) + .reset_index(drop=True) + ) - The item indexes in the result are such that i1 <= i2. 
- """ - if self.df_cosine_similarity is None: - pairs = self._get_pairwise_items(df=self.train_df) - pairs_count = pd.DataFrame( - {"count": pairs.groupby(["i1", "i2"]).size()} - ).reset_index() - item_count = pd.DataFrame( - {"count": self.train_df.groupby([self.col_item]).size()} - ).reset_index() - item_count["item_sqrt_count"] = item_count["count"] ** 0.5 - item_co_occur = pairs_count.merge( - item_count[[self.col_item, "item_sqrt_count"]], - left_on=["i1"], - right_on=[self.col_item], - ).drop(columns=[self.col_item]) - - item_co_occur.columns = ["i1", "i2", "count", "i1_sqrt_count"] - - item_co_occur = item_co_occur.merge( - item_count[[self.col_item, "item_sqrt_count"]], - left_on=["i2"], - right_on=[self.col_item], - ).drop(columns=[self.col_item]) - item_co_occur.columns = [ - "i1", - "i2", - "count", - "i1_sqrt_count", - "i2_sqrt_count", - ] - - item_co_occur[self.sim_col] = item_co_occur["count"] / ( - item_co_occur["i1_sqrt_count"] * item_co_occur["i2_sqrt_count"] - ) - self.df_cosine_similarity = ( - item_co_occur[["i1", "i2", self.sim_col]] - .sort_values(["i1", "i2"]) - .reset_index(drop=True) - ) + return df_cosine_similarity - return self.df_cosine_similarity - def _get_item_feature_similarity(self): - """Cosine similarity metric based on item feature vectors +@lru_cache_df(maxsize=1) +def _get_item_feature_similarity( + item_feature_df, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, +): + """Cosine similarity metric based on item feature vectors - The item indexes in the result are such that i1 <= i2. - """ - if self.df_cosine_similarity is None: - df1 = self.item_feature_df[[self.col_item, self.col_item_features]] - df1.columns = ["i1", "f1"] - df1["key"] = 0 - df2 = self.item_feature_df[[self.col_item, self.col_item_features]] - df2.columns = ["i2", "f2"] - df2["key"] = 0 - - df = pd.merge(df1, df2, on="key", how="outer").drop("key", axis=1) - df_item_feature_pair = df[(df["i1"] <= df["i2"])].reset_index(drop=True) - - df_item_feature_pair["sim"] = df_item_feature_pair.apply( - lambda x: float(x.f1.dot(x.f2)) - / float(np.linalg.norm(x.f1, 2) * np.linalg.norm(x.f2, 2)), - axis=1, - ) + The item indexes in the result are such that i1 <= i2. 
+ """ + df1 = item_feature_df[[col_item, col_item_features]] + df1.columns = ["i1", "f1"] + df1["key"] = 0 + df2 = item_feature_df[[col_item, col_item_features]] + df2.columns = ["i2", "f2"] + df2["key"] = 0 + + df = pd.merge(df1, df2, on="key", how="outer").drop("key", axis=1) + df_item_feature_pair = df[(df["i1"] <= df["i2"])].reset_index(drop=True) + + df_item_feature_pair[col_sim] = df_item_feature_pair.apply( + lambda x: float(x.f1.dot(x.f2)) + / float(np.linalg.norm(x.f1, 2) * np.linalg.norm(x.f2, 2)), + axis=1, + ) - self.df_cosine_similarity = df_item_feature_pair[ - ["i1", "i2", "sim"] - ].sort_values(["i1", "i2"]) + df_cosine_similarity = df_item_feature_pair[["i1", "i2", col_sim]].sort_values( + ["i1", "i2"] + ) - return self.df_cosine_similarity + return df_cosine_similarity - # Diversity metrics - def _get_intralist_similarity(self, df): - """Intra-list similarity from - :Citation: +# Diversity metrics +@lru_cache_df(maxsize=1) +def _get_intralist_similarity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, +): + """Intra-list similarity from - "Improving Recommendation Lists Through Topic Diversification", - Ziegler, McNee, Konstan and Lausen, 2005. - """ - if self.df_intralist_similarity is None: - pairs = self._get_pairwise_items(df=df) - similarity_df = self._get_cosine_similarity() - # Fillna(0) is needed in the cases where similarity_df does not have an entry for a pair of items. - # e.g. i1 and i2 have never occurred together. - - item_pair_sim = pairs.merge(similarity_df, on=["i1", "i2"], how="left") - item_pair_sim[self.sim_col].fillna(0, inplace=True) - item_pair_sim = item_pair_sim.loc[ - item_pair_sim["i1"] != item_pair_sim["i2"] - ].reset_index(drop=True) - self.df_intralist_similarity = ( - item_pair_sim.groupby([self.col_user]) - .agg({self.sim_col: "mean"}) - .reset_index() - ) - self.df_intralist_similarity.columns = [self.col_user, "avg_il_sim"] + :Citation: - return self.df_intralist_similarity + "Improving Recommendation Lists Through Topic Diversification", + Ziegler, McNee, Konstan and Lausen, 2005. + """ + pairs = _get_pairwise_items(reco_df, col_user, col_item) + similarity_df = _get_cosine_similarity( + train_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + # Fillna(0) is needed in the cases where similarity_df does not have an entry for a pair of items. + # e.g. i1 and i2 have never occurred together. + + item_pair_sim = pairs.merge(similarity_df, on=["i1", "i2"], how="left") + item_pair_sim[col_sim].fillna(0, inplace=True) + item_pair_sim = item_pair_sim.loc[ + item_pair_sim["i1"] != item_pair_sim["i2"] + ].reset_index(drop=True) + df_intralist_similarity = ( + item_pair_sim.groupby([col_user]).agg({col_sim: "mean"}).reset_index() + ) + df_intralist_similarity.columns = [col_user, "avg_il_sim"] - def user_diversity(self): - """Calculate average diversity of recommendations for each user. - The metric definition is based on formula (3) in the following reference: + return df_intralist_similarity - :Citation: - Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. 
Jambor, Auralist: - introducing serendipity into music recommendation, WSDM 2012 +@check_column_dtypes_diversity_serendipity +@lru_cache_df(maxsize=1) +def user_diversity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average diversity of recommendations for each user. + The metric definition is based on formula (3) in the following reference: - Returns: - pandas.DataFrame: A dataframe with the following columns: col_user, user_diversity. - """ - if self.df_user_diversity is None: - self.df_intralist_similarity = self._get_intralist_similarity(self.reco_df) - self.df_user_diversity = self.df_intralist_similarity - self.df_user_diversity["user_diversity"] = ( - 1 - self.df_user_diversity["avg_il_sim"] - ) - self.df_user_diversity = ( - self.df_user_diversity[[self.col_user, "user_diversity"]] - .sort_values(self.col_user) - .reset_index(drop=True) - ) + :Citation: - return self.df_user_diversity + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 - def diversity(self): - """Calculate average diversity of recommendations across all users. + Returns: + pandas.DataFrame: A dataframe with the following columns: col_user, user_diversity. + """ - Returns: - float: diversity. - """ - if self.avg_diversity is None: - self.df_user_diversity = self.user_diversity() - self.avg_diversity = self.df_user_diversity.agg({"user_diversity": "mean"})[ - 0 - ] - return self.avg_diversity - - # Novelty metrics - def historical_item_novelty(self): - """Calculate novelty for each item. Novelty is computed as the minus logarithm of - (number of interactions with item / total number of interactions). The definition of the metric - is based on the following reference using the choice model (eqs. 1 and 6): - - :Citation: - - P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: - choice, discovery and relevance, ECIR 2011 - - The novelty of an item can be defined relative to a set of observed events on the set of all items. - These can be events of user choice (item "is picked" by a random user) or user discovery - (item "is known" to a random user). The above definition of novelty reflects a factor of item popularity. - High novelty values correspond to long-tail items in the density function, that few users have interacted - with and low novelty values correspond to popular head items. - - Returns: - pandas.DataFrame: A dataframe with the following columns: col_item, item_novelty. 
- """ - if self.df_item_novelty is None: - n_records = self.train_df.shape[0] - item_count = pd.DataFrame( - {"count": self.train_df.groupby([self.col_item]).size()} - ).reset_index() - item_count["item_novelty"] = -np.log2(item_count["count"] / n_records) - self.df_item_novelty = ( - item_count[[self.col_item, "item_novelty"]] - .sort_values(self.col_item) - .reset_index(drop=True) - ) + df_intralist_similarity = _get_intralist_similarity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + df_user_diversity = df_intralist_similarity + df_user_diversity["user_diversity"] = 1 - df_user_diversity["avg_il_sim"] + df_user_diversity = ( + df_user_diversity[[col_user, "user_diversity"]] + .sort_values(col_user) + .reset_index(drop=True) + ) - return self.df_item_novelty + return df_user_diversity - def novelty(self): - """Calculate the average novelty in a list of recommended items (this assumes that the recommendation list - is already computed). Follows section 5 from - :Citation: +@check_column_dtypes_diversity_serendipity +def diversity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average diversity of recommendations across all users. - P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: - choice, discovery and relevance, ECIR 2011 + Returns: + float: diversity. + """ + df_user_diversity = user_diversity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + avg_diversity = df_user_diversity.agg({"user_diversity": "mean"})[0] + return avg_diversity - Returns: - float: novelty. - """ - if self.avg_novelty is None: - self.df_item_novelty = self.historical_item_novelty() - n_recommendations = self.reco_df.shape[0] - reco_item_count = pd.DataFrame( - {"count": self.reco_df.groupby([self.col_item]).size()} - ).reset_index() - reco_item_novelty = reco_item_count.merge( - self.df_item_novelty, on=self.col_item - ) - reco_item_novelty["product"] = ( - reco_item_novelty["count"] * reco_item_novelty["item_novelty"] - ) - self.avg_novelty = ( - reco_item_novelty.agg({"product": "sum"})[0] / n_recommendations - ) - return self.avg_novelty +# Novelty metrics +@check_column_dtypes_novelty_coverage +@lru_cache_df(maxsize=1) +def historical_item_novelty( + train_df, + reco_df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, +): + """Calculate novelty for each item. Novelty is computed as the minus logarithm of + (number of interactions with item / total number of interactions). The definition of the metric + is based on the following reference using the choice model (eqs. 1 and 6): - # Serendipity metrics - def user_item_serendipity(self): - """Calculate serendipity of each item in the recommendations for each user. - The metric definition is based on the following references: + :Citation: - :Citation: + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 - Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: - introducing serendipity into music recommendation, WSDM 2012 + The novelty of an item can be defined relative to a set of observed events on the set of all items. 
+ These can be events of user choice (item "is picked" by a random user) or user discovery + (item "is known" to a random user). The above definition of novelty reflects a factor of item popularity. + High novelty values correspond to long-tail items in the density function, that few users have interacted + with and low novelty values correspond to popular head items. - Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, - eugeneyan.com, April 2020 + Returns: + pandas.DataFrame: A dataframe with the following columns: col_item, item_novelty. + """ - Returns: - pandas.DataFrame: A dataframe with columns: col_user, col_item, user_item_serendipity. - """ - # for every col_user, col_item in reco_df, join all interacted items from train_df. - # These interacted items are repeated for each item in reco_df for a specific user. - if self.df_user_item_serendipity is None: - self.df_cosine_similarity = self._get_cosine_similarity() - reco_user_item = self.reco_df[[self.col_user, self.col_item]] - reco_user_item["reco_item_tmp"] = reco_user_item[self.col_item] - - train_user_item = self.train_df[[self.col_user, self.col_item]] - train_user_item.columns = [self.col_user, "train_item_tmp"] - - reco_train_user_item = reco_user_item.merge( - train_user_item, on=[self.col_user] - ) - reco_train_user_item["i1"] = reco_train_user_item[ - ["reco_item_tmp", "train_item_tmp"] - ].min(axis=1) - reco_train_user_item["i2"] = reco_train_user_item[ - ["reco_item_tmp", "train_item_tmp"] - ].max(axis=1) - - reco_train_user_item_sim = reco_train_user_item.merge( - self.df_cosine_similarity, on=["i1", "i2"], how="left" - ) - reco_train_user_item_sim[self.sim_col].fillna(0, inplace=True) + n_records = train_df.shape[0] + item_count = pd.DataFrame( + {"count": train_df.groupby([col_item]).size()} + ).reset_index() + item_count["item_novelty"] = -np.log2(item_count["count"] / n_records) + df_item_novelty = ( + item_count[[col_item, "item_novelty"]] + .sort_values(col_item) + .reset_index(drop=True) + ) - reco_user_item_avg_sim = ( - reco_train_user_item_sim.groupby([self.col_user, self.col_item]) - .agg({self.sim_col: "mean"}) - .reset_index() - ) - reco_user_item_avg_sim.columns = [ - self.col_user, - self.col_item, - "avg_item2interactedHistory_sim", - ] - - self.df_user_item_serendipity = reco_user_item_avg_sim.merge( - self.reco_df, on=[self.col_user, self.col_item] - ) - self.df_user_item_serendipity["user_item_serendipity"] = ( - 1 - self.df_user_item_serendipity["avg_item2interactedHistory_sim"] - ) * self.df_user_item_serendipity[self.col_relevance] - self.df_user_item_serendipity = ( - self.df_user_item_serendipity[ - [self.col_user, self.col_item, "user_item_serendipity"] - ] - .sort_values([self.col_user, self.col_item]) - .reset_index(drop=True) - ) + return df_item_novelty - return self.df_user_item_serendipity - def user_serendipity(self): - """Calculate average serendipity for each user's recommendations. +@check_column_dtypes_novelty_coverage +def novelty(train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL): + """Calculate the average novelty in a list of recommended items (this assumes that the recommendation list + is already computed). Follows section 5 from - Returns: - pandas.DataFrame: A dataframe with following columns: col_user, user_serendipity. 
- """ - if self.df_user_serendipity is None: - self.df_user_item_serendipity = self.user_item_serendipity() - self.df_user_serendipity = ( - self.df_user_item_serendipity.groupby(self.col_user) - .agg({"user_item_serendipity": "mean"}) - .reset_index() - ) - self.df_user_serendipity.columns = [self.col_user, "user_serendipity"] - self.df_user_serendipity = self.df_user_serendipity.sort_values( - self.col_user - ).reset_index(drop=True) + :Citation: - return self.df_user_serendipity + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 - def serendipity(self): - """Calculate average serendipity for recommendations across all users. + Returns: + float: novelty. + """ - Returns: - float: serendipity. - """ - if self.avg_serendipity is None: - self.df_user_serendipity = self.user_serendipity() - self.avg_serendipity = self.df_user_serendipity.agg( - {"user_serendipity": "mean"} - )[0] - return self.avg_serendipity + df_item_novelty = historical_item_novelty(train_df, reco_df, col_user, col_item) + n_recommendations = reco_df.shape[0] + reco_item_count = pd.DataFrame( + {"count": reco_df.groupby([col_item]).size()} + ).reset_index() + reco_item_novelty = reco_item_count.merge(df_item_novelty, on=col_item) + reco_item_novelty["product"] = ( + reco_item_novelty["count"] * reco_item_novelty["item_novelty"] + ) + avg_novelty = reco_item_novelty.agg({"product": "sum"})[0] / n_recommendations - # Coverage metrics - def catalog_coverage(self): - """Calculate catalog coverage for recommendations across all users. - The metric definition is based on the "catalog coverage" definition in the following reference: + return avg_novelty - :Citation: - G. Shani and A. Gunawardana, Evaluating Recommendation Systems, - Recommender Systems Handbook pp. 257-297, 2010. +# Serendipity metrics +@check_column_dtypes_diversity_serendipity +@lru_cache_df(maxsize=1) +def user_item_serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate serendipity of each item in the recommendations for each user. + The metric definition is based on the following references: - Returns: - float: catalog coverage - """ - # distinct item count in reco_df - count_distinct_item_reco = self.reco_df[self.col_item].nunique() - # distinct item count in train_df - count_distinct_item_train = self.train_df[self.col_item].nunique() + :Citation: - # catalog coverage - c_coverage = count_distinct_item_reco / count_distinct_item_train - return c_coverage + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 - def distributional_coverage(self): - """Calculate distributional coverage for recommendations across all users. - The metric definition is based on formula (21) in the following reference: + Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, + eugeneyan.com, April 2020 - :Citation: + Returns: + pandas.DataFrame: A dataframe with columns: col_user, col_item, user_item_serendipity. + """ + # for every col_user, col_item in reco_df, join all interacted items from train_df. + # These interacted items are repeated for each item in reco_df for a specific user. 
+ df_cosine_similarity = _get_cosine_similarity( + train_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + reco_user_item = reco_df[[col_user, col_item]] + reco_user_item["reco_item_tmp"] = reco_user_item[col_item] + + train_user_item = train_df[[col_user, col_item]] + train_user_item.columns = [col_user, "train_item_tmp"] + + reco_train_user_item = reco_user_item.merge(train_user_item, on=[col_user]) + reco_train_user_item["i1"] = reco_train_user_item[ + ["reco_item_tmp", "train_item_tmp"] + ].min(axis=1) + reco_train_user_item["i2"] = reco_train_user_item[ + ["reco_item_tmp", "train_item_tmp"] + ].max(axis=1) + + reco_train_user_item_sim = reco_train_user_item.merge( + df_cosine_similarity, on=["i1", "i2"], how="left" + ) + reco_train_user_item_sim[col_sim].fillna(0, inplace=True) - G. Shani and A. Gunawardana, Evaluating Recommendation Systems, - Recommender Systems Handbook pp. 257-297, 2010. + reco_user_item_avg_sim = ( + reco_train_user_item_sim.groupby([col_user, col_item]) + .agg({col_sim: "mean"}) + .reset_index() + ) + reco_user_item_avg_sim.columns = [ + col_user, + col_item, + "avg_item2interactedHistory_sim", + ] - Returns: - float: distributional coverage - """ - # In reco_df, how many times each col_item is being recommended - df_itemcnt_reco = pd.DataFrame( - {"count": self.reco_df.groupby([self.col_item]).size()} - ).reset_index() + df_user_item_serendipity = reco_user_item_avg_sim.merge( + reco_df, on=[col_user, col_item] + ) + df_user_item_serendipity["user_item_serendipity"] = ( + 1 - df_user_item_serendipity["avg_item2interactedHistory_sim"] + ) * df_user_item_serendipity[col_relevance] + df_user_item_serendipity = ( + df_user_item_serendipity[[col_user, col_item, "user_item_serendipity"]] + .sort_values([col_user, col_item]) + .reset_index(drop=True) + ) + + return df_user_item_serendipity + + +@lru_cache_df(maxsize=1) +@check_column_dtypes_diversity_serendipity +def user_serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average serendipity for each user's recommendations. + + Returns: + pandas.DataFrame: A dataframe with following columns: col_user, user_serendipity. + """ + df_user_item_serendipity = user_item_serendipity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + col_relevance, + ) + df_user_serendipity = ( + df_user_item_serendipity.groupby(col_user) + .agg({"user_item_serendipity": "mean"}) + .reset_index() + ) + df_user_serendipity.columns = [col_user, "user_serendipity"] + df_user_serendipity = df_user_serendipity.sort_values(col_user).reset_index( + drop=True + ) + + return df_user_serendipity + + +@check_column_dtypes_diversity_serendipity +def serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average serendipity for recommendations across all users. + + Returns: + float: serendipity. 
+ """ + df_user_serendipity = user_serendipity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + col_relevance, + ) + avg_serendipity = df_user_serendipity.agg({"user_serendipity": "mean"})[0] + return avg_serendipity + + +# Coverage metrics +@check_column_dtypes_novelty_coverage +def catalog_coverage( + train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL +): + """Calculate catalog coverage for recommendations across all users. + The metric definition is based on the "catalog coverage" definition in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Returns: + float: catalog coverage + """ + # distinct item count in reco_df + count_distinct_item_reco = reco_df[col_item].nunique() + # distinct item count in train_df + count_distinct_item_train = train_df[col_item].nunique() + + # catalog coverage + c_coverage = count_distinct_item_reco / count_distinct_item_train + return c_coverage + + +@check_column_dtypes_novelty_coverage +def distributional_coverage( + train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL +): + """Calculate distributional coverage for recommendations across all users. + The metric definition is based on formula (21) in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Returns: + float: distributional coverage + """ + # In reco_df, how many times each col_item is being recommended + df_itemcnt_reco = pd.DataFrame( + {"count": reco_df.groupby([col_item]).size()} + ).reset_index() - # the number of total recommendations - count_row_reco = self.reco_df.shape[0] + # the number of total recommendations + count_row_reco = reco_df.shape[0] - df_entropy = df_itemcnt_reco - df_entropy["p(i)"] = df_entropy["count"] / count_row_reco - df_entropy["entropy(i)"] = df_entropy["p(i)"] * np.log2(df_entropy["p(i)"]) + df_entropy = df_itemcnt_reco + df_entropy["p(i)"] = df_entropy["count"] / count_row_reco + df_entropy["entropy(i)"] = df_entropy["p(i)"] * np.log2(df_entropy["p(i)"]) - d_coverage = -df_entropy.agg({"entropy(i)": "sum"})[0] + d_coverage = -df_entropy.agg({"entropy(i)": "sum"})[0] - return d_coverage + return d_coverage diff --git a/tests/unit/recommenders/evaluation/test_python_evaluation.py b/tests/unit/recommenders/evaluation/test_python_evaluation.py index 650ec98ef7..b25da4aec8 100644 --- a/tests/unit/recommenders/evaluation/test_python_evaluation.py +++ b/tests/unit/recommenders/evaluation/test_python_evaluation.py @@ -28,7 +28,15 @@ map_at_k, auc, logloss, - PythonDiversityEvaluation, + user_diversity, + diversity, + historical_item_novelty, + novelty, + user_item_serendipity, + user_serendipity, + serendipity, + catalog_coverage, + distributional_coverage, ) TOL = 0.0001 @@ -468,64 +476,70 @@ def python_diversity_data(): def test_catalog_coverage(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + c_coverage = catalog_coverage( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) - c_coverage = evaluator.catalog_coverage() assert c_coverage == target_metrics["c_coverage"] def test_distributional_coverage(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + d_coverage = 
distributional_coverage( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) - d_coverage = evaluator.distributional_coverage() assert d_coverage == target_metrics["d_coverage"] def test_item_novelty(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = historical_item_novelty( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) - actual = evaluator.historical_item_novelty() assert_frame_equal( target_metrics["item_novelty"], actual, check_exact=False, check_less_precise=4 ) assert np.all(actual["item_novelty"].values >= 0) # Test that novelty is zero when data includes only one item train_df_new = train_df.loc[train_df["ItemId"] == 3] - evaluator = PythonDiversityEvaluation( + actual = historical_item_novelty( train_df=train_df_new, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) - actual = evaluator.historical_item_novelty() assert actual["item_novelty"].values[0] == 0 def test_novelty(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = novelty( train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" ) - novelty = evaluator.novelty() - assert target_metrics["novelty"] == novelty - assert novelty >= 0 + assert target_metrics["novelty"] == actual + assert actual >= 0 # Test that novelty is zero when data includes only one item train_df_new = train_df.loc[train_df["ItemId"] == 3] reco_df_new = reco_df.loc[reco_df["ItemId"] == 3] - evaluator = PythonDiversityEvaluation( - train_df=train_df_new, reco_df=reco_df_new, col_user="UserId", col_item="ItemId" + assert ( + novelty( + train_df=train_df_new, + reco_df=reco_df_new, + col_user="UserId", + col_item="ItemId", + ) + == 0 ) - assert evaluator.novelty() == 0 def test_user_diversity(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( - train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" + actual = user_diversity( + train_df=train_df, + reco_df=reco_df, + item_feature_df=None, + item_sim_measure="item_cooccurrence_count", + col_user="UserId", + col_item="ItemId", + col_sim="sim", + col_relevance=None, ) - actual = evaluator.user_diversity() assert_frame_equal( target_metrics["user_diversity"], actual, @@ -536,22 +550,30 @@ def test_user_diversity(python_diversity_data, target_metrics): def test_diversity(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( - train_df=train_df, reco_df=reco_df, col_user="UserId", col_item="ItemId" + assert target_metrics["diversity"] == diversity( + train_df=train_df, + reco_df=reco_df, + item_feature_df=None, + item_sim_measure="item_cooccurrence_count", + col_user="UserId", + col_item="ItemId", + col_sim="sim", + col_relevance=None, ) - assert target_metrics["diversity"] == evaluator.diversity() def test_user_item_serendipity(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = user_item_serendipity( train_df=train_df, reco_df=reco_df, + item_feature_df=None, + item_sim_measure="item_cooccurrence_count", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - actual = evaluator.user_item_serendipity() assert_frame_equal( target_metrics["user_item_serendipity"], actual, @@ -562,14 +584,16 @@ def 
test_user_item_serendipity(python_diversity_data, target_metrics): def test_user_serendipity(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = user_serendipity( train_df=train_df, reco_df=reco_df, + item_feature_df=None, + item_sim_measure="item_cooccurrence_count", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - actual = evaluator.user_serendipity() assert_frame_equal( target_metrics["user_serendipity"], actual, @@ -580,27 +604,30 @@ def test_user_serendipity(python_diversity_data, target_metrics): def test_serendipity(python_diversity_data, target_metrics): train_df, reco_df, _ = python_diversity_data - evaluator = PythonDiversityEvaluation( + assert target_metrics["serendipity"] == serendipity( train_df=train_df, reco_df=reco_df, + item_feature_df=None, + item_sim_measure="item_cooccurrence_count", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - assert target_metrics["serendipity"] == evaluator.serendipity() def test_user_diversity_item_feature_vector(python_diversity_data, target_metrics): train_df, reco_df, item_feature_df = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = user_diversity( train_df=train_df, reco_df=reco_df, item_feature_df=item_feature_df, item_sim_measure="item_feature_vector", col_user="UserId", col_item="ItemId", + col_sim="sim", + col_relevance=None, ) - actual = evaluator.user_diversity() assert_frame_equal( target_metrics["user_diversity_item_feature_vector"], actual, @@ -611,31 +638,32 @@ def test_user_diversity_item_feature_vector(python_diversity_data, target_metric def test_diversity_item_feature_vector(python_diversity_data, target_metrics): train_df, reco_df, item_feature_df = python_diversity_data - evaluator = PythonDiversityEvaluation( + assert target_metrics["diversity_item_feature_vector"] == diversity( train_df=train_df, reco_df=reco_df, item_feature_df=item_feature_df, item_sim_measure="item_feature_vector", col_user="UserId", col_item="ItemId", + col_sim="sim", + col_relevance=None, ) - assert target_metrics["diversity_item_feature_vector"] == evaluator.diversity() def test_user_item_serendipity_item_feature_vector( python_diversity_data, target_metrics ): train_df, reco_df, item_feature_df = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = user_item_serendipity( train_df=train_df, reco_df=reco_df, item_feature_df=item_feature_df, item_sim_measure="item_feature_vector", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - actual = evaluator.user_item_serendipity() assert_frame_equal( target_metrics["user_item_serendipity_item_feature_vector"], actual, @@ -646,16 +674,16 @@ def test_user_item_serendipity_item_feature_vector( def test_user_serendipity_item_feature_vector(python_diversity_data, target_metrics): train_df, reco_df, item_feature_df = python_diversity_data - evaluator = PythonDiversityEvaluation( + actual = user_serendipity( train_df=train_df, reco_df=reco_df, item_feature_df=item_feature_df, item_sim_measure="item_feature_vector", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - actual = evaluator.user_serendipity() assert_frame_equal( target_metrics["user_serendipity_item_feature_vector"], actual, @@ -666,13 +694,13 @@ def test_user_serendipity_item_feature_vector(python_diversity_data, target_metr def 
test_serendipity_item_feature_vector(python_diversity_data, target_metrics): train_df, reco_df, item_feature_df = python_diversity_data - evaluator = PythonDiversityEvaluation( + assert target_metrics["serendipity_item_feature_vector"] == serendipity( train_df=train_df, reco_df=reco_df, item_feature_df=item_feature_df, item_sim_measure="item_feature_vector", col_user="UserId", col_item="ItemId", + col_sim="sim", col_relevance="Relevance", ) - assert target_metrics["serendipity_item_feature_vector"] == evaluator.serendipity() From 8c817242f859a0d7adb5f02dbba1e0f0ae9fd423 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Gonz=C3=A1lez-Fierro?= <3491412+miguelgfierro@users.noreply.github.com> Date: Thu, 16 Sep 2021 12:14:07 +0100 Subject: [PATCH 12/22] LightGBM 3 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 54122f29e2..0aba6982be 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ "scikit-learn>=0.22.1,<1", "numba>=0.38.1,<1", "lightfm>=1.15,<2", - "lightgbm>=2.2.1,<3", + "lightgbm>=2.2.1", "memory_profiler>=0.54.0,<1", "nltk>=3.4,<4", "pydocumentdb>=2.3.3<3", # TODO: replace with azure-cosmos From ed6efe89235ef7d22ddb74d3ca661f7b48d6f0b2 Mon Sep 17 00:00:00 2001 From: Scott Graham <5720537+gramhagen@users.noreply.github.com> Date: Thu, 16 Sep 2021 09:18:49 -0400 Subject: [PATCH 13/22] Update .readthedocs.yaml to fix xlearn-cmake issue --- docs/.readthedocs.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 7fc97323f0..647d071207 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,4 +8,9 @@ sphinx: python: version: 3.7 install: - - requirements: docs/requirements.txt \ No newline at end of file + - requirements: docs/requirements.txt + +# Add necessary apt-get packages +build: + apt_packages: + - cmake From 3bcb8082403aa94d0f8a1d0465fe97accaa5b20f Mon Sep 17 00:00:00 2001 From: Scott Graham <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:01:02 -0400 Subject: [PATCH 14/22] Update .readthedocs.yaml moving build to happen before python install --- docs/.readthedocs.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 647d071207..7e819f66b3 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -4,13 +4,13 @@ version: 2 sphinx: configuration: docs/source/conf.py +# Add necessary apt-get packages +build: + apt_packages: + - cmake + # Explicitly set the version of Python and its requirements python: version: 3.7 install: - requirements: docs/requirements.txt - -# Add necessary apt-get packages -build: - apt_packages: - - cmake From 361c45e5f1539f55eb1c2551f205a2283c4aa594 Mon Sep 17 00:00:00 2001 From: Scott Graham <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:14:58 -0400 Subject: [PATCH 15/22] Update .readthedocs.yaml --- docs/.readthedocs.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 7e819f66b3..51aa0e7e6b 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -1,11 +1,8 @@ version: 2 -# Build from the docs/ directory with Sphinx -sphinx: - configuration: docs/source/conf.py - # Add necessary apt-get packages build: + image: latest apt_packages: - cmake @@ -14,3 +11,7 @@ python: version: 3.7 install: - requirements: docs/requirements.txt + +# Build from the docs/ directory with Sphinx +sphinx: + 
configuration: docs/source/conf.py From 9f19411e2312cde3655b1576bf2dbec20ffb2eb5 Mon Sep 17 00:00:00 2001 From: gramhagen <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:31:41 -0400 Subject: [PATCH 16/22] moving readthedocs yaml --- docs/.readthedocs.yaml | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 docs/.readthedocs.yaml diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml deleted file mode 100644 index 51aa0e7e6b..0000000000 --- a/docs/.readthedocs.yaml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -# Add necessary apt-get packages -build: - image: latest - apt_packages: - - cmake - -# Explicitly set the version of Python and its requirements -python: - version: 3.7 - install: - - requirements: docs/requirements.txt - -# Build from the docs/ directory with Sphinx -sphinx: - configuration: docs/source/conf.py From fb2c31882bb644cdb8aaf5bf7f9e0a3e44fd14f9 Mon Sep 17 00:00:00 2001 From: gramhagen <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:33:27 -0400 Subject: [PATCH 17/22] actually moving the yaml this time --- .readthedocs.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .readthedocs.yaml diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..595e468ec7 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,16 @@ +version: 2 + +# Add necessary apt-get packages +build: + apt_packages: + - cmake + +# Explicitly set the version of Python and its requirements +python: + version: "3.7" + install: + - requirements: docs/requirements.txt + +# Build from the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py From 01768eb6f563729a30a2402d282bd22160ab718b Mon Sep 17 00:00:00 2001 From: gramhagen <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:42:09 -0400 Subject: [PATCH 18/22] using pip install for rtd --- .readthedocs.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 595e468ec7..28d6dd565d 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -9,7 +9,10 @@ build: python: version: "3.7" install: - - requirements: docs/requirements.txt + - method: pip + path: . 
+ extra_requirements: + - all # Build from the docs/ directory with Sphinx sphinx: From 6b34580bfa1f293a16fbff4ceafe8b081002b72d Mon Sep 17 00:00:00 2001 From: gramhagen <5720537+gramhagen@users.noreply.github.com> Date: Fri, 17 Sep 2021 12:54:44 -0400 Subject: [PATCH 19/22] removing redundant requirements.txt --- docs/requirements.txt | 39 --------------------------------------- 1 file changed, 39 deletions(-) delete mode 100644 docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 8d986c3fd7..0000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,39 +0,0 @@ -numpy>=1.14 -pandas>1.0.3,<2 -scipy>=1.0.0,<2 -tqdm>=4.31.1,<5 -matplotlib>=2.2.2,<4 -scikit-learn>=0.22.1,<1 -numba>=0.38.1,<1 -lightfm>=1.15,<2 -lightgbm>=2.2.1,<3 -memory_profiler>=0.54.0,<1 -nltk>=3.4,<4 -pydocumentdb>=2.3.3<3 -pymanopt>=0.2.5,<1 -seaborn>=0.8.1,<1 -transformers>=2.5.0,<5 -bottleneck>=1.2.1,<2 -category_encoders>=1.3.0,<2 -jinja2>=2,<3 -pyyaml>=5.4.1,<6 -requests>=2.0.0,<3 -cornac>=1.1.2,<2 -scikit-surprise>=0.19.1,<=1.1.1 -retrying>=1.3.3 -azure.mgmt.cosmosdb>=0.8.0,<1 -hyperopt>=0.1.2,<1 -ipykernel>=4.6.1,<5 -jupyter>=1,<2 -locust>=1,<2 -papermill>=2.1.2,<3 -scrapbook>=0.5.0,<1.0.0 -nvidia-ml-py3>=7.352.0 -tensorflow-gpu>=1.15.0,<2 -torch==1.2.0 -fastai>=1.0.46,<2 -databricks_cli>=0.8.6,<1 -pyarrow>=0.8.0,<1.0.0 -pyspark>=2.4.5,<3.0.0 -cmake>=3.18.4.post1 -xlearn==0.40a1 From be2c1896d2b4bfb98d0c754ff01fb7aaa9442444 Mon Sep 17 00:00:00 2001 From: Andreas Argyriou Date: Mon, 20 Sep 2021 18:16:10 +0000 Subject: [PATCH 20/22] Change date of publication --- NEWS.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/NEWS.md b/NEWS.md index 03489b855e..e8f0777dbf 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,6 +1,6 @@ # What's New -## Update September 9, 2021 +## Update September 27, 2021 We have a new release [Recommenders 0.7.0](https://github.com/microsoft/recommenders/releases/tag/0.7.0)! diff --git a/README.md b/README.md index 863a921493..27f650606b 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![Documentation Status](https://readthedocs.org/projects/microsoft-recommenders/badge/?version=latest)](https://microsoft-recommenders.readthedocs.io/en/latest/?badge=latest) ## What's New -### (September 9, 2021) +### (September 27, 2021) We have a new release [Recommenders 0.7.0](https://github.com/microsoft/recommenders/releases/tag/0.7.0)! We have changed the names of the folders which contain the source code, so that they are more informative. This implies that you will need to change any import statements that reference the recommenders package. Specifically, the folder `reco_utils` has been renamed to `recommenders` and its subfolders have been renamed according to [issue 1390](https://github.com/microsoft/recommenders/issues/1390). 
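
The next two commits clean up naming and docstrings in the same `python_evaluation.py` functions introduced by the refactor in the earlier patches. As a reference point for what those functions compute, here is a minimal, self-contained sketch of the novelty and coverage formulas visible in the diffs above — novelty as `-log2` of an item's interaction share, distributional coverage as the entropy of the recommendation distribution. The toy dataframes, column names, and values are illustrative assumptions, not taken from the repository's test fixtures:

```python
import numpy as np
import pandas as pd

# Toy interaction history and recommendations (illustrative values only).
train_df = pd.DataFrame({"UserId": [1, 1, 2, 2, 3], "ItemId": [1, 2, 1, 3, 1]})
reco_df = pd.DataFrame({"UserId": [1, 2, 3], "ItemId": [3, 2, 2]})

# historical_item_novelty: -log2(an item's share of all training interactions).
n_records = train_df.shape[0]
item_novelty = -np.log2(train_df.groupby("ItemId").size() / n_records)

# novelty: item novelty averaged over the recommendations, weighted by how
# often each item is recommended (sum of count * novelty over total recs).
reco_counts = reco_df.groupby("ItemId").size()
avg_novelty = (
    reco_counts * item_novelty.reindex(reco_counts.index)
).sum() / reco_df.shape[0]

# catalog_coverage: fraction of distinct training items that get recommended.
c_coverage = reco_df["ItemId"].nunique() / train_df["ItemId"].nunique()

# distributional_coverage: Shannon entropy of the recommendation distribution,
# -sum(p(i) * log2(p(i))) with p(i) = an item's share of all recommendations.
p = reco_counts / reco_df.shape[0]
d_coverage = -(p * np.log2(p)).sum()

print(avg_novelty, c_coverage, d_coverage)  # ~2.32, 0.667, ~0.918
```

With this data, the average novelty works out to `-log2(1/5) ≈ 2.32` because only long-tail items were recommended, and the distributional coverage is the entropy of the 2/3–1/3 recommendation split, roughly 0.92 bits.
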
From 7227ef0868022804365ec3d6480a458c366777b8 Mon Sep 17 00:00:00 2001 From: YanZhangADS Date: Tue, 21 Sep 2021 21:56:19 +0000 Subject: [PATCH 21/22] fixed private functions and docstrings --- recommenders/evaluation/python_evaluation.py | 130 ++++++++++++++++-- .../evaluation/test_python_evaluation.py | 4 +- 2 files changed, 118 insertions(+), 16 deletions(-) diff --git a/recommenders/evaluation/python_evaluation.py b/recommenders/evaluation/python_evaluation.py index a04fff5450..77f7c6fa50 100644 --- a/recommenders/evaluation/python_evaluation.py +++ b/recommenders/evaluation/python_evaluation.py @@ -32,7 +32,7 @@ ) -def check_column_dtypes(func): +def _check_column_dtypes(func): """Checks columns of DataFrame inputs This includes the checks on: @@ -92,7 +92,7 @@ def check_column_dtypes_wrapper( return check_column_dtypes_wrapper -@check_column_dtypes +@_check_column_dtypes @lru_cache_df(maxsize=1) def merge_rating_true_pred( rating_true, @@ -343,7 +343,7 @@ def logloss( return log_loss(y_true, y_pred) -@check_column_dtypes +@_check_column_dtypes @lru_cache_df(maxsize=1) def merge_ranking_true_pred( rating_true, @@ -698,7 +698,7 @@ def get_top_k_items( } # diversity metrics -def check_column_dtypes_diversity_serendipity(func): +def _check_column_dtypes_diversity_serendipity(func): """Checks columns of DataFrame inputs This includes the checks on: @@ -742,6 +742,7 @@ def check_column_dtypes_diversity_serendipity_wrapper( col_item_features (str): item feature column name. col_user (str): User id column name. col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. col_relevance (str): This column indicates whether the recommended item is actually relevant to the user or not. """ @@ -797,7 +798,7 @@ def check_column_dtypes_diversity_serendipity_wrapper( return check_column_dtypes_diversity_serendipity_wrapper -def check_column_dtypes_novelty_coverage(func): +def _check_column_dtypes_novelty_coverage(func): """Checks columns of DataFrame inputs This includes the checks on: @@ -1048,7 +1049,7 @@ def _get_intralist_similarity( return df_intralist_similarity -@check_column_dtypes_diversity_serendipity +@_check_column_dtypes_diversity_serendipity @lru_cache_df(maxsize=1) def user_diversity( train_df, @@ -1069,6 +1070,19 @@ def user_diversity( Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: introducing serendipity into music recommendation, WSDM 2012 + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. 
Returns: pandas.DataFrame: A dataframe with the following columns: col_user, user_diversity. """ @@ -1094,7 +1108,7 @@ def user_diversity( return df_user_diversity -@check_column_dtypes_diversity_serendipity +@_check_column_dtypes_diversity_serendipity def diversity( train_df, reco_df, @@ -1108,6 +1122,19 @@ def diversity( ): """Calculate average diversity of recommendations across all users. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. Returns: float: diversity. """ @@ -1126,7 +1153,7 @@ def diversity( # Novelty metrics -@check_column_dtypes_novelty_coverage +@_check_column_dtypes_novelty_coverage @lru_cache_df(maxsize=1) def historical_item_novelty( train_df, @@ -1149,6 +1176,15 @@ def historical_item_novelty( High novelty values correspond to long-tail items in the density function, that few users have interacted with and low novelty values correspond to popular head items. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + Returns: pandas.DataFrame: A dataframe with the following columns: col_item, item_novelty. """ @@ -1167,7 +1203,7 @@ def historical_item_novelty( return df_item_novelty -@check_column_dtypes_novelty_coverage +@_check_column_dtypes_novelty_coverage def novelty(train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL): """Calculate the average novelty in a list of recommended items (this assumes that the recommendation list is already computed). Follows section 5 from @@ -1177,6 +1213,15 @@ def novelty(train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_ P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: choice, discovery and relevance, ECIR 2011 + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). 
Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + Returns: float: novelty. """ @@ -1196,7 +1241,7 @@ def novelty(train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_ # Serendipity metrics -@check_column_dtypes_diversity_serendipity +@_check_column_dtypes_diversity_serendipity @lru_cache_df(maxsize=1) def user_item_serendipity( train_df, @@ -1220,6 +1265,19 @@ def user_item_serendipity( Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, eugeneyan.com, April 2020 + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. Returns: pandas.DataFrame: A dataframe with columns: col_user, col_item, user_item_serendipity. """ @@ -1280,7 +1338,7 @@ def user_item_serendipity( @lru_cache_df(maxsize=1) -@check_column_dtypes_diversity_serendipity +@_check_column_dtypes_diversity_serendipity def user_serendipity( train_df, reco_df, @@ -1294,6 +1352,19 @@ def user_serendipity( ): """Calculate average serendipity for each user's recommendations. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. Returns: pandas.DataFrame: A dataframe with following columns: col_user, user_serendipity. """ @@ -1321,7 +1392,7 @@ def user_serendipity( return df_user_serendipity -@check_column_dtypes_diversity_serendipity +@_check_column_dtypes_diversity_serendipity def serendipity( train_df, reco_df, @@ -1335,6 +1406,19 @@ def serendipity( ): """Calculate average serendipity for recommendations across all users. 
+ Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. Returns: float: serendipity. """ @@ -1354,7 +1438,7 @@ def serendipity( # Coverage metrics -@check_column_dtypes_novelty_coverage +@_check_column_dtypes_novelty_coverage def catalog_coverage( train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL ): @@ -1366,6 +1450,15 @@ def catalog_coverage( G. Shani and A. Gunawardana, Evaluating Recommendation Systems, Recommender Systems Handbook pp. 257-297, 2010. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + Returns: float: catalog coverage """ @@ -1379,7 +1472,7 @@ def catalog_coverage( return c_coverage -@check_column_dtypes_novelty_coverage +@_check_column_dtypes_novelty_coverage def distributional_coverage( train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL ): @@ -1391,6 +1484,15 @@ def distributional_coverage( G. Shani and A. Gunawardana, Evaluating Recommendation Systems, Recommender Systems Handbook pp. 257-297, 2010. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. 
+ Returns: float: distributional coverage """ diff --git a/tests/unit/recommenders/evaluation/test_python_evaluation.py b/tests/unit/recommenders/evaluation/test_python_evaluation.py index b25da4aec8..cdf377cf6c 100644 --- a/tests/unit/recommenders/evaluation/test_python_evaluation.py +++ b/tests/unit/recommenders/evaluation/test_python_evaluation.py @@ -15,7 +15,7 @@ DEFAULT_PREDICTION_COL, ) from recommenders.evaluation.python_evaluation import ( - check_column_dtypes, + _check_column_dtypes, merge_rating_true_pred, merge_ranking_true_pred, rmse, @@ -102,7 +102,7 @@ def test_column_dtypes_match(rating_true, rating_pred): expected_error = "Columns in provided DataFrames are not the same datatype" with pytest.raises(ValueError, match=expected_error): - check_column_dtypes(Mock())( + _check_column_dtypes(Mock())( rating_true, rating_pred, col_user=DEFAULT_USER_COL, From c0fc99116d185a94aa772c6436a40b61b5953926 Mon Sep 17 00:00:00 2001 From: Andreas Argyriou Date: Wed, 22 Sep 2021 17:56:55 +0000 Subject: [PATCH 22/22] Fix docstrings in evaluation --- recommenders/evaluation/python_evaluation.py | 26 +++++++++----------- recommenders/evaluation/spark_evaluation.py | 5 ++-- 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/recommenders/evaluation/python_evaluation.py b/recommenders/evaluation/python_evaluation.py index 77f7c6fa50..014be05fc1 100644 --- a/recommenders/evaluation/python_evaluation.py +++ b/recommenders/evaluation/python_evaluation.py @@ -372,7 +372,7 @@ def merge_ranking_true_pred( Returns: pandas.DataFrame, pandas.DataFrame, int: DataFrame of recommendation hits, sorted by `col_user` and `rank` - DataFrmae of hit counts vs actual relevant items per user number of unique user ids + DataFrame of hit counts vs actual relevant items per user number of unique user ids """ # Make sure the prediction and true data frames have the same set of users @@ -1070,19 +1070,17 @@ def user_diversity( Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: introducing serendipity into music recommendation, WSDM 2012 - Args: - train_df (pandas.DataFrame): Data set with historical data for users and items they - have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. - reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, - col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). Assumed to not contain any duplicate user-item pairs. item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. col_item_features (str): item feature column name. col_user (str): User id column name. col_item (str): Item id column name. col_sim (str): This column indicates the column name for item similarity. - col_relevance (str): This column indicates whether the recommended item is actually - relevant to the user or not. 
+ col_relevance (str): This column indicates whether the recommended item is actually relevant to the user or not. + Returns: pandas.DataFrame: A dataframe with the following columns: col_user, user_diversity. """ @@ -1122,19 +1120,17 @@ def diversity( ): """Calculate average diversity of recommendations across all users. - Args: - train_df (pandas.DataFrame): Data set with historical data for users and items they - have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. - reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, - col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). Assumed to not contain any duplicate user-item pairs. item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. It contains two columns: col_item and features (a feature vector). item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. Available measures include item_cooccurrence_count (default choice) and item_feature_vector. col_item_features (str): item feature column name. col_user (str): User id column name. col_item (str): Item id column name. col_sim (str): This column indicates the column name for item similarity. - col_relevance (str): This column indicates whether the recommended item is actually - relevant to the user or not. + col_relevance (str): This column indicates whether the recommended item is actually relevant to the user or not. + Returns: float: diversity. """ diff --git a/recommenders/evaluation/spark_evaluation.py b/recommenders/evaluation/spark_evaluation.py index 1bc7ad280e..37a73778ea 100644 --- a/recommenders/evaluation/spark_evaluation.py +++ b/recommenders/evaluation/spark_evaluation.py @@ -165,7 +165,7 @@ def exp_var(self): class SparkRankingEvaluation: - """SparkRankingEvaluation""" + """Spark Ranking Evaluator""" def __init__( self, @@ -487,7 +487,7 @@ def _get_relevant_items_by_timestamp( class SparkDiversityEvaluation: - """Spark Diversity Evaluator""" + """Spark Evaluator for diversity, coverage, novelty, serendipity""" def __init__( self, @@ -508,7 +508,6 @@ def __init__( 1. catalog_coverage, which measures the proportion of items that get recommended from the item catalog; 2. distributional_coverage, which measures how unequally different items are recommended in the recommendations to all users. - * Novelty - A more novel item indicates it is less popular, i.e. it gets recommended less frequently. * Diversity - The dissimilarity of items being recommended. * Serendipity - The "unusualness" or "surprise" of recommendations to a user. When 'col_relevance' is used, it indicates how "pleasant surprise" of recommendations is to a user.
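
Taken together, the series replaces the `PythonDiversityEvaluation` class with plain functions whose keyword-heavy call pattern mirrors the updated unit tests above. Below is a minimal end-to-end usage sketch under assumed toy data — the dataframes and the `UserId`/`ItemId`/`Relevance` values are illustrative only, and each user is given two recommendations so that intra-list similarity (hence diversity) is defined for every user; exact metric values depend on the co-occurrence structure of the inputs:

```python
import pandas as pd
from recommenders.evaluation.python_evaluation import (
    catalog_coverage,
    distributional_coverage,
    diversity,
    novelty,
    serendipity,
)

# Illustrative data only, in the same shape as the test fixtures above.
train_df = pd.DataFrame({"UserId": [1, 1, 2, 2, 3], "ItemId": [1, 2, 1, 3, 1]})
reco_df = pd.DataFrame(
    {
        "UserId": [1, 1, 2, 2, 3, 3],
        "ItemId": [3, 1, 2, 3, 1, 2],
        "Relevance": [1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
    }
)

# Coverage and novelty metrics need only the user and item columns.
print(catalog_coverage(train_df=train_df, reco_df=reco_df,
                       col_user="UserId", col_item="ItemId"))
print(distributional_coverage(train_df=train_df, reco_df=reco_df,
                              col_user="UserId", col_item="ItemId"))
print(novelty(train_df=train_df, reco_df=reco_df,
              col_user="UserId", col_item="ItemId"))

# Diversity and serendipity default to item co-occurrence similarity; pass
# item_feature_df plus item_sim_measure="item_feature_vector" to use item
# feature vectors instead, as the *_item_feature_vector tests do.
print(diversity(train_df=train_df, reco_df=reco_df,
                col_user="UserId", col_item="ItemId", col_sim="sim"))
print(serendipity(train_df=train_df, reco_df=reco_df,
                  col_user="UserId", col_item="ItemId", col_sim="sim",
                  col_relevance="Relevance"))
```
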