From 80d3f812cc0d51fc274d50cd970531b95d451a97 Mon Sep 17 00:00:00 2001 From: "Zhian N. Kamvar" Date: Tue, 25 Apr 2023 09:32:31 -0700 Subject: [PATCH] [automation] transform lesson to sandpaper --- .editorconfig | 26 + .github/workflows/README.md | 198 +++ .github/workflows/pr-close-signal.yaml | 23 + .github/workflows/pr-comment.yaml | 185 +++ .github/workflows/pr-post-remove-branch.yaml | 32 + .github/workflows/pr-preflight.yaml | 39 + .github/workflows/pr-receive.yaml | 131 ++ .github/workflows/sandpaper-main.yaml | 61 + .github/workflows/sandpaper-version.txt | 1 + .github/workflows/update-cache.yaml | 125 ++ .github/workflows/update-workflows.yaml | 66 + .github/workflows/workbench-beta-phase.yml | 60 + .gitignore | 56 + CODE_OF_CONDUCT.md | 13 + CONTRIBUTING.md | 121 ++ LICENSE.md | 79 ++ README.md | 16 +- _extras/.Rhistory | 0 _extras/prereqs.md | 50 - config.yaml | 90 ++ episodes/01-introduction.md | 125 +- episodes/02-image-basics.md | 1033 +++++++-------- episodes/03-skimage-images.md | 695 +++++----- episodes/04-drawing.md | 855 +++++++------ episodes/05-creating-histograms.md | 503 ++++---- episodes/06-blurring.md | 460 +++---- episodes/07-thresholding.md | 817 ++++++------ episodes/08-connected-components.md | 1118 +++++++++-------- episodes/09-challenges.md | 362 +++--- {data => episodes/data}/beads.jpg | Bin {data => episodes/data}/board.jpg | Bin {data => episodes/data}/centers.txt | 0 {data => episodes/data}/chair.jpg | Bin {data => episodes/data}/colonies-01.tif | Bin {data => episodes/data}/colonies-02.tif | Bin {data => episodes/data}/colonies-03.tif | Bin {data => episodes/data}/eight.tif | Bin {data => episodes/data}/gaussian-original.png | Bin .../data}/maize-root-cluster.jpg | Bin .../data}/maize-roots-grayscale.jpg | Bin {data => episodes/data}/maize-seedlings.tif | Bin {data => episodes/data}/plant-seedling.jpg | Bin {data => episodes/data}/remote-control.jpg | Bin {data => episodes/data}/shapes-01.jpg | Bin {data => 
episodes/data}/shapes-02.jpg | Bin {data => episodes/data}/sudoku.png | Bin {data => episodes/data}/tree.jpg | Bin {data => episodes/data}/trial-016.jpg | Bin {data => episodes/data}/trial-020.jpg | Bin {data => episodes/data}/trial-216.jpg | Bin {data => episodes/data}/trial-293.jpg | Bin {data => episodes/data}/wellplate-01.jpg | Bin {data => episodes/data}/wellplate-02.tif | Bin {fig => episodes/fig}/Gaussian_2D.png | Bin .../fig}/Normal_Distribution_PDF.svg | 0 {fig => episodes/fig}/beads-canny-ui.png | Bin {fig => episodes/fig}/beads-out.png | Bin .../fig}/black-and-white-edge-pixels.jpg | Bin .../fig}/black-and-white-gradient.png | Bin {fig => episodes/fig}/black-and-white.jpg | Bin {fig => episodes/fig}/blur-demo.gif | Bin {fig => episodes/fig}/board-coordinates.jpg | Bin {fig => episodes/fig}/board-final.jpg | Bin .../fig}/cartesian-coordinates.png | Bin {fig => episodes/fig}/cat-corner-blue.png | Bin {fig => episodes/fig}/cat-eye-pixels.jpg | Bin {fig => episodes/fig}/cat.jpg | Bin {fig => episodes/fig}/chair-layers-rgb.png | Bin {fig => episodes/fig}/chair-original.jpg | Bin .../fig}/checkerboard-blue-channel.png | Bin .../fig}/checkerboard-green-channel.png | Bin .../fig}/checkerboard-red-channel.png | Bin {fig => episodes/fig}/checkerboard.png | Bin {fig => episodes/fig}/colonies-01-gray.png | Bin .../fig}/colonies-01-histogram.png | Bin {fig => episodes/fig}/colonies-01-mask.png | Bin {fig => episodes/fig}/colonies-01-summary.png | Bin {fig => episodes/fig}/colonies-01.jpg | Bin {fig => episodes/fig}/colonies-02-summary.png | Bin {fig => episodes/fig}/colonies-02.jpg | Bin {fig => episodes/fig}/colonies-03-summary.png | Bin {fig => episodes/fig}/colonies-03.jpg | Bin {fig => episodes/fig}/colonies01.png | Bin {fig => episodes/fig}/colony-mask.png | Bin {fig => episodes/fig}/colour-table.png | Bin {fig => episodes/fig}/combination.png | Bin {fig => episodes/fig}/drawing-practice.jpg | Bin {fig => episodes/fig}/eight.png | Bin {fig => 
episodes/fig}/five.png | Bin .../fig}/four-maize-roots-binary-improved.jpg | Bin .../fig}/four-maize-roots-binary.jpg | Bin {fig => episodes/fig}/four-maize-roots.jpg | Bin {fig => episodes/fig}/gaussian-blurred.png | Bin {fig => episodes/fig}/gaussian-kernel.png | Bin {fig => episodes/fig}/grayscale.png | Bin {fig => episodes/fig}/image-coordinates.png | Bin {fig => episodes/fig}/jupyter_overview.png | Bin .../fig}/left-hand-coordinates.png | Bin .../fig}/maize-root-cluster-histogram.png | Bin .../fig}/maize-root-cluster-mask.png | Bin .../fig}/maize-root-cluster-selected.png | Bin .../fig}/maize-root-cluster-threshold.jpg | Bin .../fig}/maize-roots-threshold.png | Bin .../fig}/maize-seedling-enlarged.jpg | Bin .../fig}/maize-seedling-original.jpg | Bin .../fig}/maize-seedlings-mask.png | Bin .../fig}/maize-seedlings-masked.jpg | Bin {fig => episodes/fig}/maize-seedlings.jpg | Bin .../fig}/plant-seedling-colour-histogram.png | Bin ...lant-seedling-grayscale-histogram-mask.png | Bin .../plant-seedling-grayscale-histogram.png | Bin .../fig}/plant-seedling-grayscale.png | Bin {fig => episodes/fig}/quality-histogram.jpg | Bin {fig => episodes/fig}/quality-jpg.jpg | Bin {fig => episodes/fig}/quality-original.jpg | Bin {fig => episodes/fig}/quality-tif.jpg | Bin .../fig}/rectangle-gaussian-blurred.png | Bin .../fig}/remote-control-masked.jpg | Bin .../fig}/shapes-01-areas-histogram.png | Bin .../fig}/shapes-01-canny-edge-output.png | Bin .../fig}/shapes-01-canny-edges.png | Bin .../fig}/shapes-01-canny-track-edges.png | Bin .../fig}/shapes-01-cca-detail.png | Bin .../fig}/shapes-01-filtered-objects.png | Bin {fig => episodes/fig}/shapes-01-grayscale.png | Bin {fig => episodes/fig}/shapes-01-histogram.png | Bin {fig => episodes/fig}/shapes-01-labeled.png | Bin {fig => episodes/fig}/shapes-01-mask.png | Bin .../shapes-01-objects-coloured-by-area.png | Bin {fig => episodes/fig}/shapes-01-selected.png | Bin {fig => episodes/fig}/shapes-02-histogram.png | Bin {fig => 
episodes/fig}/shapes-02-mask.png | Bin {fig => episodes/fig}/shapes-02-selected.png | Bin {fig => episodes/fig}/sudoku-gray.png | Bin {fig => episodes/fig}/three-colours.png | Bin {fig => episodes/fig}/wellplate-01-masked.jpg | Bin .../fig}/wellplate-02-histogram.png | Bin {fig => episodes/fig}/wellplate-02-masked.jpg | Bin {fig => episodes/fig}/wellplate-02.jpg | Bin {fig => episodes/fig}/zero.png | Bin index.md | 37 +- {_extras => instructors}/edge-detection.md | 361 +++--- .../instructor-notes.md | 41 +- instructors/prereqs.md | 55 + {_extras => learners}/discuss.md | 7 +- reference.md => learners/reference.md | 44 +- learners/setup.md | 137 ++ profiles/learner-profiles.md | 5 + setup.md | 118 -- site/README.md | 2 + 150 files changed, 4867 insertions(+), 3280 deletions(-) create mode 100644 .editorconfig create mode 100755 .github/workflows/README.md create mode 100755 .github/workflows/pr-close-signal.yaml create mode 100755 .github/workflows/pr-comment.yaml create mode 100755 .github/workflows/pr-post-remove-branch.yaml create mode 100755 .github/workflows/pr-preflight.yaml create mode 100755 .github/workflows/pr-receive.yaml create mode 100755 .github/workflows/sandpaper-main.yaml create mode 100644 .github/workflows/sandpaper-version.txt create mode 100755 .github/workflows/update-cache.yaml create mode 100755 .github/workflows/update-workflows.yaml create mode 100644 .github/workflows/workbench-beta-phase.yml create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE.md delete mode 100644 _extras/.Rhistory delete mode 100644 _extras/prereqs.md create mode 100644 config.yaml rename {data => episodes/data}/beads.jpg (100%) rename {data => episodes/data}/board.jpg (100%) rename {data => episodes/data}/centers.txt (100%) rename {data => episodes/data}/chair.jpg (100%) rename {data => episodes/data}/colonies-01.tif (100%) rename {data => episodes/data}/colonies-02.tif (100%) rename {data => 
episodes/data}/colonies-03.tif (100%) rename {data => episodes/data}/eight.tif (100%) rename {data => episodes/data}/gaussian-original.png (100%) rename {data => episodes/data}/maize-root-cluster.jpg (100%) rename {data => episodes/data}/maize-roots-grayscale.jpg (100%) rename {data => episodes/data}/maize-seedlings.tif (100%) rename {data => episodes/data}/plant-seedling.jpg (100%) rename {data => episodes/data}/remote-control.jpg (100%) rename {data => episodes/data}/shapes-01.jpg (100%) rename {data => episodes/data}/shapes-02.jpg (100%) rename {data => episodes/data}/sudoku.png (100%) rename {data => episodes/data}/tree.jpg (100%) rename {data => episodes/data}/trial-016.jpg (100%) rename {data => episodes/data}/trial-020.jpg (100%) rename {data => episodes/data}/trial-216.jpg (100%) rename {data => episodes/data}/trial-293.jpg (100%) rename {data => episodes/data}/wellplate-01.jpg (100%) rename {data => episodes/data}/wellplate-02.tif (100%) rename {fig => episodes/fig}/Gaussian_2D.png (100%) rename {fig => episodes/fig}/Normal_Distribution_PDF.svg (100%) rename {fig => episodes/fig}/beads-canny-ui.png (100%) rename {fig => episodes/fig}/beads-out.png (100%) rename {fig => episodes/fig}/black-and-white-edge-pixels.jpg (100%) rename {fig => episodes/fig}/black-and-white-gradient.png (100%) rename {fig => episodes/fig}/black-and-white.jpg (100%) rename {fig => episodes/fig}/blur-demo.gif (100%) rename {fig => episodes/fig}/board-coordinates.jpg (100%) rename {fig => episodes/fig}/board-final.jpg (100%) rename {fig => episodes/fig}/cartesian-coordinates.png (100%) rename {fig => episodes/fig}/cat-corner-blue.png (100%) rename {fig => episodes/fig}/cat-eye-pixels.jpg (100%) rename {fig => episodes/fig}/cat.jpg (100%) rename {fig => episodes/fig}/chair-layers-rgb.png (100%) rename {fig => episodes/fig}/chair-original.jpg (100%) rename {fig => episodes/fig}/checkerboard-blue-channel.png (100%) rename {fig => episodes/fig}/checkerboard-green-channel.png (100%) rename 
{fig => episodes/fig}/checkerboard-red-channel.png (100%) rename {fig => episodes/fig}/checkerboard.png (100%) rename {fig => episodes/fig}/colonies-01-gray.png (100%) rename {fig => episodes/fig}/colonies-01-histogram.png (100%) rename {fig => episodes/fig}/colonies-01-mask.png (100%) rename {fig => episodes/fig}/colonies-01-summary.png (100%) rename {fig => episodes/fig}/colonies-01.jpg (100%) rename {fig => episodes/fig}/colonies-02-summary.png (100%) rename {fig => episodes/fig}/colonies-02.jpg (100%) rename {fig => episodes/fig}/colonies-03-summary.png (100%) rename {fig => episodes/fig}/colonies-03.jpg (100%) rename {fig => episodes/fig}/colonies01.png (100%) rename {fig => episodes/fig}/colony-mask.png (100%) rename {fig => episodes/fig}/colour-table.png (100%) rename {fig => episodes/fig}/combination.png (100%) rename {fig => episodes/fig}/drawing-practice.jpg (100%) rename {fig => episodes/fig}/eight.png (100%) rename {fig => episodes/fig}/five.png (100%) rename {fig => episodes/fig}/four-maize-roots-binary-improved.jpg (100%) rename {fig => episodes/fig}/four-maize-roots-binary.jpg (100%) rename {fig => episodes/fig}/four-maize-roots.jpg (100%) rename {fig => episodes/fig}/gaussian-blurred.png (100%) rename {fig => episodes/fig}/gaussian-kernel.png (100%) rename {fig => episodes/fig}/grayscale.png (100%) rename {fig => episodes/fig}/image-coordinates.png (100%) rename {fig => episodes/fig}/jupyter_overview.png (100%) rename {fig => episodes/fig}/left-hand-coordinates.png (100%) rename {fig => episodes/fig}/maize-root-cluster-histogram.png (100%) rename {fig => episodes/fig}/maize-root-cluster-mask.png (100%) rename {fig => episodes/fig}/maize-root-cluster-selected.png (100%) rename {fig => episodes/fig}/maize-root-cluster-threshold.jpg (100%) rename {fig => episodes/fig}/maize-roots-threshold.png (100%) rename {fig => episodes/fig}/maize-seedling-enlarged.jpg (100%) rename {fig => episodes/fig}/maize-seedling-original.jpg (100%) rename {fig => 
episodes/fig}/maize-seedlings-mask.png (100%) rename {fig => episodes/fig}/maize-seedlings-masked.jpg (100%) rename {fig => episodes/fig}/maize-seedlings.jpg (100%) rename {fig => episodes/fig}/plant-seedling-colour-histogram.png (100%) rename {fig => episodes/fig}/plant-seedling-grayscale-histogram-mask.png (100%) rename {fig => episodes/fig}/plant-seedling-grayscale-histogram.png (100%) rename {fig => episodes/fig}/plant-seedling-grayscale.png (100%) rename {fig => episodes/fig}/quality-histogram.jpg (100%) rename {fig => episodes/fig}/quality-jpg.jpg (100%) rename {fig => episodes/fig}/quality-original.jpg (100%) rename {fig => episodes/fig}/quality-tif.jpg (100%) rename {fig => episodes/fig}/rectangle-gaussian-blurred.png (100%) rename {fig => episodes/fig}/remote-control-masked.jpg (100%) rename {fig => episodes/fig}/shapes-01-areas-histogram.png (100%) rename {fig => episodes/fig}/shapes-01-canny-edge-output.png (100%) rename {fig => episodes/fig}/shapes-01-canny-edges.png (100%) rename {fig => episodes/fig}/shapes-01-canny-track-edges.png (100%) rename {fig => episodes/fig}/shapes-01-cca-detail.png (100%) rename {fig => episodes/fig}/shapes-01-filtered-objects.png (100%) rename {fig => episodes/fig}/shapes-01-grayscale.png (100%) rename {fig => episodes/fig}/shapes-01-histogram.png (100%) rename {fig => episodes/fig}/shapes-01-labeled.png (100%) rename {fig => episodes/fig}/shapes-01-mask.png (100%) rename {fig => episodes/fig}/shapes-01-objects-coloured-by-area.png (100%) rename {fig => episodes/fig}/shapes-01-selected.png (100%) rename {fig => episodes/fig}/shapes-02-histogram.png (100%) rename {fig => episodes/fig}/shapes-02-mask.png (100%) rename {fig => episodes/fig}/shapes-02-selected.png (100%) rename {fig => episodes/fig}/sudoku-gray.png (100%) rename {fig => episodes/fig}/three-colours.png (100%) rename {fig => episodes/fig}/wellplate-01-masked.jpg (100%) rename {fig => episodes/fig}/wellplate-02-histogram.png (100%) rename {fig => 
episodes/fig}/wellplate-02-masked.jpg (100%) rename {fig => episodes/fig}/wellplate-02.jpg (100%) rename {fig => episodes/fig}/zero.png (100%) rename {_extras => instructors}/edge-detection.md (64%) rename _extras/guide.md => instructors/instructor-notes.md (67%) create mode 100644 instructors/prereqs.md rename {_extras => learners}/discuss.md (95%) rename reference.md => learners/reference.md (98%) create mode 100644 learners/setup.md create mode 100644 profiles/learner-profiles.md delete mode 100644 setup.md create mode 100644 site/README.md diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..5bf4860b1 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +root = true + +[*] +charset = utf-8 +insert_final_newline = true +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space +max_line_length = 100 # Please keep this in sync with bin/lesson_check.py! +trim_trailing_whitespace = false # keep trailing spaces in markdown - 2+ spaces are translated to a hard break (
) + +[*.r] +max_line_length = 80 + +[*.py] +indent_size = 4 +indent_style = space +max_line_length = 79 + +[*.sh] +end_of_line = lf + +[Makefile] +indent_style = tab diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100755 index 000000000..101967e4b --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,198 @@ +# Carpentries Workflows + +This directory contains workflows to be used for Lessons using the {sandpaper} +lesson infrastructure. Two of these workflows require R (`sandpaper-main.yaml` +and `pr-recieve.yaml`) and the rest are bots to handle pull request management. + +These workflows will likely change as {sandpaper} evolves, so it is important to +keep them up-to-date. To do this in your lesson you can do the following in your +R console: + +```r +# Install/Update sandpaper +options(repos = c(carpentries = "https://carpentries.r-universe.dev/", + CRAN = "https://cloud.r-project.org")) +install.packages("sandpaper") + +# update the workflows in your lesson +library("sandpaper") +update_github_workflows() +``` + +Inside this folder, you will find a file called `sandpaper-version.txt`, which +will contain a version number for sandpaper. This will be used in the future to +alert you if a workflow update is needed. + +What follows are the descriptions of the workflow files: + +## Deployment + +### 01 Build and Deploy (sandpaper-main.yaml) + +This is the main driver that will only act on the main branch of the repository. +This workflow does the following: + + 1. checks out the lesson + 2. provisions the following resources + - R + - pandoc + - lesson infrastructure (stored in a cache) + - lesson dependencies if needed (stored in a cache) + 3. builds the lesson via `sandpaper:::ci_deploy()` + +#### Caching + +This workflow has two caches; one cache is for the lesson infrastructure and +the other is for the the lesson dependencies if the lesson contains rendered +content. 
These caches are invalidated by new versions of the infrastructure and +the `renv.lock` file, respectively. If there is a problem with the cache, +manual invaliation is necessary. You will need maintain access to the repository +and you can either go to the actions tab and [click on the caches button to find +and invalidate the failing cache](https://github.blog/changelog/2022-10-20-manage-caches-in-your-actions-workflows-from-web-interface/) +or by setting the `CACHE_VERSION` secret to the current date (which will +invalidate all of the caches). + +## Updates + +### Setup Information + +These workflows run on a schedule and at the maintainer's request. Because they +create pull requests that update workflows/require the downstream actions to run, +they need a special repository/organization secret token called +`SANDPAPER_WORKFLOW` and it must have the `public_repo` and `workflow` scope. + +This can be an individual user token, OR it can be a trusted bot account. If you +have a repository in one of the official Carpentries accounts, then you do not +need to worry about this token being present because the Carpentries Core Team +will take care of supplying this token. + +If you want to use your personal account: you can go to + +to create a token. Once you have created your token, you should copy it to your +clipboard and then go to your repository's settings > secrets > actions and +create or edit the `SANDPAPER_WORKFLOW` secret, pasting in the generated token. + +If you do not specify your token correctly, the runs will not fail and they will +give you instructions to provide the token for your repository. + +### 02 Maintain: Update Workflow Files (update-workflow.yaml) + +The {sandpaper} repository was designed to do as much as possible to separate +the tools from the content. For local builds, this is absolutely true, but +there is a minor issue when it comes to workflow files: they must live inside +the repository. 
+ +This workflow ensures that the workflow files are up-to-date. The way it work is +to download the update-workflows.sh script from GitHub and run it. The script +will do the following: + +1. check the recorded version of sandpaper against the current version on github +2. update the files if there is a difference in versions + +After the files are updated, if there are any changes, they are pushed to a +branch called `update/workflows` and a pull request is created. Maintainers are +encouraged to review the changes and accept the pull request if the outputs +are okay. + +This update is run ~~weekly or~~ on demand. + +### 03 Maintain: Update Pacakge Cache (update-cache.yaml) + +For lessons that have generated content, we use {renv} to ensure that the output +is stable. This is controlled by a single lockfile which documents the packages +needed for the lesson and the version numbers. This workflow is skipped in +lessons that do not have generated content. + +Because the lessons need to remain current with the package ecosystem, it's a +good idea to make sure these packages can be updated periodically. The +update cache workflow will do this by checking for updates, applying them in a +branch called `updates/packages` and creating a pull request with _only the +lockfile changed_. + +From here, the markdown documents will be rebuilt and you can inspect what has +changed based on how the packages have updated. + +## Pull Request and Review Management + +Because our lessons execute code, pull requests are a secruity risk for any +lesson and thus have security measures associted with them. 
**Do not merge any +pull requests that do not pass checks and do not have bots commented on them.** + +This series of workflows all go together and are described in the following +diagram and the below sections: + +![Graph representation of a pull request](https://carpentries.github.io/sandpaper/articles/img/pr-flow.dot.svg) + +### Pre Flight Pull Request Validation (pr-preflight.yaml) + +This workflow runs every time a pull request is created and its purpose is to +validate that the pull request is okay to run. This means the following things: + +1. The pull request does not contain modified workflow files +2. If the pull request contains modified workflow files, it does not contain + modified content files (such as a situation where @carpentries-bot will + make an automated pull request) +3. The pull request does not contain an invalid commit hash (e.g. from a fork + that was made before a lesson was transitioned from styles to use the + workbench). + +Once the checks are finished, a comment is issued to the pull request, which +will allow maintainers to determine if it is safe to run the +"Receive Pull Request" workflow from new contributors. + +### Recieve Pull Request (pr-recieve.yaml) + +**Note of caution:** This workflow runs arbitrary code by anyone who creates a +pull request. GitHub has safeguarded the token used in this workflow to have no +priviledges in the repository, but we have taken precautions to protect against +spoofing. + +This workflow is triggered with every push to a pull request. If this workflow +is already running and a new push is sent to the pull request, the workflow +running from the previous push will be cancelled and a new workflow run will be +started. + +The first step of this workflow is to check if it is valid (e.g. that no +workflow files have been modified). If there are workflow files that have been +modified, a comment is made that indicates that the workflow is not run. 
If +both a workflow file and lesson content is modified, an error will occurr. + +The second step (if valid) is to build the generated content from the pull +request. This builds the content and uploads three artifacts: + +1. The pull request number (pr) +2. A summary of changes after the rendering process (diff) +3. The rendered files (build) + +Because this workflow builds generated content, it follows the same general +process as the `sandpaper-main` workflow with the same caching mechanisms. + +The artifacts produced are used by the next workflow. + +### Comment on Pull Request (pr-comment.yaml) + +This workflow is triggered if the `pr-recieve.yaml` workflow is successful. +The steps in this workflow are: + +1. Test if the workflow is valid and comment the validity of the workflow to the + pull request. +2. If it is valid: create an orphan branch with two commits: the current state + of the repository and the proposed changes. +3. If it is valid: update the pull request comment with the summary of changes + +Importantly: if the pull request is invalid, the branch is not created so any +malicious code is not published. + +From here, the maintainer can request changes from the author and eventually +either merge or reject the PR. When this happens, if the PR was valid, the +preview branch needs to be deleted. + +### Send Close PR Signal (pr-close-signal.yaml) + +Triggered any time a pull request is closed. This emits an artifact that is the +pull request number for the next action + +### Remove Pull Request Branch (pr-post-remove-branch.yaml) + +Tiggered by `pr-close-signal.yaml`. This removes the temporary branch associated with +the pull request (if it was created). 
diff --git a/.github/workflows/pr-close-signal.yaml b/.github/workflows/pr-close-signal.yaml new file mode 100755 index 000000000..9b129d5d2 --- /dev/null +++ b/.github/workflows/pr-close-signal.yaml @@ -0,0 +1,23 @@ +name: "Bot: Send Close Pull Request Signal" + +on: + pull_request: + types: + [closed] + +jobs: + send-close-signal: + name: "Send closing signal" + runs-on: ubuntu-latest + if: ${{ github.event.action == 'closed' }} + steps: + - name: "Create PRtifact" + run: | + mkdir -p ./pr + printf ${{ github.event.number }} > ./pr/NUM + - name: Upload Diff + uses: actions/upload-artifact@v3 + with: + name: pr + path: ./pr + diff --git a/.github/workflows/pr-comment.yaml b/.github/workflows/pr-comment.yaml new file mode 100755 index 000000000..bb2eb03cd --- /dev/null +++ b/.github/workflows/pr-comment.yaml @@ -0,0 +1,185 @@ +name: "Bot: Comment on the Pull Request" + +# read-write repo token +# access to secrets +on: + workflow_run: + workflows: ["Receive Pull Request"] + types: + - completed + +concurrency: + group: pr-${{ github.event.workflow_run.pull_requests[0].number }} + cancel-in-progress: true + + +jobs: + # Pull requests are valid if: + # - they match the sha of the workflow run head commit + # - they are open + # - no .github files were committed + test-pr: + name: "Test if pull request is valid" + runs-on: ubuntu-latest + if: > + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + payload: ${{ steps.check-pr.outputs.payload }} + number: ${{ steps.get-pr.outputs.NUM }} + msg: ${{ steps.check-pr.outputs.MSG }} + steps: + - name: 'Download PR artifact' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'pr' + + - name: "Get PR Number" + if: ${{ steps.dl.outputs.success == 'true' }} + id: get-pr + run: | + unzip pr.zip + echo "NUM=$(<./NR)" >> $GITHUB_OUTPUT + + - 
name: "Fail if PR number was not present" + id: bad-pr + if: ${{ steps.dl.outputs.success != 'true' }} + run: | + echo '::error::A pull request number was not recorded. The pull request that triggered this workflow is likely malicious.' + exit 1 + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "Check PR" + id: check-pr + if: ${{ steps.dl.outputs.success == 'true' }} + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ steps.get-pr.outputs.NUM }} + sha: ${{ github.event.workflow_run.head_sha }} + headroom: 3 # if it's within the last three commits, we can keep going, because it's likely rapid-fire + invalid: ${{ fromJSON(steps.hash.outputs.json)[github.repository] }} + fail_on_error: true + + # Create an orphan branch on this repository with two commits + # - the current HEAD of the md-outputs branch + # - the output from running the current HEAD of the pull request through + # the md generator + create-branch: + name: "Create Git Branch" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + NR: ${{ needs.test-pr.outputs.number }} + permissions: + contents: write + steps: + - name: 'Checkout md outputs' + uses: actions/checkout@v3 + with: + ref: md-outputs + path: built + fetch-depth: 1 + + - name: 'Download built markdown' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'built' + + - if: ${{ steps.dl.outputs.success == 'true' }} + run: unzip built.zip + + - name: "Create orphan and push" + if: ${{ steps.dl.outputs.success == 'true' }} + run: | + cd built/ + git config --local user.email "actions@github.com" + git config --local user.name "GitHub Actions" + CURR_HEAD=$(git rev-parse HEAD) + git checkout --orphan md-outputs-PR-${NR} + git add -A + git commit -m "source commit: ${CURR_HEAD}" + ls -A | grep -v '^.git$' | xargs -I _ rm -r '_' + cd .. 
+ unzip -o -d built built.zip + cd built + git add -A + git commit --allow-empty -m "differences for PR #${NR}" + git push -u --force --set-upstream origin md-outputs-PR-${NR} + + # Comment on the Pull Request with a link to the branch and the diff + comment-pr: + name: "Comment on Pull Request" + needs: [test-pr, create-branch] + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + NR: ${{ needs.test-pr.outputs.number }} + permissions: + pull-requests: write + steps: + - name: 'Download comment artifact' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'diff' + + - if: ${{ steps.dl.outputs.success == 'true' }} + run: unzip ${{ github.workspace }}/diff.zip + + - name: "Comment on PR" + id: comment-diff + if: ${{ steps.dl.outputs.success == 'true' }} + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ env.NR }} + path: ${{ github.workspace }}/diff.md + + # Comment if the PR is open and matches the SHA, but the workflow files have + # changed + comment-changed-workflow: + name: "Comment if workflow files have changed" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ always() && needs.test-pr.outputs.is_valid == 'false' }} + env: + NR: ${{ github.event.workflow_run.pull_requests[0].number }} + body: ${{ needs.test-pr.outputs.msg }} + permissions: + pull-requests: write + steps: + - name: 'Check for spoofing' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'built' + + - name: 'Alert if spoofed' + id: spoof + if: ${{ steps.dl.outputs.success == 'true' }} + run: | + echo 'body<> $GITHUB_ENV + echo '' >> $GITHUB_ENV + echo '## :x: DANGER :x:' >> $GITHUB_ENV + echo 'This pull request has modified workflows that created output. Close this now.' 
>> $GITHUB_ENV + echo '' >> $GITHUB_ENV + echo 'EOF' >> $GITHUB_ENV + + - name: "Comment on PR" + id: comment-diff + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ env.NR }} + body: ${{ env.body }} + diff --git a/.github/workflows/pr-post-remove-branch.yaml b/.github/workflows/pr-post-remove-branch.yaml new file mode 100755 index 000000000..62c2e98d4 --- /dev/null +++ b/.github/workflows/pr-post-remove-branch.yaml @@ -0,0 +1,32 @@ +name: "Bot: Remove Temporary PR Branch" + +on: + workflow_run: + workflows: ["Bot: Send Close Pull Request Signal"] + types: + - completed + +jobs: + delete: + name: "Delete branch from Pull Request" + runs-on: ubuntu-latest + if: > + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + permissions: + contents: write + steps: + - name: 'Download artifact' + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: pr + - name: "Get PR Number" + id: get-pr + run: | + unzip pr.zip + echo "NUM=$(<./NUM)" >> $GITHUB_OUTPUT + - name: 'Remove branch' + uses: carpentries/actions/remove-branch@main + with: + pr: ${{ steps.get-pr.outputs.NUM }} diff --git a/.github/workflows/pr-preflight.yaml b/.github/workflows/pr-preflight.yaml new file mode 100755 index 000000000..d0d7420dc --- /dev/null +++ b/.github/workflows/pr-preflight.yaml @@ -0,0 +1,39 @@ +name: "Pull Request Preflight Check" + +on: + pull_request_target: + branches: + ["main"] + types: + ["opened", "synchronize", "reopened"] + +jobs: + test-pr: + name: "Test if pull request is valid" + if: ${{ github.event.action != 'closed' }} + runs-on: ubuntu-latest + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + permissions: + pull-requests: write + steps: + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "Check PR" + id: check-pr + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ github.event.number 
}} + invalid: ${{ fromJSON(steps.hash.outputs.json)[github.repository] }} + fail_on_error: true + - name: "Comment result of validation" + id: comment-diff + if: ${{ always() }} + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ github.event.number }} + body: ${{ steps.check-pr.outputs.MSG }} diff --git a/.github/workflows/pr-receive.yaml b/.github/workflows/pr-receive.yaml new file mode 100755 index 000000000..371ef542b --- /dev/null +++ b/.github/workflows/pr-receive.yaml @@ -0,0 +1,131 @@ +name: "Receive Pull Request" + +on: + pull_request: + types: + [opened, synchronize, reopened] + +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + +jobs: + test-pr: + name: "Record PR number" + if: ${{ github.event.action != 'closed' }} + runs-on: ubuntu-latest + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + steps: + - name: "Record PR number" + id: record + if: ${{ always() }} + run: | + echo ${{ github.event.number }} > ${{ github.workspace }}/NR # 2022-03-02: artifact name fixed to be NR + - name: "Upload PR number" + id: upload + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: pr + path: ${{ github.workspace }}/NR + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "echo output" + run: | + echo "${{ steps.hash.outputs.json }}" + - name: "Check PR" + id: check-pr + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ github.event.number }} + invalid: ${{ fromJSON(steps.hash.outputs.json)[github.repository] }} + + build-md-source: + name: "Build markdown source files if valid" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + CHIVE: ${{ github.workspace }}/site/chive + PR: ${{ github.workspace }}/site/pr + MD: ${{ github.workspace }}/site/built + steps: + - name: "Check Out Main Branch" + uses: actions/checkout@v3 + + 
- name: "Check Out Staging Branch" + uses: actions/checkout@v3 + with: + ref: md-outputs + path: ${{ env.MD }} + + - name: "Set up R" + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Set up Pandoc" + uses: r-lib/actions/setup-pandoc@v2 + + - name: "Setup Lesson Engine" + uses: carpentries/actions/setup-sandpaper@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Setup Package Cache" + uses: carpentries/actions/setup-lesson-deps@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Validate and Build Markdown" + id: build-site + run: | + sandpaper::package_cache_trigger(TRUE) + sandpaper::validate_lesson(path = '${{ github.workspace }}') + sandpaper:::build_markdown(path = '${{ github.workspace }}', quiet = FALSE) + shell: Rscript {0} + + - name: "Generate Artifacts" + id: generate-artifacts + run: | + sandpaper:::ci_bundle_pr_artifacts( + repo = '${{ github.repository }}', + pr_number = '${{ github.event.number }}', + path_md = '${{ env.MD }}', + path_pr = '${{ env.PR }}', + path_archive = '${{ env.CHIVE }}', + branch = 'md-outputs' + ) + shell: Rscript {0} + + - name: "Upload PR" + uses: actions/upload-artifact@v3 + with: + name: pr + path: ${{ env.PR }} + + - name: "Upload Diff" + uses: actions/upload-artifact@v3 + with: + name: diff + path: ${{ env.CHIVE }} + retention-days: 1 + + - name: "Upload Build" + uses: actions/upload-artifact@v3 + with: + name: built + path: ${{ env.MD }} + retention-days: 1 + + - name: "Teardown" + run: sandpaper::reset_site() + shell: Rscript {0} diff --git a/.github/workflows/sandpaper-main.yaml b/.github/workflows/sandpaper-main.yaml new file mode 100755 index 000000000..e17707acd --- /dev/null +++ b/.github/workflows/sandpaper-main.yaml @@ -0,0 +1,61 @@ +name: "01 Build and Deploy Site" + +on: + push: + branches: + - main + - master + schedule: + - cron: '0 0 * * 2' + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build?' 
+ required: true + default: 'Maintainer (via GitHub)' + reset: + description: 'Reset cached markdown files' + required: false + default: false + type: boolean +jobs: + full-build: + name: "Build Full Site" + runs-on: ubuntu-latest + permissions: + checks: write + contents: write + pages: write + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + steps: + + - name: "Checkout Lesson" + uses: actions/checkout@v3 + + - name: "Set up R" + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Set up Pandoc" + uses: r-lib/actions/setup-pandoc@v2 + + - name: "Setup Lesson Engine" + uses: carpentries/actions/setup-sandpaper@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Setup Package Cache" + uses: carpentries/actions/setup-lesson-deps@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Deploy Site" + run: | + reset <- "${{ github.event.inputs.reset }}" == "true" + sandpaper::package_cache_trigger(TRUE) + sandpaper:::ci_deploy(reset = reset) + shell: Rscript {0} diff --git a/.github/workflows/sandpaper-version.txt b/.github/workflows/sandpaper-version.txt new file mode 100644 index 000000000..4aa090693 --- /dev/null +++ b/.github/workflows/sandpaper-version.txt @@ -0,0 +1 @@ +0.11.15 diff --git a/.github/workflows/update-cache.yaml b/.github/workflows/update-cache.yaml new file mode 100755 index 000000000..676d7424c --- /dev/null +++ b/.github/workflows/update-cache.yaml @@ -0,0 +1,125 @@ +name: "03 Maintain: Update Package Cache" + +on: + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build (enter github username to tag yourself)?' 
+ required: true + default: 'monthly run' + schedule: + # Run every tuesday + - cron: '0 0 * * 2' + +jobs: + preflight: + name: "Preflight Check" + runs-on: ubuntu-latest + outputs: + ok: ${{ steps.check.outputs.ok }} + steps: + - id: check + run: | + if [[ ${{ github.event_name }} == 'workflow_dispatch' ]]; then + echo "ok=true" >> $GITHUB_OUTPUT + echo "Running on request" + # using single brackets here to avoid 08 being interpreted as octal + # https://github.com/carpentries/sandpaper/issues/250 + elif [ `date +%d` -le 7 ]; then + # If the Tuesday lands in the first week of the month, run it + echo "ok=true" >> $GITHUB_OUTPUT + echo "Running on schedule" + else + echo "ok=false" >> $GITHUB_OUTPUT + echo "Not Running Today" + fi + + check_renv: + name: "Check if We Need {renv}" + runs-on: ubuntu-latest + needs: preflight + if: ${{ needs.preflight.outputs.ok == 'true'}} + outputs: + needed: ${{ steps.renv.outputs.exists }} + steps: + - name: "Checkout Lesson" + uses: actions/checkout@v3 + - id: renv + run: | + if [[ -d renv ]]; then + echo "exists=true" >> $GITHUB_OUTPUT + fi + + check_token: + name: "Check SANDPAPER_WORKFLOW token" + runs-on: ubuntu-latest + needs: check_renv + if: ${{ needs.check_renv.outputs.needed == 'true' }} + outputs: + workflow: ${{ steps.validate.outputs.wf }} + repo: ${{ steps.validate.outputs.repo }} + steps: + - name: "validate token" + id: validate + uses: carpentries/actions/check-valid-credentials@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + + update_cache: + name: "Update Package Cache" + needs: check_token + if: ${{ needs.check_token.outputs.repo== 'true' }} + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + steps: + + - name: "Checkout Lesson" + uses: actions/checkout@v3 + + - name: "Set up R" + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Update {renv} deps and determine if a PR is needed" + id: update 
+ uses: carpentries/actions/update-lockfile@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: Create Pull Request + id: cpr + if: ${{ steps.update.outputs.n > 0 }} + uses: carpentries/create-pull-request@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + delete-branch: true + branch: "update/packages" + commit-message: "[actions] update ${{ steps.update.outputs.n }} packages" + title: "Update ${{ steps.update.outputs.n }} packages" + body: | + :robot: This is an automated build + + This will update ${{ steps.update.outputs.n }} packages in your lesson with the following versions: + + ``` + ${{ steps.update.outputs.report }} + ``` + + :stopwatch: In a few minutes, a comment will appear that will show you how the output has changed based on these updates. + + If you want to inspect these changes locally, you can use the following code to check out a new branch: + + ```bash + git fetch origin update/packages + git checkout update/packages + ``` + + - Auto-generated by [create-pull-request][1] on ${{ steps.update.outputs.date }} + + [1]: https://github.com/carpentries/create-pull-request/tree/main + labels: "type: package cache" + draft: false diff --git a/.github/workflows/update-workflows.yaml b/.github/workflows/update-workflows.yaml new file mode 100755 index 000000000..288bcd139 --- /dev/null +++ b/.github/workflows/update-workflows.yaml @@ -0,0 +1,66 @@ +name: "02 Maintain: Update Workflow Files" + +on: + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build (enter github username to tag yourself)?' 
+ required: true + default: 'weekly run' + clean: + description: 'Workflow files/file extensions to clean (no wildcards, enter "" for none)' + required: false + default: '.yaml' + schedule: + # Run every Tuesday + - cron: '0 0 * * 2' + +jobs: + check_token: + name: "Check SANDPAPER_WORKFLOW token" + runs-on: ubuntu-latest + outputs: + workflow: ${{ steps.validate.outputs.wf }} + repo: ${{ steps.validate.outputs.repo }} + steps: + - name: "validate token" + id: validate + uses: carpentries/actions/check-valid-credentials@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + + update_workflow: + name: "Update Workflow" + runs-on: ubuntu-latest + needs: check_token + if: ${{ needs.check_token.outputs.workflow == 'true' }} + steps: + - name: "Checkout Repository" + uses: actions/checkout@v3 + + - name: Update Workflows + id: update + uses: carpentries/actions/update-workflows@main + with: + clean: ${{ github.event.inputs.clean }} + + - name: Create Pull Request + id: cpr + if: "${{ steps.update.outputs.new }}" + uses: carpentries/create-pull-request@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + delete-branch: true + branch: "update/workflows" + commit-message: "[actions] update sandpaper workflow to version ${{ steps.update.outputs.new }}" + title: "Update Workflows to Version ${{ steps.update.outputs.new }}" + body: | + :robot: This is an automated build + + Update Workflows from sandpaper version ${{ steps.update.outputs.old }} -> ${{ steps.update.outputs.new }} + + - Auto-generated by [create-pull-request][1] on ${{ steps.update.outputs.date }} + + [1]: https://github.com/carpentries/create-pull-request/tree/main + labels: "type: template and tools" + draft: false diff --git a/.github/workflows/workbench-beta-phase.yml b/.github/workflows/workbench-beta-phase.yml new file mode 100644 index 000000000..2faa25d9c --- /dev/null +++ b/.github/workflows/workbench-beta-phase.yml @@ -0,0 +1,60 @@ +name: "Deploy to AWS" + +on: + workflow_run: + workflows: 
["01 Build and Deploy Site"] + types: + - completed + workflow_dispatch: + +jobs: + preflight: + name: "Preflight Check" + runs-on: ubuntu-latest + outputs: + ok: ${{ steps.check.outputs.ok }} + folder: ${{ steps.check.outputs.folder }} + steps: + - id: check + run: | + if [[ -z "${{ secrets.DISTRIBUTION }}" || -z "${{ secrets.AWS_ACCESS_KEY_ID }}" || -z "${{ secrets.AWS_SECRET_ACCESS_KEY }}" ]]; then + echo ":information_source: No site configured" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo 'To deploy the preview on AWS, you need the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `DISTRIBUTION` secrets set up' >> $GITHUB_STEP_SUMMARY + else + echo "::set-output name=folder::"$(sed -E 's^.+/(.+)^\1^' <<< ${{ github.repository }}) + echo "::set-output name=ok::true" + fi + + full-build: + name: "Deploy to AWS" + needs: [preflight] + if: ${{ needs.preflight.outputs.ok }} + runs-on: ubuntu-latest + steps: + + - name: "Checkout site folder" + uses: actions/checkout@v3 + with: + ref: 'gh-pages' + path: 'source' + + - name: "Deploy to Bucket" + uses: jakejarvis/s3-sync-action@v0.5.1 + with: + args: --acl public-read --follow-symlinks --delete --exclude '.git/*' + env: + AWS_S3_BUCKET: preview.carpentries.org + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + SOURCE_DIR: 'source' + DEST_DIR: ${{ needs.preflight.outputs.folder }} + + - name: "Invalidate CloudFront" + uses: chetan/invalidate-cloudfront-action@master + env: + PATHS: /* + AWS_REGION: 'us-east-1' + DISTRIBUTION: ${{ secrets.DISTRIBUTION }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..34293534a --- /dev/null +++ b/.gitignore @@ -0,0 +1,56 @@ +# sandpaper files +episodes/*html +site/* +!site/README.md + +# History files +.Rhistory +.Rapp.history +# Session Data files +.RData +# 
User-specific files +.Ruserdata +# Example code in package build process +*-Ex.R +# Output files from R CMD build +/*.tar.gz +# Output files from R CMD check +/*.Rcheck/ +# RStudio files +.Rproj.user/ +# produced vignettes +vignettes/*.html +vignettes/*.pdf +# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 +.httr-oauth +# knitr and R markdown default cache directories +*_cache/ +/cache/ +# Temporary files created by R markdown +*.utf8.md +*.knit.md +# R Environment Variables +.Renviron +# pkgdown site +docs/ +# translation temp files +po/*~ +# renv detritus +renv/sandbox/ +*.pyc +*~ +.DS_Store +.ipynb_checkpoints +.sass-cache +.jekyll-cache/ +.jekyll-metadata +__pycache__ +_site +.Rproj.user +.bundle/ +.vendor/ +vendor/ +.docker-vendor/ +Gemfile.lock +.*history +.vscode/* diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..f19b80495 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,13 @@ +--- +title: "Contributor Code of Conduct" +--- + +As contributors and maintainers of this project, +we pledge to follow the [The Carpentries Code of Conduct][coc]. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by following our [reporting guidelines][coc-reporting]. + + +[coc-reporting]: https://docs.carpentries.org/topic_folders/policies/incident-reporting.html +[coc]: https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..ec44704c2 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,121 @@ +## Contributing + +[The Carpentries][cp-site] ([Software Carpentry][swc-site], [Data +Carpentry][dc-site], and [Library Carpentry][lc-site]) are open source +projects, and we welcome contributions of all kinds: new lessons, fixes to +existing material, bug reports, and reviews of proposed changes are all +welcome. 
+ +### Contributor Agreement + +By contributing, you agree that we may redistribute your work under [our +license](LICENSE.md). In exchange, we will address your issues and/or assess +your change proposal as promptly as we can, and help you become a member of our +community. Everyone involved in [The Carpentries][cp-site] agrees to abide by +our [code of conduct](CODE_OF_CONDUCT.md). + +### How to Contribute + +The easiest way to get started is to file an issue to tell us about a spelling +mistake, some awkward wording, or a factual error. This is a good way to +introduce yourself and to meet some of our community members. + +1. If you do not have a [GitHub][github] account, you can [send us comments by + email][contact]. However, we will be able to respond more quickly if you use + one of the other methods described below. + +2. If you have a [GitHub][github] account, or are willing to [create + one][github-join], but do not know how to use Git, you can report problems + or suggest improvements by [creating an issue][issues]. This allows us to + assign the item to someone and to respond to it in a threaded discussion. + +3. If you are comfortable with Git, and would like to add or change material, + you can submit a pull request (PR). Instructions for doing this are + [included below](#using-github). + +Note: if you want to build the website locally, please refer to [The Workbench +documentation][template-doc]. + +### Where to Contribute + +1. If you wish to change this lesson, add issues and pull requests here. +2. If you wish to change the template used for workshop websites, please refer + to [The Workbench documentation][template-doc]. + + +### What to Contribute + +There are many ways to contribute, from writing new exercises and improving +existing ones to updating or filling in the documentation and submitting [bug +reports][issues] about things that do not work, are not clear, or are missing. 
+If you are looking for ideas, please see [the list of issues for this +repository][repo], or the issues for [Data Carpentry][dc-issues], [Library +Carpentry][lc-issues], and [Software Carpentry][swc-issues] projects. + +Comments on issues and reviews of pull requests are just as welcome: we are +smarter together than we are on our own. **Reviews from novices and newcomers +are particularly valuable**: it's easy for people who have been using these +lessons for a while to forget how impenetrable some of this material can be, so +fresh eyes are always welcome. + +### What *Not* to Contribute + +Our lessons already contain more material than we can cover in a typical +workshop, so we are usually *not* looking for more concepts or tools to add to +them. As a rule, if you want to introduce a new idea, you must (a) estimate how +long it will take to teach and (b) explain what you would take out to make room +for it. The first encourages contributors to be honest about requirements; the +second, to think hard about priorities. + +We are also not looking for exercises or other material that only run on one +platform. Our workshops typically contain a mixture of Windows, macOS, and +Linux users; in order to be usable, our lessons must run equally well on all +three. + +### Using GitHub + +If you choose to contribute via GitHub, you may want to look at [How to +Contribute to an Open Source Project on GitHub][how-contribute]. In brief, we +use [GitHub flow][github-flow] to manage changes: + +1. Create a new branch in your desktop copy of this repository for each + significant change. +2. Commit the change in that branch. +3. Push that branch to your fork of this repository on GitHub. +4. Submit a pull request from that branch to the [upstream repository][repo]. +5. If you receive feedback, make changes on your desktop and push to your + branch on GitHub: the pull request will update automatically. + +NB: The published copy of the lesson is usually in the `main` branch. 
+ +Each lesson has a team of maintainers who review issues and pull requests or +encourage others to do so. The maintainers are community volunteers, and have +final say over what gets merged into the lesson. + +### Other Resources + +The Carpentries is a global organisation with volunteers and learners all over +the world. We share values of inclusivity and a passion for sharing knowledge, +teaching and learning. There are several ways to connect with The Carpentries +community listed at including via social +media, slack, newsletters, and email lists. You can also [reach us by +email][contact]. + +[repo]: https://example.com/FIXME +[contact]: mailto:team@carpentries.org +[cp-site]: https://carpentries.org/ +[dc-issues]: https://github.com/issues?q=user%3Adatacarpentry +[dc-lessons]: https://datacarpentry.org/lessons/ +[dc-site]: https://datacarpentry.org/ +[discuss-list]: https://lists.software-carpentry.org/listinfo/discuss +[github]: https://github.com +[github-flow]: https://guides.github.com/introduction/flow/ +[github-join]: https://github.com/join +[how-contribute]: https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github +[issues]: https://carpentries.org/help-wanted-issues/ +[lc-issues]: https://github.com/issues?q=user%3ALibraryCarpentry +[swc-issues]: https://github.com/issues?q=user%3Aswcarpentry +[swc-lessons]: https://software-carpentry.org/lessons/ +[swc-site]: https://software-carpentry.org/ +[lc-site]: https://librarycarpentry.org/ +[template-doc]: https://carpentries.github.io/workbench/ diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..7632871ff --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,79 @@ +--- +title: "Licenses" +--- + +## Instructional Material + +All Carpentries (Software Carpentry, Data Carpentry, and Library Carpentry) +instructional material is made available under the [Creative Commons +Attribution license][cc-by-human]. 
The following is a human-readable summary of +(and not a substitute for) the [full legal text of the CC BY 4.0 +license][cc-by-legal]. + +You are free: + +- to **Share**---copy and redistribute the material in any medium or format +- to **Adapt**---remix, transform, and build upon the material + +for any purpose, even commercially. + +The licensor cannot revoke these freedoms as long as you follow the license +terms. + +Under the following terms: + +- **Attribution**---You must give appropriate credit (mentioning that your work + is derived from work that is Copyright (c) The Carpentries and, where + practical, linking to ), provide a [link to the + license][cc-by-human], and indicate if changes were made. You may do so in + any reasonable manner, but not in any way that suggests the licensor endorses + you or your use. + +- **No additional restrictions**---You may not apply legal terms or + technological measures that legally restrict others from doing anything the + license permits. With the understanding that: + +Notices: + +* You do not have to comply with the license for elements of the material in + the public domain or where your use is permitted by an applicable exception + or limitation. +* No warranties are given. The license may not give you all of the permissions + necessary for your intended use. For example, other rights such as publicity, + privacy, or moral rights may limit how you use the material. + +## Software + +Except where otherwise noted, the example programs and other software provided +by The Carpentries are made available under the [OSI][osi]-approved [MIT +license][mit-license]. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Trademark + +"The Carpentries", "Software Carpentry", "Data Carpentry", and "Library +Carpentry" and their respective logos are registered trademarks of [Community +Initiatives][ci]. + +[cc-by-human]: https://creativecommons.org/licenses/by/4.0/ +[cc-by-legal]: https://creativecommons.org/licenses/by/4.0/legalcode +[mit-license]: https://opensource.org/licenses/mit-license.html +[ci]: https://communityin.org/ +[osi]: https://opensource.org diff --git a/README.md b/README.md index 6bdc41969..89373d321 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ A lesson teaching foundational image processing skills with Python and [scikit-i This lesson introduces fundamental concepts in image handling and processing. Learners will gain the skills needed to load images into Python, to select, summarise, and modify specific regions in these image, and to identify and extract objects within an image for further analysis. 
The lesson assumes a working knowledge of Python and some previous exposure to the Bash shell. -A detailed list of prerequisites can be found in [`_extras/prereqs.md`](_extras/prereqs.md). +A detailed list of prerequisites can be found in [`_extras/prereqs.md`](learners/prereqs.md). Image Processing with Python is planned for release as an official [Data Carpentry](https://datacarpentry.org/) curriculum in 2022. @@ -26,11 +26,13 @@ All participants should agree to abide by the [The Carpentries Code of Conduct]( The Image Processing with Python lesson is currently being developed by: -* [Kimberly Meechan](https://github.com/K-Meech) -* [David Palmquist](https://github.com/quist00) -* [Ulf Schiller](https://github.com/uschille) -* [Robert Turner](https://github.com/bobturneruk) -* [Erin Becker](https://github.com/ErinBecker) -* [Toby Hodges](https://github.com/tobyhodges) +- [Kimberly Meechan](https://github.com/K-Meech) +- [David Palmquist](https://github.com/quist00) +- [Ulf Schiller](https://github.com/uschille) +- [Robert Turner](https://github.com/bobturneruk) +- [Erin Becker](https://github.com/ErinBecker) +- [Toby Hodges](https://github.com/tobyhodges) They are building on previous work by [Mark Meysenburg](https://github.com/mmeysenburg), [Tessa Durham Brooks](https://github.com/tessalea), [Dominik Kutra](https://github.com/k-dominik) and [Constantin Pape](https://github.com/constantinpape). + + diff --git a/_extras/.Rhistory b/_extras/.Rhistory deleted file mode 100644 index e69de29bb..000000000 diff --git a/_extras/prereqs.md b/_extras/prereqs.md deleted file mode 100644 index c5c98ca61..000000000 --- a/_extras/prereqs.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Prerequisites ---- - -This lesson assumes you have a working knowledge of Python and some previous exposure to the Bash shell. - -These requirements can be fulfilled by: - -1. completing a Software Carpentry Python workshop **or** -1. 
completing a Data Carpentry Ecology workshop (with Python) **and** a Data Carpentry Genomics workshop **or** -1. coursework in or independent learning of both Python and the Bash shell. - -### Bash shell skills - -The skill set listed below is covered in any Software Carpentry workshop, as well -as in Data Carpentry's Genomics workshop. These skills can also be learned -through coursework or independent learning. - -Be able to: -* Identify and navigate to your home directory. -* Identify your current working directory. -* Navigating directories using `pwd`, `ls`, `cd `, and `cd ..` -* Run a Python script from the command line. - -### Python skills - -This skill set listed below is covered in both Software Carpentry's Python workshop and -in Data Carpentry's Ecology workshop with Python. These skills can also be learned -through coursework or independent learning. - -Be able to: -* Use the assignment operator to create `int`, `float`, and `str` variables. -* Perform basic arithmetic operations (e.g. addition, subtraction) on variables. -* Convert strings to ints or floats where appropriate. -* Create a `list` and alter lists by appending, inserting, or removing values. -* Use indexing and slicing to access elements of strings, lists, and Numpy arrays. -* Use good coding practices to comment your code and choose appropriate variable names. -* Write a `for` loop that increments a variable. -* Write conditional statements using `if`, `elif`, and `else`. -* Use comparison operators (`==`, `!=`, `<`, `<=`, `>`, `>=`) in conditional statements. -* Read data from a file using `read()`, `readline()`, and `readlines()`. -* Open, read from, write to, and close input and output files. -* Use `print()` and `len()` to inspect variables. - -The following skills are useful, but not required: -* Apply a function to an entire Numpy array or to a single array axis. -* Write a user-defined function. 
- -If you are signed up, or considering signing up for a workshop, and aren't sure whether you meet these reqirements, please -get in touch with the workshop instructors or host. diff --git a/config.yaml b/config.yaml new file mode 100644 index 000000000..9a2717efa --- /dev/null +++ b/config.yaml @@ -0,0 +1,90 @@ +#------------------------------------------------------------ +# Values for this lesson. +#------------------------------------------------------------ + +# Which carpentry is this (swc, dc, lc, or cp)? +# swc: Software Carpentry +# dc: Data Carpentry +# lc: Library Carpentry +# cp: Carpentries (to use for instructor training for instance) +# incubator: The Carpentries Incubator +carpentry: 'dc' + +# Overall title for pages. +title: 'Image Processing with Python' + +# Date the lesson was created (YYYY-MM-DD, this is empty by default) +created: + +# Comma-separated list of keywords for the lesson +keywords: 'software, data, lesson, The Carpentries' + +# Life cycle stage of the lesson +# possible values: pre-alpha, alpha, beta, stable +life_cycle: 'stable' + +# License of the lesson materials (recommended CC-BY 4.0) +license: 'CC-BY 4.0' + +# Link to the source repository for this lesson +source: 'https://github.com/fishtree-attempt/image-processing/' + +# Default branch of your lesson +branch: 'main' + +# Who to contact if there are any issues +contact: 'team@carpentries.org' + +# Navigation ------------------------------------------------ +# +# Use the following menu items to specify the order of +# individual pages in each dropdown section. Leave blank to +# include all pages in the folder. 
+# +# Example ------------- +# +# episodes: +# - introduction.md +# - first-steps.md +# +# learners: +# - setup.md +# +# instructors: +# - instructor-notes.md +# +# profiles: +# - one-learner.md +# - another-learner.md + +# Order of episodes in your lesson +episodes: +- 01-introduction.md +- 02-image-basics.md +- 03-skimage-images.md +- 04-drawing.md +- 05-creating-histograms.md +- 06-blurring.md +- 07-thresholding.md +- 08-connected-components.md +- 09-challenges.md + +# Information for Learners +learners: + +# Information for Instructors +instructors: + +# Learner Profiles +profiles: + +# Customisation --------------------------------------------- +# +# This space below is where custom yaml items (e.g. pinning +# sandpaper and varnish versions) should live + + +url: https://preview.carpentries.org/image-processing +analytics: carpentries +lang: en +workbench-beta: 'true' diff --git a/episodes/01-introduction.md b/episodes/01-introduction.md index 9b879bb76..96146380d 100644 --- a/episodes/01-introduction.md +++ b/episodes/01-introduction.md @@ -1,23 +1,22 @@ --- -title: "Introduction" +title: Introduction teaching: 5 exercises: 0 -questions: -- "What sort of scientific questions can we answer with image processing / -computer vision?" -- "What are morphometric problems?" -objectives: -- "Recognise scientific questions that could be solved with image processing - / computer vision." -- "Recognise morphometric problems (those dealing with the number, size, or -shape of the objects in an image)." -keypoints: -- "Simple Python and skimage (scikit-image) techniques can be used to solve genuine -image analysis problems." -- "Morphometric problems involve the number, shape, and / or size of the -objects in an image." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Recognise scientific questions that could be solved with image processing / computer vision. 
+- Recognise morphometric problems (those dealing with the number, size, or shape of the objects in an image). + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- What sort of scientific questions can we answer with image processing / computer vision? +- What are morphometric problems? + +:::::::::::::::::::::::::::::::::::::::::::::::::: As computer systems have become faster and more powerful, and cameras and other imaging systems have become commonplace @@ -33,11 +32,10 @@ to be automated as a computer program. This lesson introduces an open source toolkit for processing image data: the Python programming language -and [the _scikit-image_ (`skimage`) library](https://scikit-image.org/). -With careful experimental design, +and [the *scikit-image* (`skimage`) library](https://scikit-image.org/). +With careful experimental design, Python code can be a powerful instrument in answering many different kinds of questions. - ## Uses of Image Processing in Research Automated processing can be used to analyse many different properties of an image, @@ -50,17 +48,16 @@ Some examples of image processing methods applied in research include: - [imaging a Black Hole](https://iopscience.iop.org/article/10.3847/2041-8213/ab0e85) - [estimating the population of Emperor Penguins](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3325796/) -- [the global-scale analysis of marine plankton diversity](https://www.cell.com/cell/fulltext/S0092-8674(19)31124-9) +- [the global-scale analysis of marine plankton diversity](https://www.cell.com/cell/fulltext/S0092-8674\(19\)31124-9) - [segmentation of liver and vessels from CT images](https://doi.org/10.1016/j.cmpb.2017.12.008) With this lesson, we aim to provide a thorough grounding in the fundamental concepts and skills of working with image data in Python. 
Most of the examples used in this lesson focus on -one particular class of image processing technique, _morphometrics_, +one particular class of image processing technique, *morphometrics*, but what you will learn can be used to solve a much wider range of problems. - ## Morphometrics Morphometrics involves counting the number of objects in an image, @@ -70,50 +67,62 @@ For example, we might be interested in automatically counting the number of bacterial colonies growing in a Petri dish, as shown in this image: -![Bacteria colony](../fig/colonies-01.jpg) +![](fig/colonies-01.jpg){alt='Bacteria colony'} We could use image processing to find the colonies, count them, and then highlight their locations on the original image, resulting in an image like this: -![Colonies counted](../fig/colony-mask.png) - -> ## Why write a program to do that? -> -> Note that you can easily manually count the number of bacteria colonies -> shown in the morphometric example above. -> Why should we learn how to write a Python program to do a task -> we could easily perform with our own eyes? -> There are at least two reasons to learn how to perform tasks like these -> with Python and skimage: -> -> 1. What if there are many more bacteria colonies in the Petri dish? -> For example, suppose the image looked like this: -> -> ![Bacteria colony](../fig/colonies-03.jpg) -> -> Manually counting the colonies in that image would present more of a challenge. -> A Python program using skimage could count the number of colonies more accurately, -> and much more quickly, than a human could. -> -> 2. What if you have hundreds, or thousands, of images to consider? -> Imagine having to manually count colonies on several thousand images -> like those above. -> A Python program using skimage could move through all of the images in seconds; -> how long would a graduate student require to do the task? -> Which process would be more accurate and repeatable? 
-> -> As you can see, the simple image processing / computer vision techniques you -> will learn during this workshop can be very valuable tools for scientific -> research. -{: .callout} - - -As we move through this workshop, -we will learn image analysis methods useful for many different scientific problems. +![](fig/colony-mask.png){alt='Colonies counted'} + +::::::::::::::::::::::::::::::::::::::::: callout + +## Why write a program to do that? + +Note that you can easily manually count the number of bacteria colonies +shown in the morphometric example above. +Why should we learn how to write a Python program to do a task +we could easily perform with our own eyes? +There are at least two reasons to learn how to perform tasks like these +with Python and skimage: + +1. What if there are many more bacteria colonies in the Petri dish? + For example, suppose the image looked like this: + +![](fig/colonies-03.jpg){alt='Bacteria colony'} + +Manually counting the colonies in that image would present more of a challenge. +A Python program using skimage could count the number of colonies more accurately, +and much more quickly, than a human could. + +2. What if you have hundreds, or thousands, of images to consider? + Imagine having to manually count colonies on several thousand images + like those above. + A Python program using skimage could move through all of the images in seconds; + how long would a graduate student require to do the task? + Which process would be more accurate and repeatable? + +As you can see, the simple image processing / computer vision techniques you +will learn during this workshop can be very valuable tools for scientific +research. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +As we move through this workshop, +we will learn image analysis methods useful for many different scientific problems. 
These will be linked together and applied to a real problem in the final end-of-workshop -[capstone challenge]({{ page.root }}{% link _episodes/09-challenges.md %}). +[capstone challenge](09-challenges.md). Let's get started, by learning some basics about how images are represented and stored digitally. + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Simple Python and skimage (scikit-image) techniques can be used to solve genuine image analysis problems. +- Morphometric problems involve the number, shape, and / or size of the objects in an image. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/02-image-basics.md b/episodes/02-image-basics.md index 08ea01822..aba8162e9 100644 --- a/episodes/02-image-basics.md +++ b/episodes/02-image-basics.md @@ -1,45 +1,31 @@ --- -title: "Image Basics" +title: Image Basics teaching: 20 exercises: 5 -questions: -- "How are images represented in digital format?" -objectives: -- "Define the terms bit, byte, kilobyte, megabyte, etc." -- "Explain how a digital image is composed of pixels." -- "Recommend using imageio (resp. skimage) for I/O (resp. image processing) tasks." -- "Explain how images are stored in NumPy arrays." -- "Explain the left-hand coordinate system used in digital images." -- "Explain the RGB additive colour model used in digital images." -- "Explain the order of the three colour values in skimage images." -- "Explain the characteristics of the BMP, JPEG, and TIFF image formats." -- "Explain the difference between lossy and lossless compression." -- "Explain the advantages and disadvantages of compressed image formats." -- "Explain what information could be contained in image metadata." -keypoints: -- "Digital images are represented as rectangular arrays of square pixels." -- "Digital images use a left-hand coordinate system, with the origin in the -upper left corner, the x-axis running to the right, and the y-axis running -down. 
Some learners may prefer to think in terms of counting down rows -for the y-axis and across columns for the x-axis. Thus, we will make an -effort to allow for both approaches in our lesson presentation." -- "Most frequently, digital images use an additive RGB model, with eight bits -for the red, green, and blue channels." -- "skimage images are stored as multi-dimensional NumPy arrays." -- "In skimage images, the red channel is specified first, then the green, then -the blue, i.e., RGB." -- "Lossless compression retains all the details in an image, but lossy -compression results in loss of some of the original image detail." -- "BMP images are uncompressed, meaning they have high quality but also that -their file sizes are large." -- "JPEG images use lossy compression, meaning that their file sizes are -smaller, but image quality may suffer." -- "TIFF images can be uncompressed or compressed with lossy or lossless -compression." -- "Depending on the camera or sensor, various useful pieces of information may -be stored in an image file, in the image metadata." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Define the terms bit, byte, kilobyte, megabyte, etc. +- Explain how a digital image is composed of pixels. +- Recommend using imageio (resp. skimage) for I/O (resp. image processing) tasks. +- Explain how images are stored in NumPy arrays. +- Explain the left-hand coordinate system used in digital images. +- Explain the RGB additive colour model used in digital images. +- Explain the order of the three colour values in skimage images. +- Explain the characteristics of the BMP, JPEG, and TIFF image formats. +- Explain the difference between lossy and lossless compression. +- Explain the advantages and disadvantages of compressed image formats. +- Explain what information could be contained in image metadata. 
+ +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How are images represented in digital format? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + The images we see on hard copy, view with our electronic devices, or process with our programs are represented and stored in the computer as numeric abstractions, approximations of what we see with our eyes in the real world. @@ -56,12 +42,12 @@ Each pixel can be thought of as a single square point of coloured light. For example, consider this image of a maize seedling, with a square area designated by a red box: -![Original size image](../fig/maize-seedling-original.jpg) +![](fig/maize-seedling-original.jpg){alt='Original size image'} Now, if we zoomed in close enough to see the pixels in the red box, we would see something like this: -![Enlarged image area](../fig/maize-seedling-enlarged.jpg) +![](fig/maize-seedling-enlarged.jpg){alt='Enlarged image area'} Note that each square in the enlarged image area - each pixel - is all one colour, @@ -70,6 +56,7 @@ Viewed from a distance, these pixels seem to blend together to form the image we see. ## Working with Pixels + As noted, in practice, real world images will typically be made up of a vast number of pixels, and each of these pixels will be one of potentially millions of colours. @@ -77,20 +64,25 @@ While we will deal with pictures of such complexity shortly, let's start our exploration with 15 pixels in a 5 X 3 matrix with 2 colours and work our way up to that complexity. -> ## Matrices, arrays, images and pixels -> The **matrix** is mathematical concept - numbers evenly arranged in a rectangle. This can be a two dimensional rectangle, -> like the shape of the screen you're looking at now. Or it could be a three dimensional equivalent, a cuboid, or have -> even more dimensions, but always keeping the evenly spaced arrangement of numbers. 
In computing, **array** refers -> to a structure in the computer's memory where data is stored in evenly-spaced **elements**. This is strongly analogous -> to a matrix. A `numpy` array is a **type** of variable (a simpler example of a type is an integer). For our purposes, -> the distinction between matrices and arrays is not important, we don't really care how the computer arranges our data -> in its memory. The important thing is that the computer stores values describing the pixels in images, as arrays. And -> the terms matrix and array can be used interchangeably. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Matrices, arrays, images and pixels + +The **matrix** is mathematical concept - numbers evenly arranged in a rectangle. This can be a two dimensional rectangle, +like the shape of the screen you're looking at now. Or it could be a three dimensional equivalent, a cuboid, or have +even more dimensions, but always keeping the evenly spaced arrangement of numbers. In computing, **array** refers +to a structure in the computer's memory where data is stored in evenly-spaced **elements**. This is strongly analogous +to a matrix. A `numpy` array is a **type** of variable (a simpler example of a type is an integer). For our purposes, +the distinction between matrices and arrays is not important, we don't really care how the computer arranges our data +in its memory. The important thing is that the computer stores values describing the pixels in images, as arrays. And +the terms matrix and array can be used interchangeably. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: First, the necessary imports: -~~~ +```python """ * Python libraries for learning and performing image processing. * @@ -100,81 +92,82 @@ import matplotlib.pyplot as plt import ipympl import imageio.v3 as iio import skimage -~~~ -{: .language-python} +``` The `v3` module of imageio (`imageio.v3`) is imported as `iio`. This module enables us to read and write images. 
-> ## Import Statements in Python -> -> In Python, the `import` statement is used to -> load additional functionality into a program. -> This is necessary when we want our code to do something more specialised, -> which cannot easily be achieved with the limited set of basic tools and -> data structures available in the default Python environment. -> -> Additional functionality can be loaded as a single function or object, -> a module defining several of these, or a library containing many modules. -> You will encounter several different forms of `import` statement. -> -> -> ~~~ -> import skimage # form 1, load whole skimage library -> import skimage.draw # form 2, load skimage.draw module only -> from skimage.draw import disk # form 3, load only the disk function -> import numpy as np # form 4, load all of numpy into an object called np -> ~~~ -> {: .language-python } -> -> > ## Further Explanation -> > -> > In the example above, form 1 loads the entire `skimage` library into the -> > program as an object. -> > Individual modules of the library are then available within that object, -> > e.g., to access the `disk` function used in [the drawing episode]({{ page.root }}{% link _episodes/04-drawing.md %}), -> > you would write `skimage.draw.disk()`. -> > -> > Form 2 loads only the `draw` module of `skimage` into the program. -> > When we run the code, -> > the program will take less time and use less memory -> > because we will not load the whole `skimage` library. -> > The syntax needed to use the module remains unchanged: -> > to access the `disk` function, -> > we would use the same function call as given for form 1. -> > -> > To further reduce the time and memory requirements for your program, -> > form 3 can be used to import only a specific function/class from a library/module. 
-> > Unlike the other forms, when this approach is used, -> > the imported function or class can be called by its name only, -> > without prefixing it with the name of the module/library from which it was loaded, -> > i.e., `disk()` instead of `skimage.draw.disk()` using the example above. -> > One hazard of this form is that importing like this will overwrite any -> > object with the same name that was defined/imported earlier in the program, -> > i.e., the example above would replace any existing object called `disk` -> > with the `disk` function from `skimage.draw`. -> > -> > Finally, the `as` keyword can be used when importing, -> > to define a name to be used as shorthand for the library/module being imported. -> > This name is referred to as an alias. Typically, using an alias (such as -> > `np` for the NumPy library) saves us a little typing. -> > You may see `as` combined with any of the other first three forms of `import` statement. -> > -> > Which form is used often depends on -> > the size and number of additional tools being loaded into the program. -> > -> {: .solution } -{: .callout } +:::::::::::::::::::::::::::::::::::::::: callout + +## Import Statements in Python + +In Python, the `import` statement is used to +load additional functionality into a program. +This is necessary when we want our code to do something more specialised, +which cannot easily be achieved with the limited set of basic tools and +data structures available in the default Python environment. + +Additional functionality can be loaded as a single function or object, +a module defining several of these, or a library containing many modules. +You will encounter several different forms of `import` statement. 
+ +```python +import skimage # form 1, load whole skimage library +import skimage.draw # form 2, load skimage.draw module only +from skimage.draw import disk # form 3, load only the disk function +import numpy as np # form 4, load all of numpy into an object called np +``` + +:::::::::::::: solution + +## Further Explanation + +In the example above, form 1 loads the entire `skimage` library into the +program as an object. +Individual modules of the library are then available within that object, +e.g., to access the `disk` function used in [the drawing episode](04-drawing.md), +you would write `skimage.draw.disk()`. + +Form 2 loads only the `draw` module of `skimage` into the program. +When we run the code, +the program will take less time and use less memory +because we will not load the whole `skimage` library. +The syntax needed to use the module remains unchanged: +to access the `disk` function, +we would use the same function call as given for form 1. + +To further reduce the time and memory requirements for your program, +form 3 can be used to import only a specific function/class from a library/module. +Unlike the other forms, when this approach is used, +the imported function or class can be called by its name only, +without prefixing it with the name of the module/library from which it was loaded, +i.e., `disk()` instead of `skimage.draw.disk()` using the example above. +One hazard of this form is that importing like this will overwrite any +object with the same name that was defined/imported earlier in the program, +i.e., the example above would replace any existing object called `disk` +with the `disk` function from `skimage.draw`. + +Finally, the `as` keyword can be used when importing, +to define a name to be used as shorthand for the library/module being imported. +This name is referred to as an alias. Typically, using an alias (such as +`np` for the NumPy library) saves us a little typing. 
+You may see `as` combined with any of the other first three forms of `import` statement. + +Which form is used often depends on +the size and number of additional tools being loaded into the program. + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Now that we have our libraries loaded, we will run a Jupyter Magic Command that will ensure our images display in our Jupyter document with pixel information that will help us more efficiently run commands later in the session. -~~~ +```python %matplotlib widget -~~~ -{: .language-python} +``` With that taken care of, let's load our image data from disk using @@ -185,26 +178,27 @@ the `imshow` function from the `matplotlib.pyplot` module. version has the benefit of supporting nD (multidimensional) image data natively (think of volumes, movies). -> ## Why not use `skimage.io.imread()` -> -> The `skimage` library has its own function to read an image, -> so you might be asking why we don't use it here. -> Actually, `skimage.io.imread()` uses `iio.imread()` internally when loading an image into Python. -> It is certainly something you may use as you see fit in your own code. -> In this lesson, we use the `imageio` library to read or write (save) images, -> while `skimage` is dedicated to performing operations on the images. -> Using `imageio` gives us more flexibility, especially when it comes to -> handling metadata. -> -{: .callout} - -~~~ +::::::::::::::::::::::::::::::::::::::::: callout + +## Why not use `skimage.io.imread()` + +The `skimage` library has its own function to read an image, +so you might be asking why we don't use it here. +Actually, `skimage.io.imread()` uses `iio.imread()` internally when loading an image into Python. +It is certainly something you may use as you see fit in your own code. +In this lesson, we use the `imageio` library to read or write (save) images, +while `skimage` is dedicated to performing operations on the images. 
+Using `imageio` gives us more flexibility, especially when it comes to +handling metadata. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +```python eight = iio.imread(uri="data/eight.tif") plt.imshow(eight) -~~~ -{: .language-python} +``` -![Image of 8](../fig/eight.png) +![](fig/eight.png){alt='Image of 8'} You might be thinking, "That does look vaguely like an eight, @@ -227,22 +221,19 @@ a 5 x 3 matrix of 15 pixels. We can demonstrate that by calling on the shape property of our image variable and see the matrix by printing our image variable to the screen. - -~~~ +```python print(eight.shape) print(eight) -~~~ -{: .language-python} +``` -~~~ +```output (5, 3) [[0. 0. 0.] [0. 1. 0.] [0. 0. 0.] [0. 1. 0.] [0. 0. 0.]] -~~~ -{: .output } +``` Thus if we have tools that will allow us to manipulate these arrays of numbers, we can manipulate the image. @@ -260,7 +251,7 @@ at this small scale we can determine the centre pixel is in row labeled 2 and column labeled 1. Using array slicing, we can then address and assign a new value to that position. -~~~ +```python zero = iio.imread(uri="data/eight.tif") zero[2,1]= 1.0 """ @@ -269,89 +260,98 @@ The follwing line of code creates a new figure for imshow to use in displaying o fig, ax = plt.subplots() plt.imshow(zero) print(zero) -~~~ -{: .language-python} +``` -~~~ +```output [[0. 0. 0.] [0. 1. 0.] [0. 1. 0.] [0. 1. 0.] [0. 0. 0.]] -~~~ -{: .output } - -![Image of 0](../fig/zero.png) - ->## Coordinate system -> -> When we process images, we can access, examine, and / or change -> the colour of any pixel we wish. -> To do this, we need some convention on how to access pixels -> individually; a way to give each one a name, or an address of a sort. -> -> The most common manner to do this, and the one we will use in our programs, -> is to assign a modified Cartesian coordinate system to the image. 
-> The coordinate system we usually see in mathematics has -> a horizontal x-axis and a vertical y-axis, like this: -> -> ![Cartesian coordinate system](../fig/cartesian-coordinates.png) -> -> The modified coordinate system used for our images will have only positive -> coordinates, the origin will be in the upper left corner instead of the -> centre, and y coordinate values will get larger as they go down instead of up, -> like this: -> -> ![Image coordinate system](../fig/image-coordinates.png) -> -> This is called a *left-hand coordinate system*. -> If you hold your left hand in front of your face and point your thumb at the floor, -> your extended index finger will correspond to the x-axis -> while your thumb represents the y-axis. -> -> ![Left-hand coordinate system](../fig/left-hand-coordinates.png) -> -> Until you have worked with images for a while, -> the most common mistake that you will make with coordinates is to forget -> that y coordinates get larger as they go down instead of up -> as in a normal Cartesian coordinate system. Consequently, it may be helpful to think -> in terms of counting down rows (r) for the y-axis and across columns (c) for the x-axis. This -> can be especially helpful in cases where you need to transpose image viewer data -> provided in *x,y* format to *y,x* format. Thus, we will use *cx* and *ry* where appropriate -> to help bridge these two approaches. -{: .callout } - -> ## Changing Pixel Values (5 min) -> -> Load another copy of eight named five, -> and then change the value of pixels so you have what looks like a 5 instead of an 8. -> Display the image and print out the matrix as well. -> -> > ## Solution -> > There are many possible solutions, but one method would be . . . -> > -> > ~~~ -> > five = iio.imread(uri="data/eight.tif") -> > five[1,2]= 1.0 -> > five[3,0]= 1.0 -> > fig, ax = plt.subplots() -> > plt.imshow(five) -> > print(five) -> > ~~~ -> > {: .language-python} -> > -> > ~~~ -> > [[0. 0. 0.] -> > [0. 1. 1.] 
-> > [0. 0. 0.] -> > [1. 1. 0.] -> > [0. 0. 0.]] -> > ~~~ -> > {: .output } -> > -> > ![Image of 5](../fig/five.png) -> {: .solution} -{: .challenge} +``` + +![](fig/zero.png){alt='Image of 0'} + +:::::::::::::::::::::::::::::::::::::::: callout + +## Coordinate system + +When we process images, we can access, examine, and / or change +the colour of any pixel we wish. +To do this, we need some convention on how to access pixels +individually; a way to give each one a name, or an address of a sort. + +The most common manner to do this, and the one we will use in our programs, +is to assign a modified Cartesian coordinate system to the image. +The coordinate system we usually see in mathematics has +a horizontal x-axis and a vertical y-axis, like this: + +![](fig/cartesian-coordinates.png){alt='Cartesian coordinate system'} + +The modified coordinate system used for our images will have only positive +coordinates, the origin will be in the upper left corner instead of the +centre, and y coordinate values will get larger as they go down instead of up, +like this: + +![](fig/image-coordinates.png){alt='Image coordinate system'} + +This is called a *left-hand coordinate system*. +If you hold your left hand in front of your face and point your thumb at the floor, +your extended index finger will correspond to the x-axis +while your thumb represents the y-axis. + +![](fig/left-hand-coordinates.png){alt='Left-hand coordinate system'} + +Until you have worked with images for a while, +the most common mistake that you will make with coordinates is to forget +that y coordinates get larger as they go down instead of up +as in a normal Cartesian coordinate system. Consequently, it may be helpful to think +in terms of counting down rows (r) for the y-axis and across columns (c) for the x-axis. This +can be especially helpful in cases where you need to transpose image viewer data +provided in *x,y* format to *y,x* format. 
Thus, we will use *cx* and *ry* where appropriate +to help bridge these two approaches. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Changing Pixel Values (5 min) + +Load another copy of eight named five, +and then change the value of pixels so you have what looks like a 5 instead of an 8. +Display the image and print out the matrix as well. + +::::::::::::::: solution + +## Solution + +There are many possible solutions, but one method would be . . . + +```python +five = iio.imread(uri="data/eight.tif") +five[1,2]= 1.0 +five[3,0]= 1.0 +fig, ax = plt.subplots() +plt.imshow(five) +print(five) +``` + +```output +[[0. 0. 0.] + [0. 1. 1.] + [0. 0. 0.] + [1. 1. 0.] + [0. 0. 0.]] +``` + +![](fig/five.png){alt='Image of 5'} + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## More colours @@ -361,7 +361,7 @@ One common way is to use the numbers between 0 and 255 to allow for 256 different colours or 256 different levels of grey. Let's try that out. -~~~ +```python # make a copy of eight three_colours = iio.imread(uri="data/eight.tif") @@ -374,10 +374,9 @@ three_colours[2,:] = 255. fig, ax = plt.subplots() plt.imshow(three_colours) print(three_colours) -~~~ -{: .language-python} +``` -![Image of three colours](../fig/three-colours.png) +![](fig/three-colours.png){alt='Image of three colours'} We now have 3 colours, but are they the three colours you expected? They all appear to be on a continuum of dark purple on the low end and @@ -405,13 +404,12 @@ For now, let's see how you can do that using an alternative map you have likely seen before where it will be even easier to see it as a mapped continuum of intensities: greyscale. 
-~~~ +```python fig, ax = plt.subplots() plt.imshow(three_colours,cmap=plt.cm.gray) -~~~ -{: .language-python} +``` -![Image in greyscale](../fig/grayscale.png) +![](fig/grayscale.png){alt='Image in greyscale'} Above we have exactly the same underying data matrix, but in greyscale. Zero maps to black, 255 maps to white, and 128 maps to medium grey. @@ -437,7 +435,7 @@ combine to produce a set of pixels using a 4 X 4 matrix with 3 dimensions for the colours red, green, and blue. Rather than loading it from a file, we will generate this example using numpy. -~~~ +```python # set the random seed so we all get the same matrix pseudorandomizer = np.random.RandomState(2021) # create a 4 × 4 checkerboard of random colours @@ -447,10 +445,9 @@ fig, ax = plt.subplots() plt.imshow(checkerboard) # display the arrays print(checkerboard) -~~~ -{: .language-python} +``` -~~~ +```output [[[116 85 57] [128 109 94] [214 44 62] @@ -470,22 +467,20 @@ print(checkerboard) [120 5 49] [166 234 142] [ 71 85 70]]] - ~~~ -{: .output } +``` -![Image of checkerboard](../fig/checkerboard.png) +![](fig/checkerboard.png){alt='Image of checkerboard'} Previously we had one number being mapped to one colour or intensity. Now we are combining the effect of 3 numbers to arrive at a single colour value. Let's see an example of that using the blue square at the end of the second row, which has the index [1, 3]. -~~~ +```python # extract all the colour information for the blue square upper_right_square = checkerboard[1, 3, :] upper_right_square -~~~ -{: .language-python} +``` This outputs: array([ 7, 1, 110]) The integers in order represent Red, Green, and Blue. @@ -507,28 +502,29 @@ to help us understand what is happening. We can do that by multiplying our image array representation with a 1d matrix that has a one for the channel we want to keep and zeros for the rest. 
-~~~ +```python red_channel = checkerboard * [1, 0, 0] fig, ax = plt.subplots() plt.imshow(red_channel) -~~~ -{: .language-python} -![Image of red channel](../fig/checkerboard-red-channel.png) -~~~ +``` + +![](fig/checkerboard-red-channel.png){alt='Image of red channel'} + +```python green_channel = checkerboard * [0, 1, 0] fig, ax = plt.subplots() plt.imshow(green_channel) -~~~ -{: .language-python} -![Image of green channel](../fig/checkerboard-green-channel.png) -~~~ +``` + +![](fig/checkerboard-green-channel.png){alt='Image of green channel'} + +```python blue_channel = checkerboard * [0, 0, 1] fig, ax = plt.subplots() plt.imshow(blue_channel) -~~~ -{: .language-python} +``` -![Image of blue channel](../fig/checkerboard-blue-channel.png) +![](fig/checkerboard-blue-channel.png){alt='Image of blue channel'} If we look at the upper [1, 3] square in all three figures, we can see each of those colour contributions in action. @@ -538,7 +534,6 @@ When all three channels are combined though, the blue light of those squares is being diluted by the relative strength of red and green being mixed in with them. - ## 24-bit RGB Colour This last colour model we used, @@ -551,7 +546,7 @@ an integer in the closed range [0, 255] as seen in the example. Therefore, there are 256 discrete amounts of each primary colour that can be added to produce another colour. The number of discrete amounts of each colour, 256, corresponds to the number of -bits used to hold the colour channel value, which is eight (28=256). +bits used to hold the colour channel value, which is eight (28\=256). Since we have three channels with 8 bits for each (8+8+8=24), this is called 24-bit colour depth. @@ -560,65 +555,75 @@ integers in [0, 255], representing the red, green, and blue channels, respectively. A larger number in a channel means that more of that primary colour is present. 
-> ## Thinking about RGB colours (5 min) -> -> Suppose that we represent colours as triples (r, g, b), where each of r, g, -> and b is an integer in [0, 255]. -> What colours are represented by each of these triples? -> (Try to answer these questions without reading further.) -> -> 1. (255, 0, 0) -> 2. (0, 255, 0) -> 3. (0, 0, 255) -> 4. (255, 255, 255) -> 5. (0, 0, 0) -> 6. (128, 128, 128) -> -> > ## Solution -> > -> > 1. (255, 0, 0) represents red, because the red channel is maximised, while -> > the other two channels have the minimum values. -> > 2. (0, 255, 0) represents green. -> > 3. (0, 0, 255) represents blue. -> > 4. (255, 255, 255) is a little harder. When we mix the maximum value of all -> > three colour channels, we see the colour white. -> > 5. (0, 0, 0) represents the absence of all colour, or black. -> > 6. (128, 128, 128) represents a medium shade of gray. -> > Note that the 24-bit RGB colour model provides at least 254 shades of gray, -> > rather than only fifty. -> > -> > Note that the RGB colour model may run contrary to your experience, -> > especially if you have mixed primary colours of paint to create new colours. -> > In the RGB model, the *lack of* any colour is black, -> > while the *maximum amount* of each of the primary colours is white. -> > With physical paint, we might start with a white base, -> > and then add differing amounts of other paints to produce a darker shade. -> > -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +## Thinking about RGB colours (5 min) + +Suppose that we represent colours as triples (r, g, b), where each of r, g, +and b is an integer in [0, 255]. +What colours are represented by each of these triples? +(Try to answer these questions without reading further.) + +1. (255, 0, 0) +2. (0, 255, 0) +3. (0, 0, 255) +4. (255, 255, 255) +5. (0, 0, 0) +6. (128, 128, 128) + +::::::::::::::: solution + +## Solution + +1. 
(255, 0, 0) represents red, because the red channel is maximised, while + the other two channels have the minimum values. +2. (0, 255, 0) represents green. +3. (0, 0, 255) represents blue. +4. (255, 255, 255) is a little harder. When we mix the maximum value of all + three colour channels, we see the colour white. +5. (0, 0, 0) represents the absence of all colour, or black. +6. (128, 128, 128) represents a medium shade of gray. + Note that the 24-bit RGB colour model provides at least 254 shades of gray, + rather than only fifty. + +Note that the RGB colour model may run contrary to your experience, +especially if you have mixed primary colours of paint to create new colours. +In the RGB model, the *lack of* any colour is black, +while the *maximum amount* of each of the primary colours is white. +With physical paint, we might start with a white base, +and then add differing amounts of other paints to produce a darker shade. + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: After completing the previous challenge, we can look at some further examples of 24-bit RGB colours, in a visual way. The image in the next challenge shows some colour names, their 24-bit RGB triplet values, and the colour itself. -> ## RGB colour table (optional, not included in timing) -> -> ![RGB colour table](../fig/colour-table.png) -> -> We cannot really provide a complete table. -> To see why, answer this question: -> How many possible colours can be represented with the 24-bit RGB model? -> -> > ## Solution -> > -> > There are 24 total bits in an RGB colour of this type, -> > and each bit can be on or off, -> > and so there are 224 = 16,777,216 -> > possible colours with our additive, 24-bit RGB colour model. -> > -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +## RGB colour table (optional, not included in timing) + +![](fig/colour-table.png){alt='RGB colour table'} + +We cannot really provide a complete table. 
+To see why, answer this question: +How many possible colours can be represented with the 24-bit RGB model? + +::::::::::::::: solution + +## Solution + +There are 24 total bits in an RGB colour of this type, +and each bit can be on or off, +and so there are 224 = 16,777,216 +possible colours with our additive, 24-bit RGB colour model. + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Although 24-bit colour depth is common, there are other options. We might have 8-bit colour @@ -653,11 +658,11 @@ There are several image formats we might encounter, and we should know the basics of at least of few of them. Some formats we might encounter, and their file extensions, are shown in this table: -| Format | Extension | +| Format | Extension | | :-------------------------------------- | :------------ | -| Device-Independent Bitmap (BMP) | .bmp | -| Joint Photographic Experts Group (JPEG) | .jpg or .jpeg | -| Tagged Image File Format (TIFF) | .tif or .tiff | +| Device-Independent Bitmap (BMP) | .bmp | +| Joint Photographic Experts Group (JPEG) | .jpg or .jpeg | +| Tagged Image File Format (TIFF) | .tif or .tiff | ## BMP @@ -698,68 +703,77 @@ you will need to know about bits / bytes and how those are used to express computer storage capacities. If you already know, you can skip to the challenge below. ->## Bits and bytes -> -> Before we talk specifically about images, -> we first need to understand how numbers are stored in a modern digital computer. -> When we think of a number, -> we do so using a *decimal*, or *base-10* place-value number system. -> For example, a number like 659 is -> 6 × 102 + 5 × 101 + 9 × 100. -> Each digit in the number is multiplied by a power of 10, -> based on where it occurs, -> and there are 10 digits that can occur in each position -> (0, 1, 2, 3, 4, 5, 6, 7, 8, 9). -> -> In principle, -> computers could be constructed to represent numbers in exactly the same way. 
-> But, the electronic circuits inside a computer are much easier to construct -> if we restrict the numeric base to only two, instead of 10. -> (It is easier for circuitry to tell the difference between -> two voltage levels than it is to differentiate among 10 levels.) -> So, values in a computer are stored using a *binary*, -> or *base-2* place-value number system. -> -> In this system, each symbol in a number is called a *bit* instead of a digit, -> and there are only two values for each bit (0 and 1). -> We might imagine a four-bit binary number, 1101. -> Using the same kind of place-value expansion as we did above for 659, -> we see that -> 1101 = 1 × 23 + 1 × 22 + 0 × 21 + 1 × 20, -> which if we do the math is 8 + 4 + 0 + 1, or 13 in decimal. -> -> Internally, -> computers have a minimum number of bits that they work with at a given time: eight. -> A group of eight bits is called a *byte*. -> The amount of memory (RAM) and drive space our computers have is quantified -> by terms like Megabytes (MB), Gigabytes (GB), and Terabytes (TB). -> The following table provides more formal definitions for these terms. -> -> | Unit | Abbreviation | Size | -> | :------- | ------------ | :--------- | -> | Kilobyte | KB | 1024 bytes | -> | Megabyte | MB | 1024 KB | -> | Gigabyte | GB | 1024 MB | -> | Terabyte | TB | 1024 GB | -{: .callout } - -> ## BMP image size (optional, not included in timing) -> -> Imagine that we have a fairly large, but very boring image: -> a 5,000 × 5,000 pixel image composed of nothing but white pixels. -> If we used an uncompressed image format such as BMP, -> with the 24-bit RGB colour model, -> how much storage would be required for the file? -> -> > ## Solution -> > In such an image, there are 5,000 × 5,000 = 25,000,000 pixels, -> > and 24 bits for each pixel, -> > leading to 25,000,000 × 24 = 600,000,000 bits, -> > or 75,000,000 bytes (71.5MB). -> > That is quite a lot of space for a very uninteresting image! 
-> > -> {: .solution} -{: .challenge} +:::::::::::::::::::::::::::::::::::::::: callout + +## Bits and bytes + +Before we talk specifically about images, +we first need to understand how numbers are stored in a modern digital computer. +When we think of a number, +we do so using a *decimal*, or *base-10* place-value number system. +For example, a number like 659 is +6 × 10<sup>2</sup> + 5 × 10<sup>1</sup> + 9 × 10<sup>0</sup>. +Each digit in the number is multiplied by a power of 10, +based on where it occurs, +and there are 10 digits that can occur in each position +(0, 1, 2, 3, 4, 5, 6, 7, 8, 9). + +In principle, +computers could be constructed to represent numbers in exactly the same way. +But, the electronic circuits inside a computer are much easier to construct +if we restrict the numeric base to only two, instead of 10. +(It is easier for circuitry to tell the difference between +two voltage levels than it is to differentiate among 10 levels.) +So, values in a computer are stored using a *binary*, +or *base-2* place-value number system. + +In this system, each symbol in a number is called a *bit* instead of a digit, +and there are only two values for each bit (0 and 1). +We might imagine a four-bit binary number, 1101. +Using the same kind of place-value expansion as we did above for 659, +we see that +1101 = 1 × 2<sup>3</sup> + 1 × 2<sup>2</sup> + 0 × 2<sup>1</sup> + 1 × 2<sup>0</sup>, +which if we do the math is 8 + 4 + 0 + 1, or 13 in decimal. + +Internally, +computers have a minimum number of bits that they work with at a given time: eight. +A group of eight bits is called a *byte*. +The amount of memory (RAM) and drive space our computers have is quantified +by terms like Megabytes (MB), Gigabytes (GB), and Terabytes (TB). +The following table provides more formal definitions for these terms.
+ +| Unit | Abbreviation | Size | +| :-------------------------------------- | ------------- | :--------- | +| Kilobyte | KB | 1024 bytes | +| Megabyte | MB | 1024 KB | +| Gigabyte | GB | 1024 MB | +| Terabyte | TB | 1024 GB | + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## BMP image size (optional, not included in timing) + +Imagine that we have a fairly large, but very boring image: +a 5,000 × 5,000 pixel image composed of nothing but white pixels. +If we used an uncompressed image format such as BMP, +with the 24-bit RGB colour model, +how much storage would be required for the file? + +::::::::::::::: solution + +## Solution + +In such an image, there are 5,000 × 5,000 = 25,000,000 pixels, +and 24 bits for each pixel, +leading to 25,000,000 × 24 = 600,000,000 bits, +or 75,000,000 bytes (71.5MB). +That is quite a lot of space for a very uninteresting image! + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Since image files can be very large, various *compression* schemes exist for saving @@ -837,95 +851,103 @@ It supports 24-bit colour depth, and since the format is so widely used, JPEG images can be viewed and manipulated easily on all computing platforms. -> ## Examining actual image sizes (optional, not included in timing) -> -> Let us see the effects of image compression on image size with actual images. -> The following script creates a square white image 5000 X 5000 pixels, -> and then saves it as a BMP and as a JPEG image. -> -> ~~~ -> dim = 5000 -> -> img = np.zeros((dim, dim, 3), dtype="uint8") -> img.fill(255) -> -> iio.imwrite(uri="data/ws.bmp", image=img) -> iio.imwrite(uri="data/ws.jpg", image=img) -> ~~~ -> {: .language-python} -> -> Examine the file sizes of the two output files, `ws.bmp` and `ws.jpg`. -> Does the BMP image size match our previous prediction? -> How about the JPEG? 
-> -> > ## Solution -> > -> > The BMP file, `ws.bmp`, is 75,000,054 bytes, -> > which matches our prediction very nicely. -> > The JPEG file, `ws.jpg`, is 392,503 bytes, -> > two orders of magnitude smaller than the bitmap version. -> > -> {: .solution} -{: .challenge} - -> ## Comparing lossless versus lossy compression (optional, not included in timing) -> -> Let us see a hands-on example of lossless versus lossy compression. -> Once again, open a terminal and navigate to the `data/` directory. -> The two output images, `ws.bmp` and `ws.jpg`, should still be in the directory, -> along with another image, `tree.jpg`. -> -> We can apply lossless compression to any file by using the `zip` command. -> Recall that the `ws.bmp` file contains 75,000,054 bytes. -> Apply lossless compression to this image by executing the following command: -> `zip ws.zip ws.bmp`. -> This command tells the computer to create a new compressed file, -> `ws.zip`, from the original bitmap image. -> Execute a similar command on the tree JPEG file: `zip tree.zip tree.jpg`. -> -> Having created the compressed file, -> use the `ls -al` command to display the contents of the directory. -> How big are the compressed files? -> How do those compare to the size of `ws.bmp` and `tree.jpg`? -> What can you conclude from the relative sizes? -> -> > ## Solution -> > -> > Here is a partial directory listing, showing the sizes of the relevant files there: -> > -> > ~~~ -> > -rw-rw-r-- 1 diva diva 154344 Jun 18 08:32 tree.jpg -> > -rw-rw-r-- 1 diva diva 146049 Jun 18 08:53 tree.zip -> > -rw-rw-r-- 1 diva diva 75000054 Jun 18 08:51 ws.bmp -> > -rw-rw-r-- 1 diva diva 72986 Jun 18 08:53 ws.zip -> > ~~~ -> > {: .output} -> > -> > We can see that the regularity of the bitmap image -> > (remember, it is a 5,000 x 5,000 pixel image containing only white pixels) -> > allows the lossless compression scheme to compress the file quite effectively. 
-> > On the other hand, compressing `tree.jpg` does not create a much smaller file; -> > this is because the JPEG image was already in a compressed format. -> > -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +## Examining actual image sizes (optional, not included in timing) + +Let us see the effects of image compression on image size with actual images. +The following script creates a square white image 5000 X 5000 pixels, +and then saves it as a BMP and as a JPEG image. + +```python +dim = 5000 + +img = np.zeros((dim, dim, 3), dtype="uint8") +img.fill(255) + +iio.imwrite(uri="data/ws.bmp", image=img) +iio.imwrite(uri="data/ws.jpg", image=img) +``` + +Examine the file sizes of the two output files, `ws.bmp` and `ws.jpg`. +Does the BMP image size match our previous prediction? +How about the JPEG? + +::::::::::::::: solution + +## Solution + +The BMP file, `ws.bmp`, is 75,000,054 bytes, +which matches our prediction very nicely. +The JPEG file, `ws.jpg`, is 392,503 bytes, +two orders of magnitude smaller than the bitmap version. + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Comparing lossless versus lossy compression (optional, not included in timing) + +Let us see a hands-on example of lossless versus lossy compression. +Once again, open a terminal and navigate to the `data/` directory. +The two output images, `ws.bmp` and `ws.jpg`, should still be in the directory, +along with another image, `tree.jpg`. + +We can apply lossless compression to any file by using the `zip` command. +Recall that the `ws.bmp` file contains 75,000,054 bytes. +Apply lossless compression to this image by executing the following command: +`zip ws.zip ws.bmp`. +This command tells the computer to create a new compressed file, +`ws.zip`, from the original bitmap image. +Execute a similar command on the tree JPEG file: `zip tree.zip tree.jpg`. 
+ +Having created the compressed file, +use the `ls -al` command to display the contents of the directory. +How big are the compressed files? +How do those compare to the size of `ws.bmp` and `tree.jpg`? +What can you conclude from the relative sizes? + +::::::::::::::: solution + +## Solution + +Here is a partial directory listing, showing the sizes of the relevant files there: + +```output +-rw-rw-r-- 1 diva diva 154344 Jun 18 08:32 tree.jpg +-rw-rw-r-- 1 diva diva 146049 Jun 18 08:53 tree.zip +-rw-rw-r-- 1 diva diva 75000054 Jun 18 08:51 ws.bmp +-rw-rw-r-- 1 diva diva 72986 Jun 18 08:53 ws.zip +``` + +We can see that the regularity of the bitmap image +(remember, it is a 5,000 x 5,000 pixel image containing only white pixels) +allows the lossless compression scheme to compress the file quite effectively. +On the other hand, compressing `tree.jpg` does not create a much smaller file; +this is because the JPEG image was already in a compressed format. + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Here is an example showing how JPEG compression might impact image quality. Consider this image of several maize seedlings (scaled down here from 11,339 × 11,336 pixels in order to fit the display). -![Original image](../fig/quality-original.jpg) +![](fig/quality-original.jpg){alt='Original image'} Now, let us zoom in and look at a small section of the label in the original, first in the uncompressed format: -![Enlarged, uncompressed](../fig/quality-tif.jpg) +![](fig/quality-tif.jpg){alt='Enlarged, uncompressed'} Here is the same area of the image, but in JPEG format. We used a fairly aggressive compression parameter to make the JPEG, in order to illustrate the problems you might encounter with the format. -![Enlarged, compressed](../fig/quality-jpg.jpg) +![](fig/quality-jpg.jpg){alt='Enlarged, compressed'} The JPEG image is of clearly inferior quality. It has less colour variation and noticeable pixelation. 
@@ -935,7 +957,7 @@ A histogram shows how often each colour value appears in an image. The histograms for the uncompressed (left) and compressed (right) images are shown below: -![Uncompressed histogram](../fig/quality-histogram.jpg) +![](fig/quality-histogram.jpg){alt='Uncompressed histogram'} We learn how to make histograms such as these later on in the workshop. The differences in the colour histograms are even more apparent than in the @@ -954,7 +976,7 @@ you may wish to use a compressed image format to speed up file transfer time. ## PNG -PNG images are well suited for storing diagrams. It uses a lossless compression and is hence often used +PNG images are well suited for storing diagrams. It uses a lossless compression and is hence often used in web applications for non-photographic images. The format is able to store RGB and plain luminance (single channel, without an associated color) data, among others. Image data is stored row-wise and then, per row, a simple filter, like taking the difference of adjacent pixels, can be applied to increase the compressability of the data. The filtered data is then compressed in the next step and written out to the disk. @@ -979,7 +1001,7 @@ where it was captured, what type of camera was used and with what settings, etc. We normally don't see this metadata when we view an image, but we can view it independently if we wish to -(see [_Accessing Metadata_](#accessing-metadata), below). +(see [*Accessing Metadata*](#accessing-metadata), below). The important thing to be aware of at this stage is that you cannot rely on the metadata of an image being fully preserved when you use software to process that image. @@ -989,61 +1011,78 @@ certain metadata fields. In any case, remember: **if metadata is important to you, take precautions to always preserve the original files**. -> ## Accessing Metadata -> -> `imageio.v3` provides a way to display or explore the metadata -> associated with an image. 
Metadata is served independently from pixel data: -> -> ~~~ -> # read metadata -> metadata = iio.immeta(uri="data/eight.tif") -> # display the format-specific metadata -> metadata -> ~~~ -> {: .language-python} -> -> ~~~ -> {'is_fluoview': False, -> 'is_nih': False, -> 'is_micromanager': False, -> 'is_ome': False, -> 'is_lsm': False, -> 'is_reduced': False, -> 'is_shaped': True, -> 'is_stk': False, -> 'is_tiled': False, -> 'is_mdgel': False, -> 'compression': , -> 'predictor': 1, -> 'is_mediacy': False, -> 'description': '{"shape": [5, 3]}', -> 'description1': '', -> 'is_imagej': False, -> 'software': 'tifffile.py', -> 'resolution_unit': 1, -> 'resolution': (1.0, 1.0, 'NONE')} -> ~~~ -> {: .output } -> -> Other software exists that can help you handle metadata, -> e.g., [Fiji](https://imagej.net/Fiji) -> and [ImageMagick](https://imagemagick.org/index.php). -> You may want to explore these options if you need to work with -> the metadata of your images. -> -{: .callout } +:::::::::::::::::::::::::::::::::::::::: callout + +## Accessing Metadata + +`imageio.v3` provides a way to display or explore the metadata +associated with an image. Metadata is served independently from pixel data: + +```python +# read metadata +metadata = iio.immeta(uri="data/eight.tif") +# display the format-specific metadata +metadata +``` + +```output +{'is_fluoview': False, + 'is_nih': False, + 'is_micromanager': False, + 'is_ome': False, + 'is_lsm': False, + 'is_reduced': False, + 'is_shaped': True, + 'is_stk': False, + 'is_tiled': False, + 'is_mdgel': False, + 'compression': , + 'predictor': 1, + 'is_mediacy': False, + 'description': '{"shape": [5, 3]}', + 'description1': '', + 'is_imagej': False, + 'software': 'tifffile.py', + 'resolution_unit': 1, + 'resolution': (1.0, 1.0, 'NONE')} +``` + +Other software exists that can help you handle metadata, +e.g., [Fiji](https://imagej.net/Fiji) +and [ImageMagick](https://imagemagick.org/index.php). 
+You may want to explore these options if you need to work with +the metadata of your images. + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Summary of image formats used in this lesson The following table summarises the characteristics of the BMP, JPEG, and TIFF image formats: -| Format | Compression | Metadata | Advantages | Disadvantages | -| :------- | :------------ | :--------- | :-------------------- | :----------------- | -| BMP | None | None | Universally viewable, | Large file sizes | -| | | | high quality | | -| JPEG | Lossy | Yes | Universally viewable, | Detail may be lost | -| | | | smaller file size | | -| PNG | Lossless | [Yes](https://www.w3.org/TR/PNG/#11keywords) | Universally viewable, [open standard](https://www.w3.org/TR/PNG/), smaller file size | Metadata less flexible than TIFF, RGB only | -| TIFF | None, lossy, | Yes | High quality or | Not universally viewable | -| | or lossless | | smaller file size | | +| Format | Compression | Metadata | Advantages | Disadvantages | +| :-------------------------------------- | :------------ | :--------- | :--------------------- | :----------------------------------------- | +| BMP | None | None | Universally viewable, | Large file sizes | +| | | | high quality | | +| JPEG | Lossy | Yes | Universally viewable, | Detail may be lost | +| | | | smaller file size | | +| PNG | Lossless | [Yes](https://www.w3.org/TR/PNG/#11keywords) | Universally viewable, [open standard](https://www.w3.org/TR/PNG/), smaller file size | Metadata less flexible than TIFF, RGB only | +| TIFF | None, lossy, | Yes | High quality or | Not universally viewable | +| | or lossless | | smaller file size | | + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Digital images are represented as rectangular arrays of square pixels. +- Digital images use a left-hand coordinate system, with the origin in the upper left corner, the x-axis running to the right, and the y-axis running down. 
Some learners may prefer to think in terms of counting down rows for the y-axis and across columns for the x-axis. Thus, we will make an effort to allow for both approaches in our lesson presentation. +- Most frequently, digital images use an additive RGB model, with eight bits for the red, green, and blue channels. +- skimage images are stored as multi-dimensional NumPy arrays. +- In skimage images, the red channel is specified first, then the green, then the blue, i.e., RGB. +- Lossless compression retains all the details in an image, but lossy compression results in loss of some of the original image detail. +- BMP images are uncompressed, meaning they have high quality but also that their file sizes are large. +- JPEG images use lossy compression, meaning that their file sizes are smaller, but image quality may suffer. +- TIFF images can be uncompressed or compressed with lossy or lossless compression. +- Depending on the camera or sensor, various useful pieces of information may be stored in an image file, in the image metadata. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/03-skimage-images.md b/episodes/03-skimage-images.md index ba8575699..0f41c1da0 100644 --- a/episodes/03-skimage-images.md +++ b/episodes/03-skimage-images.md @@ -1,36 +1,31 @@ --- -title: "Working with skimage" +title: Working with skimage teaching: 70 exercises: 50 -questions: -- "How can the skimage Python computer vision library be used to work with images?" -objectives: -- "Read and save images with imageio." -- "Display images with matplotlib." -- "Resize images with skimage." -- "Perform simple image thresholding with NumPy array operations." -- "Extract sub-images using array slicing." -keypoints: -- "Images are read from disk with the `iio.imread()` function." -- "We create a window that automatically scales the displayed image -with matplotlib and calling `show()` on the global figure object." 
-- "Colour images can be transformed to grayscale using `skimage.color.rgb2gray()` or, -in many cases, -be read as grayscale directly by passing the argument `mode=\"L\"` to `iio.imread()`." -- "We can resize images with the `skimage.transform.resize()` function." -- "NumPy array commands, such as `image[image < 128] = 0`, can be used to manipulate -the pixels of an image." -- "Array slicing can be used to extract sub-images or modify areas of -images, e.g., `clip = image[60:150, 135:480, :]`." -- "Metadata is not retained when images are loaded as skimage images." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Read and save images with imageio. +- Display images with matplotlib. +- Resize images with skimage. +- Perform simple image thresholding with NumPy array operations. +- Extract sub-images using array slicing. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can the skimage Python computer vision library be used to work with images? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + We have covered much of how images are represented in computer software. In this episode we will learn some more methods - for accessing and changing digital images. +for accessing and changing digital images. ## First, import the packages needed for this episode -~~~ +```python import numpy as np import matplotlib.pyplot as plt import ipympl @@ -39,8 +34,7 @@ import skimage import skimage.color import skimage.transform import skimage.util -~~~ -{: .language-python} +``` ## Reading, displaying, and saving images @@ -55,15 +49,14 @@ Let us examine a simple Python program to load, display, and save an image to a different format. Here are the first few lines: -~~~ +```python """ * Python program to open, display, and save an image. 
* """ # read image chair = iio.imread(uri="data/chair.jpg") -~~~ -{: .language-python} +``` We use the `iio.imread()` function to read a JPEG image entitled **chair.jpg**. Imageio reads the image, converts it from JPEG into a NumPy array, @@ -71,11 +64,10 @@ and returns the array; we save the array in a variable named `image`. Next, we will do something with the image: -~~~ +```python fig, ax = plt.subplots() plt.imshow(chair) -~~~ -{: .language-python} +``` Once we have the image in the program, we first call `plt.subplots()` so that we will have @@ -84,11 +76,10 @@ Next we call `plt.imshow()` in order to display the image. Now, we will save the image in another format: -~~~ +```python # save a new version in .tif format iio.imwrite(uri="data/chair.tif", image=chair) -~~~ -{: .language-python} +``` The final statement in the program, `iio.imwrite(uri="data/chair.tif", image=chair)`, writes the image to a file named `chair.tif` in the `data/` directory. @@ -96,144 +87,165 @@ The `imwrite()` function automatically determines the type of the file, based on the file extension we provide. In this case, the `.tif` extension causes the image to be saved as a TIFF. -> ## Metadata, revisited -> -> Remember, as mentioned in the previous section, _images saved with `imwrite()` -> will not retain all metadata associated with the original image -> that was loaded into Python!_ -> If the image metadata is important to you, be sure to **always keep an unchanged -> copy of the original image!** -{: .callout } - -> ## Extensions do not always dictate file type -> -> The `iio.imwrite()` function automatically uses the file type we specify in -> the file name parameter's extension. -> Note that this is not always the case. -> For example, if we are editing a document in Microsoft Word, -> and we save the document as `paper.pdf` instead of `paper.docx`, -> the file *is not* saved as a PDF document. 
-{: .callout} - -> ## Named versus positional arguments -> -> When we call functions in Python, -> there are two ways we can specify the necessary arguments. -> We can specify the arguments *positionally*, i.e., -> in the order the parameters appear in the function definition, -> or we can use *named arguments*. -> -> For example, the `iio.imwrite()` -> [function definition](https://imageio.readthedocs.io/en/stable/_autosummary/imageio.v3.imwrite.html) -> specifies two parameters, -> the resource to save the image to (e.g., a file name, an http address) and -> the image to write to disk. -> So, we could save the chair image in the sample code above -> using positional arguments like this: -> -> `iio.imwrite("data/chair.tif", image)` -> -> Since the function expects the first argument to be the file name, -> there is no confusion about what `"data/chair.jpg"` means. The same goes -> for the second argument. -> -> The style we will use in this workshop is to name each argument, like this: -> -> `iio.imwrite(uri="data/chair.tif", image=image)` -> -> This style will make it easier for you to learn how to use the variety of -> functions we will cover in this workshop. -{: .callout} - - -> ## Resizing an image (10 min) -> -> Add `import skimage.transform` and `import skimage.util` to your list of imports. -> Using the `chair.jpg` image located in the data folder, -> write a Python script to read your image into a variable named `chair`. -> Then, resize the image to 10 percent of its current size using these lines of code: -> -> ~~~ -> new_shape = (chair.shape[0] // 10, chair.shape[1] // 10, chair.shape[2]) -> resized_chair = skimage.transform.resize(image=chair, output_shape=new_shape) -> resized_chair = skimage.util.img_as_ubyte(resized_chair) -> ~~~ -> {: .language-python} -> -> As it is used here, -> the parameters to the `skimage.transform.resize()` function are -> the image to transform, `chair`, -> the dimensions we want the new image to have, `new_shape`. 
-> -> > Note that the pixel values in the new image are an approximation of -> > the original values and should not be confused with actual, observed -> > data. This is because `skimage` interpolates the pixel values when -> > reducing or increasing the size of an -> > image. `skimage.transform.resize` has a number of optional -> > parameters that allow the user to control this interpolation. You -> > can find more details in the [scikit-image -> > documentation](https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.resize). -> {: .callout} -> -> Image files on disk are normally stored as whole numbers for space efficiency, -> but transformations and other math operations often result in -> conversion to floating point numbers. -> Using the `skimage.util.img_as_ubyte()` method converts it back to whole numbers -> before we save it back to disk. -> If we don't convert it before saving, -> `iio.imwrite()` may not recognise it as image data. -> -> Next, write the resized image out to a new file named `resized.jpg` -> in your data directory. -> Finally, use `plt.imshow()` with each of your image variables to display -> both images in your notebook. -> Don't forget to use `fig, ax = plt.subplots()` so you don't overwrite -> the first image with the second. -> Images may appear the same size in jupyter, -> but you can see the size difference by comparing the scales for each. -> You can also see the differnce in file storage size on disk by -> hovering your mouse cursor over the original -> and the new file in the jupyter file browser, using `ls -l` in your shell, -> or the OS file browser if it is configured to show file sizes. -> -> > ## Solution -> > -> > Here is what your Python script might look like. -> > -> > ~~~ -> > """ -> > * Python script to read an image, resize it, and save it -> > * under a different name. 
-> > """ -> > -> > # read in image -> > chair = iio.imread(uri="data/chair.jpg") -> > -> > # resize the image -> > new_shape = (chair.shape[0] // 10, chair.shape[1] // 10, chair.shape[2]) -> > resized_chair = skimage.transform.resize(image=chair, output_shape=new_shape) -> > resized_chair = skimage.util.img_as_ubyte(resized_chair) -> > -> > # write out image -> > iio.imwrite(uri="data/resized_chair.jpg", image=resized_chair) -> > -> > # display images -> > fig, ax = plt.subplots() -> > plt.imshow(chair) -> > fig, ax = plt.subplots() -> > plt.imshow(resized_chair) -> > ~~~ -> > {: .language-python} -> > -> > The script resizes the `data/chair.jpg` image by a factor of 10 in both dimensions, -> > saves the result to the `data/resized_chair.jpg` file, -> > and displays original and resized for comparision. -> {: .solution} -{: .challenge} +:::::::::::::::::::::::::::::::::::::::: callout + +## Metadata, revisited + +Remember, as mentioned in the previous section, *images saved with `imwrite()` +will not retain all metadata associated with the original image +that was loaded into Python!* +If the image metadata is important to you, be sure to **always keep an unchanged +copy of the original image!** + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Extensions do not always dictate file type + +The `iio.imwrite()` function automatically uses the file type we specify in +the file name parameter's extension. +Note that this is not always the case. +For example, if we are editing a document in Microsoft Word, +and we save the document as `paper.pdf` instead of `paper.docx`, +the file *is not* saved as a PDF document. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Named versus positional arguments + +When we call functions in Python, +there are two ways we can specify the necessary arguments. 
+We can specify the arguments *positionally*, i.e., +in the order the parameters appear in the function definition, +or we can use *named arguments*. + +For example, the `iio.imwrite()` +[function definition](https://imageio.readthedocs.io/en/stable/_autosummary/imageio.v3.imwrite.html) +specifies two parameters, +the resource to save the image to (e.g., a file name, an http address) and +the image to write to disk. +So, we could save the chair image in the sample code above +using positional arguments like this: + +`iio.imwrite("data/chair.tif", image)` + +Since the function expects the first argument to be the file name, +there is no confusion about what `"data/chair.jpg"` means. The same goes +for the second argument. + +The style we will use in this workshop is to name each argument, like this: + +`iio.imwrite(uri="data/chair.tif", image=image)` + +This style will make it easier for you to learn how to use the variety of +functions we will cover in this workshop. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Resizing an image (10 min) + +Add `import skimage.transform` and `import skimage.util` to your list of imports. +Using the `chair.jpg` image located in the data folder, +write a Python script to read your image into a variable named `chair`. +Then, resize the image to 10 percent of its current size using these lines of code: + +```python +new_shape = (chair.shape[0] // 10, chair.shape[1] // 10, chair.shape[2]) +resized_chair = skimage.transform.resize(image=chair, output_shape=new_shape) +resized_chair = skimage.util.img_as_ubyte(resized_chair) +``` + +As it is used here, +the parameters to the `skimage.transform.resize()` function are +the image to transform, `chair`, +the dimensions we want the new image to have, `new_shape`. 
+ +::::::::::::::::::::::::::::::::::::::::: callout + +Note that the pixel values in the new image are an approximation of +the original values and should not be confused with actual, observed +data. This is because `skimage` interpolates the pixel values when +reducing or increasing the size of an +image. `skimage.transform.resize` has a number of optional +parameters that allow the user to control this interpolation. You +can find more details in the [scikit-image +documentation](https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.resize). + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +Image files on disk are normally stored as whole numbers for space efficiency, +but transformations and other math operations often result in +conversion to floating point numbers. +Using the `skimage.util.img_as_ubyte()` method converts it back to whole numbers +before we save it back to disk. +If we don't convert it before saving, +`iio.imwrite()` may not recognise it as image data. + +Next, write the resized image out to a new file named `resized.jpg` +in your data directory. +Finally, use `plt.imshow()` with each of your image variables to display +both images in your notebook. +Don't forget to use `fig, ax = plt.subplots()` so you don't overwrite +the first image with the second. +Images may appear the same size in jupyter, +but you can see the size difference by comparing the scales for each. +You can also see the difference in file storage size on disk by +hovering your mouse cursor over the original +and the new file in the jupyter file browser, using `ls -l` in your shell, +or the OS file browser if it is configured to show file sizes. + +::::::::::::::: solution + +## Solution + +Here is what your Python script might look like. + +```python +""" + * Python script to read an image, resize it, and save it + * under a different name. 
+""" + +# read in image +chair = iio.imread(uri="data/chair.jpg") + +# resize the image +new_shape = (chair.shape[0] // 10, chair.shape[1] // 10, chair.shape[2]) +resized_chair = skimage.transform.resize(image=chair, output_shape=new_shape) +resized_chair = skimage.util.img_as_ubyte(resized_chair) + +# write out image +iio.imwrite(uri="data/resized_chair.jpg", image=resized_chair) + +# display images +fig, ax = plt.subplots() +plt.imshow(chair) +fig, ax = plt.subplots() +plt.imshow(resized_chair) +``` + +The script resizes the `data/chair.jpg` image by a factor of 10 in both dimensions, +saves the result to the `data/resized_chair.jpg` file, +and displays original and resized for comparision. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Manipulating pixels -In [the _Image Basics_ episode]({{page.root}}{% link _episodes/02-image-basics.md %}), +In [the *Image Basics* episode](02-image-basics.md), we individually manipulated the colours of pixels by changing the numbers stored in the image's NumPy array. Let's apply the principles learned there along with some new principles to a real world example. @@ -242,14 +254,14 @@ Suppose we are interested in this maize root cluster image. We want to be able to focus our program's attention on the roots themselves, while ignoring the black background. -![Root cluster image](../data/maize-root-cluster.jpg) +![](data/maize-root-cluster.jpg){alt='Root cluster image'} Since the image is stored as an array of numbers, we can simply look through the array for pixel colour values that are less than some threshold value. This process is called *thresholding*, and we will see more powerful methods to perform the thresholding task in -[the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}). +[the *Thresholding* episode](07-thresholding.md). Here, though, we will look at a simple and elegant NumPy method for thresholding. 
Let us develop a program that keeps only the pixel colour values in an image that have value greater than or equal to 128. @@ -258,11 +270,16 @@ i.e., pixels that do not belong to the black background. We will start by reading the image and displaying it. -> ## Loading images with `imageio`: Read-only arrays -> When loading an image with `imageio`, in certain situations the image is stored in a read-only array. If you attempt to manipulate the pixels in a read-only array, you will receive an error message `ValueError: assignment destination is read-only`. In order to make the image array writeable, we can create a copy with `image = np.array(image)` before manipulating the pixel values. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Loading images with `imageio`: Read-only arrays + +When loading an image with `imageio`, in certain situations the image is stored in a read-only array. If you attempt to manipulate the pixels in a read-only array, you will receive an error message `ValueError: assignment destination is read-only`. In order to make the image array writeable, we can create a copy with `image = np.array(image)` before manipulating the pixel values. + -~~~ +:::::::::::::::::::::::::::::::::::::::::::::::::: + +```python """ * Python script to ignore low intensity pixels in an image. * @@ -275,21 +292,18 @@ maize_roots = np.array(maize_roots) # display original image fig, ax = plt.subplots() plt.imshow(maize_roots) -~~~ -{: .language-python} - +``` Now we can threshold the image and display the result. -~~~ +```python # keep only high-intensity pixels maize_roots[maize_roots < 128] = 0 # display modified image fig, ax = plt.subplots() plt.imshow(maize_roots) -~~~ -{: .language-python} +``` The NumPy command to ignore all low-intensity pixels is `roots[roots < 128] = 0`. Every pixel colour value in the whole 3-dimensional array with a value less @@ -297,8 +311,7 @@ that 128 is set to zero. 
In this case, the result is an image in which the extraneous background detail has been removed. -![Thresholded root image](../fig/maize-root-cluster-threshold.jpg) - +![](fig/maize-root-cluster-threshold.jpg){alt='Thresholded root image'} ## Converting colour images to grayscale @@ -314,20 +327,24 @@ original data type and the data range back 0 to 255. Note that it is often better to use image values represented by floating point values, because using floating point numbers is numerically more stable. -> ## Colour and `color` -> -> The Carpentries generally prefers UK English spelling, -> which is why we use "colour" in the explanatory text of this lesson. -> However, `skimage` contains many modules and functions that include -> the US English spelling, `color`. -> The exact spelling matters here, -> e.g. you will encounter an error if you try to run `skimage.colour.rgb2gray()`. -> To account for this, we will use the US English spelling, `color`, -> in example Python code throughout the lesson. -> You will encounter a similar approach with "centre" and `center`. -{: .callout } - -~~~ +:::::::::::::::::::::::::::::::::::::::: callout + +## Colour and `color` + +The Carpentries generally prefers UK English spelling, +which is why we use "colour" in the explanatory text of this lesson. +However, `skimage` contains many modules and functions that include +the US English spelling, `color`. +The exact spelling matters here, +e.g. you will encounter an error if you try to run `skimage.colour.rgb2gray()`. +To account for this, we will use the US English spelling, `color`, +in example Python code throughout the lesson. +You will encounter a similar approach with "centre" and `center`. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +```python """ * Python script to load a color image as grayscale. 
* @@ -344,13 +361,12 @@ plt.imshow(chair) gray_chair = skimage.color.rgb2gray(chair) fig, ax = plt.subplots() plt.imshow(gray_chair, cmap="gray") -~~~ -{: .language-python} +``` We can also load colour images as grayscale directly by passing the argument `mode="L"` to `iio.imread()`. -~~~ +```python """ * Python script to load a color image as grayscale. * @@ -362,88 +378,99 @@ gray_chair = iio.imread(uri="data/chair.jpg", mode="L") # display grayscale image fig, ax = plt.subplots() plt.imshow(gray_chair, cmap="gray") -~~~ -{: .language-python} +``` The first argument to `iio.imread()` is the filename of the image. The second argument `mode="L"` determines the type and range of the pixel values in the image (e.g., an 8-bit pixel has a range of 0-255). This argument is forwarded to the `pillow` backend, a Python imaging library for which mode "L" means 8-bit pixels and single-channel (i.e., grayscale). The backend used by `iio.imread()` may be specified as an optional argument: to use `pillow`, you would pass `plugin="pillow"`. If the backend is not specified explicitly, `iio.imread()` determines the backend to use based on the image type. -> ## Loading images with `imageio`: Pixel type and depth -> When loading an image with `mode="L"`, the pixel values are stored as 8-bit integer numbers that can take values in the range 0-255. However, pixel values may also be stored with other types and ranges. For example, some `skimage` functions return the pixel values as floating point numbers in the range 0-1. The type and range of the pixel values are important for the colorscale when plotting, and for masking and thresholding images as we will see later in the lesson. If you are unsure about the type of the pixel values, you can inspect it with `print(image.dtype)`. For the example above, you should find that it is `dtype('uint8')` indicating 8-bit integer numbers. 
-{: .callout} - -> ## Keeping only low intensity pixels (10 min) -> -> A little earlier, we showed how we could use Python and skimage to turn -> on only the high intensity pixels from an image, while turning all the low -> intensity pixels off. -> Now, you can practice doing the opposite - keeping all -> the low intensity pixels while changing the high intensity ones. -> -> The file `data/sudoku.png` is an RGB image of a sudoku puzzle: -> -> ![Su-Do-Ku puzzle](../data/sudoku.png) -> -> Your task is to turn all of the bright pixels in the image to a -> light gray colour. In other words, mask the bright pixels that have -> a pixel value greater than, say, 192 and set their value to 192 (the -> value 192 is chosen here because it corresponds to 75% of the -> range 0-255 of an 8-bit pixel). The results should look like this: -> -> ![Modified Su-Do-Ku puzzle](../fig/sudoku-gray.png) -> -> _Hint: this is an instance where it is helpful to load the image in grayscale format._ -> -> > ## Solution -> > -> > First, load the image file `data/sudoku.png` as a grayscale image. Remember that we use `image = np.array(image)` to create a copy of the image array because `imageio` returns a non-writeable image. -> > -> > ~~~ -> > -> > sudoku = iio.imread(uri="data/sudoku.png") -> > ~~~ -> > {: .language-python } -> > -> > Then change all bright pixel values greater than 192 to 192: -> > -> > ~~~ -> > sudoku = sudoku.copy() -> > sudoku[sudoku > 125] = 125 -> > ~~~ -> > {: .language-python } -> > -> > Finally, display the modified image. Note that we have to specify `vmin=0` and `vmax=255` as the range of the colorscale because it would otherwise automatically adjust to the new range 0-192. 
-> > -> > ~~~ -> > fig, ax = plt.subplots() -> > plt.imshow(sudoku, cmap="gray", vmin=0, vmax=1) -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} - -> ## Plotting single channel images (cmap, vmin, vmax) -> -> Compared to a colour image, a grayscale image contains only a single -> intensity value per pixel. When we plot such an image with `plt.imshow`, -> matplotlib uses a colour map, to assign each intensity value a colour. -> The default colour map is called "viridis" and maps low values to purple -> and high values to yellow. We can instruct matplotlib to map low values -> to black and high values to white instead, by calling `plt.imshow` with -> `cmap="gray"`. -> [The documentation contains an overview of pre-defined colour maps](https://matplotlib.org/stable/gallery/color/colormap_reference.html). -> -> Furthermore, matplotlib determines the minimum and maximum values of -> the colour map dynamically from the image, by default. That means that in -> an image where the minimum is 64 and the maximum is 192, those values -> will be mapped to black and white respectively (and not dark gray and light -> gray as you might expect). If there are defined minimum and maximum vales, -> you can specify them via `vmin` and `vmax` to get the desired output. -> -> If you forget about this, it can lead to unexpected results. Try removing -> the `vmax` parameter from the sudoku challenge solution and see what happens. -{: .callout } +::::::::::::::::::::::::::::::::::::::::: callout + +## Loading images with `imageio`: Pixel type and depth + +When loading an image with `mode="L"`, the pixel values are stored as 8-bit integer numbers that can take values in the range 0-255. However, pixel values may also be stored with other types and ranges. For example, some `skimage` functions return the pixel values as floating point numbers in the range 0-1. 
The type and range of the pixel values are important for the colorscale when plotting, and for masking and thresholding images as we will see later in the lesson. If you are unsure about the type of the pixel values, you can inspect it with `print(image.dtype)`. For the example above, you should find that it is `dtype('uint8')` indicating 8-bit integer numbers. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Keeping only low intensity pixels (10 min) + +A little earlier, we showed how we could use Python and skimage to turn +on only the high intensity pixels from an image, while turning all the low +intensity pixels off. +Now, you can practice doing the opposite - keeping all +the low intensity pixels while changing the high intensity ones. + +The file `data/sudoku.png` is an RGB image of a sudoku puzzle: + +![](data/sudoku.png){alt='Su-Do-Ku puzzle'} + +Your task is to turn all of the bright pixels in the image to a +light gray colour. In other words, mask the bright pixels that have +a pixel value greater than, say, 192 and set their value to 192 (the +value 192 is chosen here because it corresponds to 75% of the +range 0-255 of an 8-bit pixel). The results should look like this: + +![](fig/sudoku-gray.png){alt='Modified Su-Do-Ku puzzle'} + +*Hint: this is an instance where it is helpful to load the image in grayscale format.* + +::::::::::::::: solution + +## Solution + +First, load the image file `data/sudoku.png` as a grayscale image. Remember that we use `image = np.array(image)` to create a copy of the image array because `imageio` returns a non-writeable image. + +```python + +sudoku = iio.imread(uri="data/sudoku.png", mode="L") +``` + +Then change all bright pixel values greater than 192 to 192: + +```python +sudoku = sudoku.copy() +sudoku[sudoku > 192] = 192 +``` + +Finally, display the modified image. 
Note that we have to specify `vmin=0` and `vmax=255` as the range of the colorscale because it would otherwise automatically adjust to the new range 0-192. + +```python +fig, ax = plt.subplots() +plt.imshow(sudoku, cmap="gray", vmin=0, vmax=255) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: callout + +## Plotting single channel images (cmap, vmin, vmax) + +Compared to a colour image, a grayscale image contains only a single +intensity value per pixel. When we plot such an image with `plt.imshow`, +matplotlib uses a colour map, to assign each intensity value a colour. +The default colour map is called "viridis" and maps low values to purple +and high values to yellow. We can instruct matplotlib to map low values +to black and high values to white instead, by calling `plt.imshow` with +`cmap="gray"`. +[The documentation contains an overview of pre-defined colour maps](https://matplotlib.org/stable/gallery/color/colormap_reference.html). + +Furthermore, matplotlib determines the minimum and maximum values of +the colour map dynamically from the image, by default. That means that in +an image where the minimum is 64 and the maximum is 192, those values +will be mapped to black and white respectively (and not dark gray and light +gray as you might expect). If there are defined minimum and maximum values, +you can specify them via `vmin` and `vmax` to get the desired output. + +If you forget about this, it can lead to unexpected results. Try removing +the `vmax` parameter from the sudoku challenge solution and see what happens. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Access via slicing @@ -459,7 +486,7 @@ Consider this image of a whiteboard, and suppose that we want to create a sub-image with just the portion that says "odd + even = odd," along with the red box that is drawn around the words. 
-![Whiteboard image](../data/board.jpg) +![](data/board.jpg){alt='Whiteboard image'} Using the same display technique we have used throughout this course, we can determine the coordinates of the corners of the area we wish to extract @@ -469,7 +496,7 @@ area with an upper-left coordinate of *(135, 60)* and a lower-right coordinate of *(480, 150)*, as shown in this version of the whiteboard picture: -![Whiteboard coordinates](../fig/board-coordinates.jpg) +![](fig/board-coordinates.jpg){alt='Whiteboard coordinates'} Note that the coordinates in the preceding image are specified in *(cx, ry)* order. Now if our entire whiteboard image is stored as an skimage image named `image`, @@ -486,7 +513,7 @@ indicates that we want all three colour channels in our new image. A script to create the subimage would start by loading the image: -~~~ +```python """ * Python script demonstrating image modification and creation via * NumPy array slicing. @@ -497,31 +524,28 @@ board = iio.imread(uri="data/board.jpg") board = np.array(board) fig, ax = plt.subplots() plt.imshow(board) -~~~ -{: .language-python} +``` Then we use array slicing to create a new image with our selected area and then display the new image. -~~~ +```python # extract, display, and save sub-image clipped_board = board[60:151, 135:481, :] fig, ax = plt.subplots() plt.imshow(clipped_board) iio.imwrite(uri="data/clipped_board.tif", image=clipped_board) -~~~ -{: .language-python} +``` We can also change the values in an image, as shown next. 
-~~~ +```python # replace clipped area with sampled color color = board[330, 90] board[60:151, 135:481] = color fig, ax = plt.subplots() plt.imshow(board) -~~~ -{: .language-python} +``` First, we sample a single pixel's colour at a particular location of the image, saving it in a variable named `color`, @@ -536,40 +560,59 @@ In this case, the command "erases" that area of the whiteboard, replacing the words with a beige colour, as shown in the final image produced by the program: -!["Erased" whiteboard](../fig/board-final.jpg) - -> ## Practicing with slices (10 min - optional, not included in timing) -> -> Using the techniques you just learned, write a script that -> creates, displays, and saves a sub-image containing -> only the plant and its roots from "data/maize-root-cluster.jpg" -> -> > ## Solution -> > -> > Here is the completed Python program to select only the plant and roots -> > in the image. -> > -> > ~~~ -> > """ -> > * Python script to extract a sub-image containing only the plant and -> > * roots in an existing image. 
-> > """ -> > -> > # load and display original image -> > maize_roots = iio.imread(uri="data/maize-root-cluster.jpg") -> > fig, ax = plt.subplots() -> > plt.imshow(maize_roots) -> > -> > # extract, display, and save sub-image -> > # WRITE YOUR CODE TO SELECT THE SUBIMAGE NAME clip HERE: -> > clipped_maize = maize_roots[0:400, 275:550, :] -> > fig, ax = plt.subplots() -> > plt.imshow(clipped_maize) -> > -> > -> > # WRITE YOUR CODE TO SAVE clip HERE -> > iio.imwrite(uri="data/clipped_maize.jpg", image=clipped_maize) -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} +![](fig/board-final.jpg){alt='"Erased" whiteboard'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Practicing with slices (10 min - optional, not included in timing) + +Using the techniques you just learned, write a script that +creates, displays, and saves a sub-image containing +only the plant and its roots from "data/maize-root-cluster.jpg" + +::::::::::::::: solution + +## Solution + +Here is the completed Python program to select only the plant and roots +in the image. + +```python +""" + * Python script to extract a sub-image containing only the plant and + * roots in an existing image. +""" + +# load and display original image +maize_roots = iio.imread(uri="data/maize-root-cluster.jpg") +fig, ax = plt.subplots() +plt.imshow(maize_roots) + +# extract, display, and save sub-image +# WRITE YOUR CODE TO SELECT THE SUBIMAGE NAME clip HERE: +clipped_maize = maize_roots[0:400, 275:550, :] +fig, ax = plt.subplots() +plt.imshow(clipped_maize) + + +# WRITE YOUR CODE TO SAVE clip HERE +iio.imwrite(uri="data/clipped_maize.jpg", image=clipped_maize) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Images are read from disk with the `iio.imread()` function. 
+- We create a window that automatically scales the displayed image with matplotlib and calling `show()` on the global figure object. +- Colour images can be transformed to grayscale using `skimage.color.rgb2gray()` or, in many cases, be read as grayscale directly by passing the argument `mode="L"` to `iio.imread()`. +- We can resize images with the `skimage.transform.resize()` function. +- NumPy array commands, such as `image[image < 128] = 0`, can be used to manipulate the pixels of an image. +- Array slicing can be used to extract sub-images or modify areas of images, e.g., `clip = image[60:150, 135:480, :]`. +- Metadata is not retained when images are loaded as skimage images. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/04-drawing.md b/episodes/04-drawing.md index 446a22ec9..2b466f526 100644 --- a/episodes/04-drawing.md +++ b/episodes/04-drawing.md @@ -1,24 +1,24 @@ --- -title: "Drawing and Bitwise Operations" +title: Drawing and Bitwise Operations teaching: 45 exercises: 45 -questions: -- "How can we draw on skimage images and use bitwise operations and masks to -select certain parts of an image?" -objectives: -- "Create a blank, black skimage image." -- "Draw rectangles and other shapes on skimage images." -- "Explain how a white shape on a black background can be used as a mask to -select specific parts of an image." -- "Use bitwise operations to apply a mask to an image." -keypoints: -- "We can use the NumPy `zeros()` function to create a blank, black image." -- "We can draw on skimage images with functions such as -`skimage.draw.rectangle()`, `skimage.draw.disk()`, `skimage.draw.line()`, -and more." -- "The drawing functions return indices to pixels that can be set directly." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Create a blank, black skimage image. +- Draw rectangles and other shapes on skimage images. 
+- Explain how a white shape on a black background can be used as a mask to select specific parts of an image. +- Use bitwise operations to apply a mask to an image. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can we draw on skimage images and use bitwise operations and masks to select certain parts of an image? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + The next series of episodes covers a basic toolkit of skimage operators. With these tools, we will be able to create programs to perform simple analyses of images @@ -26,7 +26,7 @@ based on changes in colour or shape. ## First, import the packages needed for this episode -~~~ +```python import numpy as np import matplotlib.pyplot as plt import ipympl @@ -34,8 +34,7 @@ import imageio.v3 as iio import skimage import skimage.draw %matplotlib widget -~~~ -{: .language-python} +``` Here, we import the `draw` submodule of `skimage` as well as packages familiar from earlier in the lesson. @@ -45,7 +44,7 @@ from earlier in the lesson. Often we wish to select only a portion of an image to analyze, and ignore the rest. Creating a rectangular sub-image with slicing, -as we did in [the _Image Representation in skimage_ episode]({{ page.root }}{% link _episodes/03-skimage-images.md %}) +as we did in [the *Image Representation in skimage* episode](03-skimage-images.md) is one option for simple cases. Another option is to create another special image, of the same size as the original, @@ -58,7 +57,7 @@ skimage provides tools to do that. 
Consider this image of maize seedlings: -![Maize seedlings](../fig/maize-seedlings.jpg) +![](fig/maize-seedlings.jpg){alt='Maize seedlings'} Now, suppose we want to analyze only the area of the image containing the roots themselves; @@ -74,14 +73,13 @@ A Python program to create a mask to select only that area of the image would start with a now-familiar section of code to open and display the original image: -~~~ +```python # Load and display the original image maize_seedlings = iio.imread(uri="data/maize-seedlings.tif") fig, ax = plt.subplots() plt.imshow(maize_seedlings) -~~~ -{: .language-python} +``` We load and display the initial image in the same way we have done before. @@ -94,11 +92,10 @@ But first, we need to generate a mask array of the same size as the image. Luckily, the NumPy library provides a function to create just such an array. The next section of code shows how: -~~~ +```python # Create the basic mask mask = np.ones(shape=maize_seedlings.shape[0:2], dtype="bool") -~~~ -{: .language-python} +``` The first argument to the `ones()` function is the shape of the original image, so that our mask will be exactly the same size as the original. @@ -114,7 +111,7 @@ You could check this, e.g., by `print(mask[0, 0])`. Next, we draw a filled, rectangle on the mask: -~~~ +```python # Draw filled rectangle on the mask image rr, cc = skimage.draw.rectangle(start=(357, 44), end=(740, 720)) mask[rr, cc] = False @@ -122,192 +119,200 @@ mask[rr, cc] = False # Display mask image fig, ax = plt.subplots() plt.imshow(mask, cmap="gray") -~~~ -{: .language-python} +``` Here is what our constructed mask looks like: -![Maize image mask](../fig/maize-seedlings-mask.png){: .image-with-shadow} +![](fig/maize-seedlings-mask.png){alt='Maize image mask' .image-with-shadow} The parameters of the `rectangle()` function `(357, 44)` and `(740, 720)`, are the coordinates of the upper-left (`start`) and lower-right (`end`) corners of a rectangle in *(ry, cx)* order. 
The function returns the rectangle as row (`rr`) and column (`cc`) coordinate arrays. -> ## Check the documentation! -> -> When using an skimage function for the first time - or the fifth time - -> it is wise to check how the function is used, via -> [the skimage documentation](https://scikit-image.org/docs/dev/user_guide) -> or other usage examples on programming-related sites such as -> [Stack Overflow](https://stackoverflow.com/). -> Basic information about skimage functions can be found interactively in Python, -> via commands like `help(skimage)` or `help(skimage.draw.rectangle)`. -> Take notes in your lab notebook. -> And, it is always wise to run some test code to verify -> that the functions your program uses are behaving in the manner you intend. -{: .callout} - -> ## Variable naming conventions! -> -> You may have wondered why we called the return values of the rectangle function -> `rr` and `cc`?! -> You may have guessed that `r` is short for `row` and `c` is short for `column`. -> However, the rectangle function returns mutiple rows and columns; -> thus we used a convention of doubling the letter `r` to `rr` (and `c` to `cc`) -> to indicate that those are multiple values. -> In fact it may have even been clearer to name those variables `rows` and `columns`; -> however this would have been also much longer. -> Whatever you decide to do, try to stick to some already existing conventions, -> such that it is easier for other people to understand your code. -{: .callout} - -> ## Other drawing operations (15 min) -> -> There are other functions for drawing on images, -> in addition to the `skimage.draw.rectangle()` function. -> We can draw circles, lines, text, and other shapes as well. -> These drawing functions may be useful later on, to help annotate images -> that our programs produce. -> Practice some of these functions here. 
-> -> Circles can be drawn with the `skimage.draw.disk()` function, -> which takes two parameters: -> the (ry, cx) point of the centre of the circle, -> and the radius of the circle. -> There is an optional `shape` parameter that can be supplied to this function. -> It will limit the output coordinates for cases where the circle -> dimensions exceed the ones of the image. -> -> Lines can be drawn with the `skimage.draw.line()` function, -> which takes four parameters: -> the (ry, cx) coordinate of one end of the line, -> and the (ry, cx) coordinate of the other end of the line. -> -> Other drawing functions supported by skimage can be found in -> [the skimage reference pages](https://scikit-image.org/docs/dev/api/skimage.draw.html?highlight=draw#module-skimage.draw). -> -> First let's make an empty, black image with a size of 800x600 pixels. -> Recall that a colour image has three channels for the colours red, green, and blue -> (RGB, cf. [Image Basics]({{ page.root }}{% link _episodes/03-skimage-images.md %})). -> Hence we need to create a 3D array of shape `(600, 800, 3)` where the last dimension represents the RGB colour channels. 
-> -> ~~~ -> # create the black canvas -> canvas = np.zeros(shape=(600, 800, 3), dtype="uint8") -> ~~~ -> {: .language-python} -> -> Now your task is to draw some other coloured shapes and lines on the image, -> perhaps something like this: -> -> ![Sample shapes](../fig/drawing-practice.jpg) -> -> > ## Solution -> > Drawing a circle: -> > -> > ~~~ -> > # Draw a blue circle with centre (200, 300) in (ry, cx) coordinates, and radius 100 -> > rr, cc = skimage.draw.disk(center=(200, 300), radius=100, shape=canvas.shape[0:2]) -> > canvas[rr, cc] = (0, 0, 255) -> > ~~~ -> > {: .language-python} -> > -> > Drawing a line: -> > -> > ~~~ -> > # Draw a green line from (400, 200) to (500, 700) in (ry, cx) coordinates -> > rr, cc = skimage.draw.line(r0=400, c0=200, r1=500, c1=700) -> > canvas[rr, cc] = (0, 255, 0) -> > ~~~ -> > {: .language-python} -> > -> > ~~~ -> > # Display the image -> > fig, ax = plt.subplots() -> > plt.imshow(canvas) -> > ~~~ -> > {: .language-python} -> > -> > We could expand this solution, if we wanted, -> > to draw rectangles, circles and lines at random positions within our black canvas. -> > To do this, we could use the `random` python module, -> > and the function `random.randrange`, -> > which can produce random numbers within a certain range. -> > -> > Let's draw 15 randomly placed circles: -> > -> > ~~~ -> > import random -> > -> > # create the black canvas -> > canvas = np.zeros(shape=(600, 800, 3), dtype="uint8") -> > -> > # draw a blue circle at a random location 15 times -> > for i in range(15): -> > rr, cc = skimage.draw.disk(center=( -> > random.randrange(600), -> > random.randrange(800)), -> > radius=50, -> > shape=canvas.shape[0:2], -> > ) -> > canvas[rr, cc] = (0, 0, 255) -> > -> > # display the results -> > fig, ax = plt.subplots() -> > plt.imshow(canvas) -> > ~~~ -> > {: .language-python} -> > -> > We could expand this even further to also -> > randomly choose whether to plot a rectangle, a circle, or a square. 
-> > Again, we do this with the `random` module, -> > now using the function `random.random` -> > that returns a random number between 0.0 and 1.0. -> > -> > ~~~ -> > import random -> > -> > # Draw 15 random shapes (rectangle, circle or line) at random positions -> > for i in range(15): -> > # generate a random number between 0.0 and 1.0 and use this to decide if we -> > # want a circle, a line or a sphere -> > x = random.random() -> > if x < 0.33: -> > # draw a blue circle at a random location -> > rr, cc = skimage.draw.disk(center=( -> > random.randrange(600), -> > random.randrange(800)), -> > radius=50, -> > shape=canvas.shape[0:2], -> > ) -> > color = (0, 0, 255) -> > elif x < 0.66: -> > # draw a green line at a random location -> > rr, cc = skimage.draw.line( -> > r0=random.randrange(600), -> > c0=random.randrange(800), -> > r1=random.randrange(600), -> > c1=random.randrange(800), -> > ) -> > color = (0, 255, 0) -> > else: -> > # draw a red rectangle at a random location -> > rr, cc = skimage.draw.rectangle( -> > start=(random.randrange(600), random.randrange(800)), -> > extent=(50, 50), -> > shape=canvas.shape[0:2], -> > ) -> > color = (255, 0, 0) -> > -> > canvas[rr, cc] = color -> > -> > # display the results -> > fig, ax = plt.subplots() -> > plt.imshow(canvas) -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::::: callout + +## Check the documentation! + +When using an skimage function for the first time - or the fifth time - +it is wise to check how the function is used, via +[the skimage documentation](https://scikit-image.org/docs/dev/user_guide) +or other usage examples on programming-related sites such as +[Stack Overflow](https://stackoverflow.com/). +Basic information about skimage functions can be found interactively in Python, +via commands like `help(skimage)` or `help(skimage.draw.rectangle)`. +Take notes in your lab notebook. 
+And, it is always wise to run some test code to verify +that the functions your program uses are behaving in the manner you intend. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Variable naming conventions! + +You may have wondered why we called the return values of the rectangle function +`rr` and `cc`?! +You may have guessed that `r` is short for `row` and `c` is short for `column`. +However, the rectangle function returns multiple rows and columns; +thus we used a convention of doubling the letter `r` to `rr` (and `c` to `cc`) +to indicate that those are multiple values. +In fact it may have even been clearer to name those variables `rows` and `columns`; +however this would also have been much longer. +Whatever you decide to do, try to stick to some already existing conventions, +such that it is easier for other people to understand your code. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Other drawing operations (15 min) + +There are other functions for drawing on images, +in addition to the `skimage.draw.rectangle()` function. +We can draw circles, lines, text, and other shapes as well. +These drawing functions may be useful later on, to help annotate images +that our programs produce. +Practice some of these functions here. + +Circles can be drawn with the `skimage.draw.disk()` function, +which takes two parameters: +the (ry, cx) point of the centre of the circle, +and the radius of the circle. +There is an optional `shape` parameter that can be supplied to this function. +It will limit the output coordinates for cases where the circle +dimensions exceed the ones of the image. + +Lines can be drawn with the `skimage.draw.line()` function, +which takes four parameters: +the (ry, cx) coordinate of one end of the line, +and the (ry, cx) coordinate of the other end of the line. 
+ +Other drawing functions supported by skimage can be found in +[the skimage reference pages](https://scikit-image.org/docs/dev/api/skimage.draw.html?highlight=draw#module-skimage.draw). + +First let's make an empty, black image with a size of 800x600 pixels. +Recall that a colour image has three channels for the colours red, green, and blue +(RGB, cf. [Image Basics](03-skimage-images.md)). +Hence we need to create a 3D array of shape `(600, 800, 3)` where the last dimension represents the RGB colour channels. + +```python +# create the black canvas +canvas = np.zeros(shape=(600, 800, 3), dtype="uint8") +``` + +Now your task is to draw some other coloured shapes and lines on the image, +perhaps something like this: + +![](fig/drawing-practice.jpg){alt='Sample shapes'} + +::::::::::::::: solution + +## Solution + +Drawing a circle: + +```python +# Draw a blue circle with centre (200, 300) in (ry, cx) coordinates, and radius 100 +rr, cc = skimage.draw.disk(center=(200, 300), radius=100, shape=canvas.shape[0:2]) +canvas[rr, cc] = (0, 0, 255) +``` + +Drawing a line: + +```python +# Draw a green line from (400, 200) to (500, 700) in (ry, cx) coordinates +rr, cc = skimage.draw.line(r0=400, c0=200, r1=500, c1=700) +canvas[rr, cc] = (0, 255, 0) +``` + +```python +# Display the image +fig, ax = plt.subplots() +plt.imshow(canvas) +``` + +We could expand this solution, if we wanted, +to draw rectangles, circles and lines at random positions within our black canvas. +To do this, we could use the `random` python module, +and the function `random.randrange`, +which can produce random numbers within a certain range. 
+ +Let's draw 15 randomly placed circles: + +```python +import random + +# create the black canvas +canvas = np.zeros(shape=(600, 800, 3), dtype="uint8") + +# draw a blue circle at a random location 15 times +for i in range(15): + rr, cc = skimage.draw.disk(center=( + random.randrange(600), + random.randrange(800)), + radius=50, + shape=canvas.shape[0:2], + ) + canvas[rr, cc] = (0, 0, 255) + +# display the results +fig, ax = plt.subplots() +plt.imshow(canvas) +``` + +We could expand this even further to also +randomly choose whether to plot a rectangle, a circle, or a square. +Again, we do this with the `random` module, +now using the function `random.random` +that returns a random number between 0.0 and 1.0. + +```python +import random + +# Draw 15 random shapes (rectangle, circle or line) at random positions +for i in range(15): + # generate a random number between 0.0 and 1.0 and use this to decide if we + # want a circle, a line or a sphere + x = random.random() + if x < 0.33: + # draw a blue circle at a random location + rr, cc = skimage.draw.disk(center=( + random.randrange(600), + random.randrange(800)), + radius=50, + shape=canvas.shape[0:2], + ) + color = (0, 0, 255) + elif x < 0.66: + # draw a green line at a random location + rr, cc = skimage.draw.line( + r0=random.randrange(600), + c0=random.randrange(800), + r1=random.randrange(600), + c1=random.randrange(800), + ) + color = (0, 255, 0) + else: + # draw a red rectangle at a random location + rr, cc = skimage.draw.rectangle( + start=(random.randrange(600), random.randrange(800)), + extent=(50, 50), + shape=canvas.shape[0:2], + ) + color = (255, 0, 0) + + canvas[rr, cc] = color + +# display the results +fig, ax = plt.subplots() +plt.imshow(canvas) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Image modification @@ -315,30 +320,38 @@ All that remains is the task of modifying the image using our mask in such a way that the areas with `True` pixels in the mask 
are not shown in the image any more. -> ## How does a mask work? (optional, not included in timing) -> -> Now, consider the mask image we created above. -> The values of the mask that corresponds to the portion of the image -> we are interested in are all `False`, -> while the values of the mask that corresponds to the portion of the image we -> want to remove are all `True`. -> -> How do we change the original image using the mask? -> -> > ## Solution -> > -> > When indexing the image using the mask, we access only those pixels at -> > positions where the mask is `True`. -> > So, when indexing with the mask, -> > one can set those values to 0, and effectively remove them from the image. -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +## How does a mask work? (optional, not included in timing) + +Now, consider the mask image we created above. +The values of the mask that corresponds to the portion of the image +we are interested in are all `False`, +while the values of the mask that corresponds to the portion of the image we +want to remove are all `True`. + +How do we change the original image using the mask? + +::::::::::::::: solution + +## Solution + +When indexing the image using the mask, we access only those pixels at +positions where the mask is `True`. +So, when indexing with the mask, +one can set those values to 0, and effectively remove them from the image. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Now we can write a Python program to use a mask to retain only the portions of our maize roots image that actually contains the seedling roots. 
We load the original image and create the mask in the same way as before: -~~~ +```python # Load the original image maize_seedlings = iio.imread(uri="data/maize-seedlings.tif") @@ -348,203 +361,223 @@ mask = np.ones(shape=maize_seedlings.shape[0:2], dtype="bool") # Draw a filled rectangle on the mask image rr, cc = skimage.draw.rectangle(start=(357, 44), end=(740, 720)) mask[rr, cc] = False -~~~ -{: .language-python} +``` Then, we use numpy indexing to remove the portions of the image, where the mask is `True`: -~~~ +```python # Apply the mask maize_seedlings[mask] = 0 -~~~ -{: .language-python} +``` Then, we display the masked image. -~~~ +```python fig, ax = plt.subplots() plt.imshow(maize_seedlings) -~~~ -{: .language-python} +``` The resulting masked image should look like this: -![Applied mask](../fig/maize-seedlings-masked.jpg) - -> ## Masking an image of your own (optional, not included in timing) -> -> Now, it is your turn to practice. -> Using your mobile phone, tablet, webcam, or digital camera, -> take an image of an object with a simple overall geometric shape -> (think rectangular or circular). -> Copy that image to your computer, write some code to make a mask, -> and apply it to select the part of the image containing your object. -> For example, here is an image of a remote control: -> -> ![Remote control image](../data/remote-control.jpg) -> -> And, here is the end result of a program masking out everything but the remote: -> -> ![Remote control masked](../fig/remote-control-masked.jpg) -> -> > ## Solution -> > -> > Here is a Python program to produce the cropped remote control image shown above. -> > Of course, your program should be tailored to your image. 
-> > -> > ~~~ -> > # Load the image -> > remote = iio.imread(uri="data/remote-control.jpg") -> > remote = np.array(remote) -> > -> > # Create the basic mask -> > mask = np.ones(shape=remote.shape[0:2], dtype="bool") -> > -> > # Draw a filled rectangle on the mask image -> > rr, cc = skimage.draw.rectangle(start=(93, 1107), end=(1821, 1668)) -> > mask[rr, cc] = False -> > -> > # Apply the mask -> > remote[mask] = 0 -> > -> > # Display the result -> > fig, ax = plt.subplots() -> > plt.imshow(remote) -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} - -> ## Masking a 96-well plate image (30 min) -> -> Consider this image of a 96-well plate that has been scanned on a flatbed scanner. -> -> ~~~ -> # Load the image -> wellplate = iio.imread(uri="data/wellplate-01.jpg") -> wellplate = np.array(wellplate) -> -> # Display the image -> fig, ax = plt.subplots() -> plt.imshow(wellplate) -> ~~~ -> {: .language-python} -> -> ![96-well plate](../data/wellplate-01.jpg) -> -> Suppose that we are interested in the colours of the solutions in each of the wells. -> We *do not* care about the colour of the rest of the image, -> i.e., the plastic that makes up the well plate itself. -> -> Your task is to write some code that will produce a mask that will -> mask out everything except for the wells. -> To help with this, you should use the text file `data/centers.txt` that contains -> the (cx, ry) coordinates of the centre of each of the 96 wells in this image. -> You may assume that each of the wells has a radius of 16 pixels. -> -> Your program should produce output that looks like this: -> -> ![Masked 96-well plate](../fig/wellplate-01-masked.jpg) -> -> > ## Solution -> > -> > ~~~ -> > # read in original image -> > wellplate = iio.imread(uri="data/wellplate-01.jpg") -> > wellplate = np.array(wellplate) -> > -> > # create the mask image -> > mask = np.ones(shape=wellplate.shape[0:2], dtype="bool") -> > -> > # open and iterate through the centers file... 
-> > with open("data/centers.txt", "r") as center_file: -> > for line in center_file: -> > # ... getting the coordinates of each well... -> > coordinates = line.split() -> > cx = int(coordinates[0]) -> > ry = int(coordinates[1]) -> > -> > # ... and drawing a circle on the mask -> > rr, cc = skimage.draw.disk(center=(ry, cx), radius=16, shape=wellplate.shape[0:2]) -> > mask[rr, cc] = False -> > -> > # apply the mask -> > wellplate[mask] = 0 -> > -> > # display the result -> > fig, ax = plt.subplots() -> > plt.imshow(wellplate) -> > ~~~ -> > {: .language-python} -> > -> {: .solution} -{: .challenge} - -> ## Masking a 96-well plate image, take two (optional, not included in timing) -> -> If you spent some time looking at the contents of -> the `data/centers.txt` file from the previous challenge, -> you may have noticed that the centres of each well in the image are very regular. -> *Assuming* that the images are scanned in such a way that -> the wells are always in the same place, -> and that the image is perfectly oriented -> (i.e., it does not slant one way or another), -> we could produce our well plate mask without having to -> read in the coordinates of the centres of each well. -> Assume that the centre of the upper left well in the image is at -> location cx = 91 and ry = 108, and that there are -> 70 pixels between each centre in the cx dimension and -> 72 pixels between each centre in the ry dimension. -> Each well still has a radius of 16 pixels. -> Write a Python program that produces the same output image as in the previous challenge, -> but *without* having to read in the `centers.txt` file. -> *Hint: use nested for loops.* -> -> > ## Solution -> > -> > Here is a Python program that is able to create the masked image without -> > having to read in the `centers.txt` file. 
-> > -> > ~~~ -> > # read in original image -> > wellplate = iio.imread(uri="data/wellplate-01.jpg") -> > wellplate = np.array(wellplate) -> > -> > # create the mask image -> > mask = np.ones(shape=wellplate.shape[0:2], dtype="bool") -> > -> > # upper left well coordinates -> > cx0 = 91 -> > ry0 = 108 -> > -> > # spaces between wells -> > deltaCX = 70 -> > deltaRY = 72 -> > -> > cx = cx0 -> > ry = ry0 -> > -> > # iterate each row and column -> > for row in range(12): -> > # reset cx to leftmost well in the row -> > cx = cx0 -> > for col in range(8): -> > -> > # ... and drawing a circle on the mask -> > rr, cc = skimage.draw.disk(center=(ry, cx), radius=16, shape=wellplate.shape[0:2]) -> > mask[rr, cc] = False -> > cx += deltaCX -> > # after one complete row, move to next row -> > ry += deltaRY -> > -> > # apply the mask -> > wellplate[mask] = 0 -> > -> > # display the result -> > fig, ax = plt.subplots() -> > plt.imshow(wellplate) -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} +![](fig/maize-seedlings-masked.jpg){alt='Applied mask'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Masking an image of your own (optional, not included in timing) + +Now, it is your turn to practice. +Using your mobile phone, tablet, webcam, or digital camera, +take an image of an object with a simple overall geometric shape +(think rectangular or circular). +Copy that image to your computer, write some code to make a mask, +and apply it to select the part of the image containing your object. +For example, here is an image of a remote control: + +![](data/remote-control.jpg){alt='Remote control image'} + +And, here is the end result of a program masking out everything but the remote: + +![](fig/remote-control-masked.jpg){alt='Remote control masked'} + +::::::::::::::: solution + +## Solution + +Here is a Python program to produce the cropped remote control image shown above. +Of course, your program should be tailored to your image. 
+ +```python +# Load the image +remote = iio.imread(uri="data/remote-control.jpg") +remote = np.array(remote) + +# Create the basic mask +mask = np.ones(shape=remote.shape[0:2], dtype="bool") + +# Draw a filled rectangle on the mask image +rr, cc = skimage.draw.rectangle(start=(93, 1107), end=(1821, 1668)) +mask[rr, cc] = False + +# Apply the mask +remote[mask] = 0 + +# Display the result +fig, ax = plt.subplots() +plt.imshow(remote) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Masking a 96-well plate image (30 min) + +Consider this image of a 96-well plate that has been scanned on a flatbed scanner. + +```python +# Load the image +wellplate = iio.imread(uri="data/wellplate-01.jpg") +wellplate = np.array(wellplate) + +# Display the image +fig, ax = plt.subplots() +plt.imshow(wellplate) +``` + +![](data/wellplate-01.jpg){alt='96-well plate'} + +Suppose that we are interested in the colours of the solutions in each of the wells. +We *do not* care about the colour of the rest of the image, +i.e., the plastic that makes up the well plate itself. + +Your task is to write some code that will produce a mask that will +mask out everything except for the wells. +To help with this, you should use the text file `data/centers.txt` that contains +the (cx, ry) coordinates of the centre of each of the 96 wells in this image. +You may assume that each of the wells has a radius of 16 pixels. + +Your program should produce output that looks like this: + +![](fig/wellplate-01-masked.jpg){alt='Masked 96-well plate'} + +::::::::::::::: solution + +## Solution + +```python +# read in original image +wellplate = iio.imread(uri="data/wellplate-01.jpg") +wellplate = np.array(wellplate) + +# create the mask image +mask = np.ones(shape=wellplate.shape[0:2], dtype="bool") + +# open and iterate through the centers file... 
+with open("data/centers.txt", "r") as center_file: + for line in center_file: + # ... getting the coordinates of each well... + coordinates = line.split() + cx = int(coordinates[0]) + ry = int(coordinates[1]) + + # ... and drawing a circle on the mask + rr, cc = skimage.draw.disk(center=(ry, cx), radius=16, shape=wellplate.shape[0:2]) + mask[rr, cc] = False + +# apply the mask +wellplate[mask] = 0 + +# display the result +fig, ax = plt.subplots() +plt.imshow(wellplate) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Masking a 96-well plate image, take two (optional, not included in timing) + +If you spent some time looking at the contents of +the `data/centers.txt` file from the previous challenge, +you may have noticed that the centres of each well in the image are very regular. +*Assuming* that the images are scanned in such a way that +the wells are always in the same place, +and that the image is perfectly oriented +(i.e., it does not slant one way or another), +we could produce our well plate mask without having to +read in the coordinates of the centres of each well. +Assume that the centre of the upper left well in the image is at +location cx = 91 and ry = 108, and that there are +70 pixels between each centre in the cx dimension and +72 pixels between each centre in the ry dimension. +Each well still has a radius of 16 pixels. +Write a Python program that produces the same output image as in the previous challenge, +but *without* having to read in the `centers.txt` file. +*Hint: use nested for loops.* + +::::::::::::::: solution + +## Solution + +Here is a Python program that is able to create the masked image without +having to read in the `centers.txt` file. 
+ +```python +# read in original image +wellplate = iio.imread(uri="data/wellplate-01.jpg") +wellplate = np.array(wellplate) + +# create the mask image +mask = np.ones(shape=wellplate.shape[0:2], dtype="bool") + +# upper left well coordinates +cx0 = 91 +ry0 = 108 + +# spaces between wells +deltaCX = 70 +deltaRY = 72 + +cx = cx0 +ry = ry0 + +# iterate each row and column +for row in range(12): + # reset cx to leftmost well in the row + cx = cx0 + for col in range(8): + + # ... and drawing a circle on the mask + rr, cc = skimage.draw.disk(center=(ry, cx), radius=16, shape=wellplate.shape[0:2]) + mask[rr, cc] = False + cx += deltaCX + # after one complete row, move to next row + ry += deltaRY + +# apply the mask +wellplate[mask] = 0 + +# display the result +fig, ax = plt.subplots() +plt.imshow(wellplate) +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- We can use the NumPy `zeros()` function to create a blank, black image. +- We can draw on skimage images with functions such as `skimage.draw.rectangle()`, `skimage.draw.disk()`, `skimage.draw.line()`, and more. +- The drawing functions return indices to pixels that can be set directly. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/05-creating-histograms.md b/episodes/05-creating-histograms.md index b2d960e6d..107c25843 100644 --- a/episodes/05-creating-histograms.md +++ b/episodes/05-creating-histograms.md @@ -1,30 +1,30 @@ --- -title: "Creating Histograms" +title: Creating Histograms teaching: 40 exercises: 40 -questions: -- "How can we create grayscale and colour histograms to understand the -distribution of colour values in an image?" -objectives: -- "Explain what a histogram is." -- "Load an image in grayscale format." -- "Create and display grayscale and colour histograms for entire images." 
-- "Create and display grayscale and colour histograms for certain areas of images, via masks." -keypoints: -- "In many cases, we can load images in grayscale by passing the `mode=\"L\"` -argument to the `iio.imread()` function." -- "We can create histograms of images with the `np.histogram` function." -- "We can separate the RGB channels of an image using slicing operations." -- "We can display histograms using the `matplotlib pyplot` `figure()`, -`title()`, `xlabel()`, `ylabel()`, `xlim()`, `plot()`, and `show()` functions." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Explain what a histogram is. +- Load an image in grayscale format. +- Create and display grayscale and colour histograms for entire images. +- Create and display grayscale and colour histograms for certain areas of images, via masks. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can we create grayscale and colour histograms to understand the distribution of colour values in an image? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + In this episode, we will learn how to use skimage functions to create and display histograms for images. ## First, import the packages needed for this episode -~~~ +```python import numpy as np import matplotlib.pyplot as plt import ipympl @@ -32,32 +32,31 @@ import imageio.v3 as iio import skimage import skimage.draw %matplotlib widget -~~~ -{: .language-python} +``` ## Introduction to Histograms As it pertains to images, a *histogram* is a graphical representation showing how frequently various colour values occur in the image. We saw in -[the _Image Basics_ episode]({{ page.root }}{% link _episodes/02-image-basics.md %}) +[the *Image Basics* episode](02-image-basics.md) that we could use a histogram to visualise the differences in uncompressed and compressed image formats. 
If your project involves detecting colour changes between images, histograms will prove to be very useful, and histograms are also quite handy as a preparatory step before performing -[thresholding]({{ page.root }}/07-thresholding). +[thresholding](07-thresholding.md). ## Grayscale Histograms We will start with grayscale images, and then move on to colour images. We will use this image of a plant seedling as an example: -![Plant seedling](../data/plant-seedling.jpg) +![](data/plant-seedling.jpg){alt='Plant seedling'} Here we load the image in grayscale instead of full colour, and display it: -~~~ +```python # read the image of a plant seedling as grayscale from the outset plant_seedling = iio.imread(uri="data/plant-seedling.jpg", mode="L") @@ -67,10 +66,9 @@ plant_seedling = skimage.util.img_as_float(plant_seedling) # display the image fig, ax = plt.subplots() plt.imshow(plant_seedling, cmap="gray") -~~~ -{: .language-python} +``` -![Plant seedling](../fig/plant-seedling-grayscale.png) +![](fig/plant-seedling-grayscale.png){alt='Plant seedling'} Again, we use the `iio.imread()` function to load our image. Then, we convert the grayscale image of integer dtype, with 0-255 range, into @@ -81,11 +79,10 @@ We will keep working with images in the value range 0 to 1 in this lesson. We now use the function `np.histogram` to compute the histogram of our image which, after all, is a NumPy array: -~~~ +```python # create the histogram histogram, bin_edges = np.histogram(plant_seedling, bins=256, range=(0, 1)) -~~~ -{: .language-python} +``` The parameter `bins` determines the number of "bins" to use for the histogram. We pass in `256` because we want to see the pixel count for each of @@ -113,7 +110,7 @@ so it has one more element, than the histogram. Next, we turn our attention to displaying the histogram, by taking advantage of the plotting facilities of the `matplotlib` library. 
-~~~ +```python # configure and draw the histogram figure plt.figure() plt.title("Grayscale Histogram") @@ -122,9 +119,7 @@ plt.ylabel("pixel count") plt.xlim([0.0, 1.0]) # <- named arguments do not work here plt.plot(bin_edges[0:-1], histogram) # <- or here -~~~ -{: .language-python} - +``` We create the plot with `plt.figure()`, then label the figure and the coordinate axes with `plt.title()`, @@ -133,16 +128,20 @@ The last step in the preparation of the figure is to set the limits on the values on the x-axis with the `plt.xlim([0.0, 1.0])` function call. -> ## Variable-length argument lists -> -> Note that we cannot used named parameters for the -> `plt.xlim()` or `plt.plot()` functions. -> This is because these functions are defined to take an arbitrary number of -> *unnamed* arguments. -> The designers wrote the functions this way because they are very versatile, -> and creating named parameters for all of the possible ways to use them -> would be complicated. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Variable-length argument lists + +Note that we cannot used named parameters for the +`plt.xlim()` or `plt.plot()` functions. +This is because these functions are defined to take an arbitrary number of +*unnamed* arguments. +The designers wrote the functions this way because they are very versatile, +and creating named parameters for all of the possible ways to use them +would be complicated. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: Finally, we create the histogram plot itself with `plt.plot(bin_edges[0:-1], histogram)`. @@ -152,101 +151,108 @@ indexing the `bin_edges` array to ignore the last value When we run the program on this image of a plant seedling, it produces this histogram: -![Plant seedling histogram](../fig/plant-seedling-grayscale-histogram.png) - -> ## Histograms in matplotlib -> -> Matplotlib provides a dedicated function to compute and display histograms: -> `plt.hist()`. 
-> We will not use it in this lesson in order to understand how to -> calculate histograms in more detail. -> In practice, it is a good idea to use this function, -> because it visualises histograms more appropriately than `plt.plot()`. -> Here, you could use it by calling -> `plt.hist(image.flatten(), bins=256, range=(0, 1))` -> instead of -> `np.histogram()` and `plt.plot()` -> (`*.flatten()` is a numpy function that converts our two-dimensional -> image into a one-dimensional array). -> -{: .callout} - -> ## Using a mask for a histogram (15 min) -> -> Looking at the histogram above, -> you will notice that there is a large number of very dark pixels, -> as indicated in the chart by the spike around the grayscale value 0.12. -> That is not so surprising, since the original image is mostly black background. -> What if we want to focus more closely on the leaf of the seedling? -> That is where a mask enters the picture! -> -> First, hover over the plant seedling image with your mouse to determine the -> *(x, y)* coordinates of a bounding box around the leaf of the seedling. -> Then, using techniques from -> [the _Drawing and Bitwise Operations_ episode]({{ page.root }}{% link _episodes/04-drawing.md %}), -> create a mask with a white rectangle covering that bounding box. -> -> After you have created the mask, apply it to the input image before passing -> it to the `np.histogram` function. 
-> -> > ## Solution -> > ~~~ -> > -> > # read the image as grayscale from the outset -> > plant_seedling = iio.imread(uri="data/plant-seedling.jpg", mode="L") -> > -> > # display the image -> > fig, ax = plt.subplots() -> > plt.imshow(plant_seedling, cmap="gray") -> > -> > # create mask here, using np.zeros() and skimage.draw.rectangle() -> > mask = np.zeros(shape=plant_seedling.shape, dtype="bool") -> > rr, cc = skimage.draw.rectangle(start=(199, 410), end=(384, 485)) -> > mask[rr, cc] = True -> > -> > # display the mask -> > fig, ax = plt.subplots() -> > plt.imshow(mask, cmap="gray") -> > -> > # mask the image and create the new histogram -> > histogram, bin_edges = np.histogram(plant_seedling[mask], bins=256, range=(0.0, 1.0)) -> > -> > # configure and draw the histogram figure -> > plt.figure() -> > -> > plt.title("Grayscale Histogram") -> > plt.xlabel("grayscale value") -> > plt.ylabel("pixel count") -> > plt.xlim([0.0, 1.0]) -> > plt.plot(bin_edges[0:-1], histogram) -> > -> > ~~~ -> > {: .language-python} -> > -> > Your histogram of the masked area should look something like this: -> > -> > ![Grayscale histogram of masked area](../fig/plant-seedling-grayscale-histogram-mask.png) -> {: .solution} -> -{: .challenge} +![](fig/plant-seedling-grayscale-histogram.png){alt='Plant seedling histogram'} + +::::::::::::::::::::::::::::::::::::::::: callout + +## Histograms in matplotlib + +Matplotlib provides a dedicated function to compute and display histograms: +`plt.hist()`. +We will not use it in this lesson in order to understand how to +calculate histograms in more detail. +In practice, it is a good idea to use this function, +because it visualises histograms more appropriately than `plt.plot()`. +Here, you could use it by calling +`plt.hist(image.flatten(), bins=256, range=(0, 1))` +instead of +`np.histogram()` and `plt.plot()` +(`*.flatten()` is a numpy function that converts our two-dimensional +image into a one-dimensional array). 
+ +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Using a mask for a histogram (15 min) + +Looking at the histogram above, +you will notice that there is a large number of very dark pixels, +as indicated in the chart by the spike around the grayscale value 0.12. +That is not so surprising, since the original image is mostly black background. +What if we want to focus more closely on the leaf of the seedling? +That is where a mask enters the picture! + +First, hover over the plant seedling image with your mouse to determine the +*(x, y)* coordinates of a bounding box around the leaf of the seedling. +Then, using techniques from +[the *Drawing and Bitwise Operations* episode](04-drawing.md), +create a mask with a white rectangle covering that bounding box. + +After you have created the mask, apply it to the input image before passing +it to the `np.histogram` function. + +::::::::::::::: solution + +## Solution + +```python + +# read the image as grayscale from the outset +plant_seedling = iio.imread(uri="data/plant-seedling.jpg", mode="L") + +# display the image +fig, ax = plt.subplots() +plt.imshow(plant_seedling, cmap="gray") + +# create mask here, using np.zeros() and skimage.draw.rectangle() +mask = np.zeros(shape=plant_seedling.shape, dtype="bool") +rr, cc = skimage.draw.rectangle(start=(199, 410), end=(384, 485)) +mask[rr, cc] = True + +# display the mask +fig, ax = plt.subplots() +plt.imshow(mask, cmap="gray") + +# mask the image and create the new histogram +histogram, bin_edges = np.histogram(plant_seedling[mask], bins=256, range=(0.0, 1.0)) + +# configure and draw the histogram figure +plt.figure() + +plt.title("Grayscale Histogram") +plt.xlabel("grayscale value") +plt.ylabel("pixel count") +plt.xlim([0.0, 1.0]) +plt.plot(bin_edges[0:-1], histogram) + +``` + +Your histogram of the masked area should look something like this: + +![](fig/plant-seedling-grayscale-histogram-mask.png){alt='Grayscale 
histogram of masked area'} + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Colour Histograms We can also create histograms for full colour images, in addition to grayscale histograms. We have seen colour histograms before, -in [the _Image Basics_ episode]({{ page.root }}{% link _episodes/02-image-basics.md %}). +in [the *Image Basics* episode](02-image-basics.md). A program to create colour histograms starts in a familiar way: -~~~ +```python # read original image, in full color plant_seedling = iio.imread(uri="data/plant-seedling.jpg") # display the image fig, ax = plt.subplots() plt.imshow(plant_seedling) -~~~ -{: .language-python} +``` We read the original image, now in full colour, and display it. @@ -256,7 +262,7 @@ We obtain the individual channels, by slicing the image along the last axis. For example, we can obtain the red colour channel by calling `r_chan = image[:, :, 0]`. -~~~ +```python # tuple to select colors of each channel line colors = ("red", "green", "blue") @@ -273,9 +279,7 @@ for channel_id, color in enumerate(colors): plt.title("Color Histogram") plt.xlabel("Color value") plt.ylabel("Pixel count") -~~~ -{: .language-python} - +``` We will draw the histogram line for each channel in a different colour, and so we create a tuple of the colours to use for the three lines with the @@ -293,38 +297,39 @@ so we will take a moment to discuss what is happening in the `for` statement. The Python built-in `enumerate()` function takes a list and returns an *iterator* of *tuples*, where the first element of the tuple is the index and the second element is the element of the list. -> ## Iterators, tuples, and `enumerate()` -> -> In Python, an *iterator*, or an *iterable object*, is -> something that can be iterated over with the `for` control structure. -> A *tuple* is a sequence of objects, just like a list. -> However, a tuple cannot be changed, -> and a tuple is indicated by parentheses instead of square brackets. 
-> The `enumerate()` function takes an iterable object, -> and returns an iterator of tuples consisting of -> the 0-based index and the corresponding object. -> -> For example, consider this small Python program: -> -> ~~~ -> list = ("a", "b", "c", "d", "e") -> -> for x in enumerate(list): -> print(x) -> ~~~ -> {: .language-python} -> -> Executing this program would produce the following output: -> -> ~~~ -> (0, 'a') -> (1, 'b') -> (2, 'c') -> (3, 'd') -> (4, 'e') -> ~~~ -> {: .output} -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Iterators, tuples, and `enumerate()` + +In Python, an *iterator*, or an *iterable object*, is +something that can be iterated over with the `for` control structure. +A *tuple* is a sequence of objects, just like a list. +However, a tuple cannot be changed, +and a tuple is indicated by parentheses instead of square brackets. +The `enumerate()` function takes an iterable object, +and returns an iterator of tuples consisting of +the 0-based index and the corresponding object. + +For example, consider this small Python program: + +```python +list = ("a", "b", "c", "d", "e") + +for x in enumerate(list): + print(x) +``` + +Executing this program would produce the following output: + +```output +(0, 'a') +(1, 'b') +(2, 'c') +(3, 'd') +(4, 'e') +``` + +:::::::::::::::::::::::::::::::::::::::::::::::::: In our colour histogram program, we are using a tuple, `(channel_id, color)`, as the `for` variable. @@ -350,84 +355,100 @@ Note the use of our loop variables, `channel_id` and `c`. Finally we label our axes and display the histogram, shown here: -![Colour histogram](../fig/plant-seedling-colour-histogram.png) - -> ## Colour histogram with a mask (25 min) -> -> We can also apply a mask to the images we apply the colour histogram process to, -> in the same way we did for grayscale histograms. 
-> Consider this image of a well plate, -> where various chemical sensors have been applied to water and -> various concentrations of hydrochloric acid and sodium hydroxide: -> -> ~~~ -> # read the image -> wellplate = iio.imread(uri="data/wellplate-02.tif") -> -> # display the image -> fig, ax = plt.subplots() -> plt.imshow(wellplate) -> ~~~ -> {: .language-python} -> ![Well plate image](../fig/wellplate-02.jpg) -> -> Suppose we are interested in the colour histogram of one of the sensors in the -> well plate image, -> specifically, the seventh well from the left in the topmost row, -> which shows Erythrosin B reacting with water. -> -> Hover over the image with your mouse to find the centre of that well -> and the radius (in pixels) of the well. -> Then create a circular mask to select only the desired well. -> Then, use that mask to apply the colour histogram operation to that well. -> -> Your masked image should look like this: -> -> ![Masked well plate](../fig/wellplate-02-masked.jpg) -> -> And, the program should produce a colour histogram that looks like this: -> -> ![Well plate histogram](../fig/wellplate-02-histogram.png) -> -> > ## Solution -> > -> > ~~~ -> > # create a circular mask to select the 7th well in the first row -> > mask = np.zeros(shape=wellplate.shape[0:2], dtype="bool") -> > circle = skimage.draw.disk(center=(240, 1053), radius=49, shape=wellplate.shape[0:2]) -> > mask[circle] = 1 -> > -> > # just for display: -> > # make a copy of the image, call it masked_image, and -> > # use np.logical_not() and indexing to apply the mask to it -> > masked_img = wellplate[:] -> > masked_img[np.logical_not(mask)] = 0 -> > -> > # create a new figure and display masked_img, to verify the -> > # validity of your mask -> > fig, ax = plt.subplots() -> > plt.imshow(masked_img) -> > -> > # list to select colors of each channel line -> > colors = ("red", "green", "blue") -> > -> > # create the histogram plot, with three lines, one for -> > # each color -> > 
plt.figure() -> > plt.xlim([0, 256]) -> > for (channel_id, color) in enumerate(colors): -> > # use your circular mask to apply the histogram -> > # operation to the 7th well of the first row -> > histogram, bin_edges = np.histogram( -> > wellplate[:, :, channel_id][mask], bins=256, range=(0, 256) -> > ) -> > -> > plt.plot(histogram, color=color) -> > -> > plt.xlabel("color value") -> > plt.ylabel("pixel count") -> > -> > ~~~ -> > {: .language-python} -> {: .solution} -{: .challenge} +![](fig/plant-seedling-colour-histogram.png){alt='Colour histogram'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Colour histogram with a mask (25 min) + +We can also apply a mask to the images we apply the colour histogram process to, +in the same way we did for grayscale histograms. +Consider this image of a well plate, +where various chemical sensors have been applied to water and +various concentrations of hydrochloric acid and sodium hydroxide: + +```python +# read the image +wellplate = iio.imread(uri="data/wellplate-02.tif") + +# display the image +fig, ax = plt.subplots() +plt.imshow(wellplate) +``` + +![](fig/wellplate-02.jpg){alt='Well plate image'} + +Suppose we are interested in the colour histogram of one of the sensors in the +well plate image, +specifically, the seventh well from the left in the topmost row, +which shows Erythrosin B reacting with water. + +Hover over the image with your mouse to find the centre of that well +and the radius (in pixels) of the well. +Then create a circular mask to select only the desired well. +Then, use that mask to apply the colour histogram operation to that well. 
+ +Your masked image should look like this: + +![](fig/wellplate-02-masked.jpg){alt='Masked well plate'} + +And, the program should produce a colour histogram that looks like this: + +![](fig/wellplate-02-histogram.png){alt='Well plate histogram'} + +::::::::::::::: solution + +## Solution + +```python +# create a circular mask to select the 7th well in the first row +mask = np.zeros(shape=wellplate.shape[0:2], dtype="bool") +circle = skimage.draw.disk(center=(240, 1053), radius=49, shape=wellplate.shape[0:2]) +mask[circle] = 1 + +# just for display: +# make a copy of the image, call it masked_image, and +# use np.logical_not() and indexing to apply the mask to it +masked_img = wellplate[:] +masked_img[np.logical_not(mask)] = 0 + +# create a new figure and display masked_img, to verify the +# validity of your mask +fig, ax = plt.subplots() +plt.imshow(masked_img) + +# list to select colors of each channel line +colors = ("red", "green", "blue") + +# create the histogram plot, with three lines, one for +# each color +plt.figure() +plt.xlim([0, 256]) +for (channel_id, color) in enumerate(colors): + # use your circular mask to apply the histogram + # operation to the 7th well of the first row + histogram, bin_edges = np.histogram( + wellplate[:, :, channel_id][mask], bins=256, range=(0, 256) + ) + + plt.plot(histogram, color=color) + +plt.xlabel("color value") +plt.ylabel("pixel count") + +``` + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- In many cases, we can load images in grayscale by passing the `mode="L"` argument to the `iio.imread()` function. +- We can create histograms of images with the `np.histogram` function. +- We can separate the RGB channels of an image using slicing operations. +- We can display histograms using the `matplotlib pyplot` `figure()`, `title()`, `xlabel()`, `ylabel()`, `xlim()`, `plot()`, and `show()` functions. 
+ +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/06-blurring.md b/episodes/06-blurring.md index 53141a042..2812dfe26 100644 --- a/episodes/06-blurring.md +++ b/episodes/06-blurring.md @@ -1,30 +1,29 @@ --- -title: "Blurring Images" +title: Blurring Images teaching: 35 exercises: 25 -questions: -- "How can we apply a low-pass blurring filter to an image?" -objectives: -- "Explain why applying a low-pass blurring filter to an image is beneficial." -- "Apply a Gaussian blur filter to an image using skimage." -keypoints: -- "Applying a low-pass blurring filter smooths edges and removes noise from -an image." -- "Blurring is often used as a first step before we perform -thresholding or edge detection." -- "The Gaussian blur can be applied to an image with the -`skimage.filters.gaussian()` function." -- "Larger sigma values may remove more noise, but they will also remove detail -from an image." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Explain why applying a low-pass blurring filter to an image is beneficial. +- Apply a Gaussian blur filter to an image using skimage. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can we apply a low-pass blurring filter to an image? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + In this episode, we will learn how to use skimage functions to blur images. When processing an image, we are often interested in identifying objects represented within it so that we can perform some further analysis of these objects e.g. by counting them, measuring their sizes, etc. An important concept associated with the identification of objects in an image -is that of _edges_: the lines that represent a transition from one group of +is that of *edges*: the lines that represent a transition from one group of similar pixels in the image to another different group. 
One example of an edge is the pixels that represent the boundaries of an object in an image, @@ -35,71 +34,74 @@ we make the colour transition from one side of an edge in the image to another smooth rather than sudden. The effect is to average out rapid changes in pixel intensity. A blur is a very common operation we need to perform before other tasks such as -[thresholding]({{ page.root }}{% link _episodes/07-thresholding.md %}). +[thresholding](07-thresholding.md). There are several different blurring functions in the `skimage.filters` module, so we will focus on just one here, the *Gaussian blur*. +::::::::::::::::::::::::::::::::::::::::: callout + +## Filters + +In the day-to-day, macroscopic world, +we have physical filters which separate out objects by size. +A filter with small holes allows only small objects through, +leaving larger objects behind. +This is a good analogy for image filters. +A high-pass filter will retain the smaller details in an image, +filtering out the larger ones. +A low-pass filter retains the larger features, +analogous to what's left behind by a physical filter mesh. +*High-* and \*low-\*pass, here, +refer to high and low *spatial frequencies* in the image. +Details associated with high spatial frequencies are small, +a lot of these features would fit across an image. +Features associated with low spatial frequencies are large - +maybe a couple of big features per image. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Blurring + +Blurring is to make something less clear or distinct. +This could be interpreted quite broadly in the context of image analysis - +anything that reduces or distorts the detail of an image might apply. +Applying a low pass filter, which removes detail occurring at high spatial frequencies, +is perceived as a blurring effect. +A Gaussian blur is a filter that makes use of a Gaussian kernel. 
+ +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout -> ## Filters -> -> In the day-to-day, macroscopic world, -> we have physical filters which separate out objects by size. -> A filter with small holes allows only small objects through, -> leaving larger objects behind. -> This is a good analogy for image filters. -> A high-pass filter will retain the smaller details in an image, -> filtering out the larger ones. -> A low-pass filter retains the larger features, -> analogous to what's left behind by a physical filter mesh. -> *High-* and *low-*pass, here, -> refer to high and low *spatial frequencies* in the image. -> Details associated with high spatial frequencies are small, -> a lot of these features would fit across an image. -> Features associated with low spatial frequencies are large - -> maybe a couple of big features per image. -> -{: .callout} - - -> ## Blurring -> -> Blurring is to make something less clear or distinct. -> This could be interpreted quite broadly in the context of image analysis - -> anything that reduces or distorts the detail of an image might apply. -> Applying a low pass filter, which removes detail occurring at high spatial frequencies, -> is perceived as a blurring effect. -> A Gaussian blur is a filter that makes use of a Gaussian kernel. -> -{: .callout} - - -> ## Kernels -> -> A kernel can be used to implement a filter on an image. -> A kernel, in this context, -> is a small matrix which is combined with the image using -> a mathematical technique: *convolution*. -> Different sizes, shapes and contents of kernel produce different effects. -> The kernel can be thought of as a little image in itself, -> and will favour features of a similar size and shape in the main image. -> On convolution with an image, a big, blobby kernel will retain -> big, blobby, low spatial frequency features. -> -{: .callout} +## Kernels + +A kernel can be used to implement a filter on an image. 
+A kernel, in this context, +is a small matrix which is combined with the image using +a mathematical technique: *convolution*. +Different sizes, shapes and contents of kernel produce different effects. +The kernel can be thought of as a little image in itself, +and will favour features of a similar size and shape in the main image. +On convolution with an image, a big, blobby kernel will retain +big, blobby, low spatial frequency features. + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Gaussian blur Consider this image of a cat, in particular the area of the image outlined by the white square. -![Cat image](../fig/cat.jpg) +![](fig/cat.jpg){alt='Cat image'} Now, zoom in on the area of the cat's eye, as shown in the left-hand image below. When we apply a filter, we consider each pixel in the image, one at a time. In this example, the pixel we are currently working on is highlighted in red, as shown in the right-hand image. -![Cat eye pixels](../fig/cat-eye-pixels.jpg) +![](fig/cat-eye-pixels.jpg){alt='Cat eye pixels'} When we apply a filter, we consider rectangular groups of pixels surrounding each pixel in the image, in turn. @@ -116,22 +118,20 @@ weighted by the values in the kernel. In a Gaussian blur, the pixels nearest the centre of the kernel are given more weight than those far away from the centre. The rate at which this weight diminishes is determined by a Gaussian function, hence the name -Gaussian blur. +Gaussian blur. A Gaussian function maps random variables into a normal distribution or "Bell Curve". 
-![Gaussian function](../fig/Normal_Distribution_PDF.svg) - -| *https://en.wikipedia.org/wiki/Gaussian_function#/media/File:Normal_Distribution_PDF.svg* | - +![](fig/Normal_Distribution_PDF.svg){alt='Gaussian function'} +| *[https://en.wikipedia.org/wiki/Gaussian\_function#/media/File:Normal\_Distribution\_PDF.svg](https://en.wikipedia.org/wiki/Gaussian_function#/media/File:Normal_Distribution_PDF.svg)* | -The shape of the function is described by a mean value μ, and a variance value σ². The mean determines the central point of the bell curve on the x axis, and the variance describes the spread of the curve. +The shape of the function is described by a mean value μ, and a variance value σ². The mean determines the central point of the bell curve on the x axis, and the variance describes the spread of the curve. -In fact, when using Gaussian functions in Gaussian blurring, we use a 2D Gaussian function to account for X and Y dimensions, but the same rules apply. The mean μ is always 0, and represents the middle of the 2D kernel. Increasing values of σ² in either dimension increases the amount of blurring in that dimension. +In fact, when using Gaussian functions in Gaussian blurring, we use a 2D Gaussian function to account for X and Y dimensions, but the same rules apply. The mean μ is always 0, and represents the middle of the 2D kernel. Increasing values of σ² in either dimension increases the amount of blurring in that dimension. -![2D Gaussian function](../fig/Gaussian_2D.png) +![](fig/Gaussian_2D.png){alt='2D Gaussian function'} -| *https://commons.wikimedia.org/wiki/File:Gaussian_2D.png* | +| *[https://commons.wikimedia.org/wiki/File:Gaussian\_2D.png](https://commons.wikimedia.org/wiki/File:Gaussian_2D.png)* | The averaging is done on a channel-by-channel basis, and the average channel values become the new value for the pixel in @@ -142,7 +142,7 @@ that a larger kernel will blur the image more than a smaller kernel. 
To get an idea of how this works, consider this plot of the two-dimensional Gaussian function: -![2D Gaussian function](../fig/gaussian-kernel.png) +![](fig/gaussian-kernel.png){alt='2D Gaussian function'} Imagine that plot laid over the kernel for the Gaussian blur filter. The height of the plot corresponds to the weight given to the underlying pixel @@ -160,7 +160,7 @@ To illustrate the blur process, consider the blue channel colour values from the seven-by-seven region of the cat image above: -![Image corner pixels](../fig/cat-corner-blue.png) +![](fig/cat-corner-blue.png){alt='Image corner pixels'} The filter is going to determine the new blue channel value for the centre pixel -- the one that currently has the value 86. The filter calculates a @@ -168,7 +168,7 @@ weighted average of all the blue channel values in the kernel giving higher weight to the pixels near the centre of the kernel. -![Image multiplication](../fig/combination.png) +![](fig/combination.png){alt='Image multiplication'} This weighted average, the sum of the multiplications, becomes the new value for the centre pixel (3, 3). @@ -176,72 +176,71 @@ The same process would be used to determine the green and red channel values, and then the kernel would be moved over to apply the filter to the next pixel in the image. -> ## Image edges -> -> Something different needs to happen for pixels near the outer limits of the image, -> since the kernel for the filter may be partially off the image. -> For example, what happens when the filter is applied to -> the upper-left pixel of the image? -> Here are the blue channel pixel values for the upper-left pixel of the cat image, -> again assuming a seven-by-seven kernel: -> -> ~~~ -> x x x x x x x -> x x x x x x x -> x x x x x x x -> x x x 4 5 9 2 -> x x x 5 3 6 7 -> x x x 6 5 7 8 -> x x x 5 4 5 3 -> ~~~ -> {: .output} -> -> The upper-left pixel is the one with value 4. 
-> Since the pixel is at the upper-left corner, -> there are no pixels underneath much of the kernel; -> here, this is represented by x's. -> So, what does the filter do in that situation? -> -> The default mode is to fill in the *nearest* pixel value from the image. -> For each of the missing x's the image value closest to the x is used. -> If we fill in a few of the missing pixels, you will see how this works: -> -> ~~~ -> x x x 4 x x x -> x x x 4 x x x -> x x x 4 x x x -> 4 4 4 4 5 9 2 -> x x x 5 3 6 7 -> x x x 6 5 7 8 -> x x x 5 4 5 3 -> ~~~ -> {: .output} -> -> Another strategy to fill those missing values is -> to *reflect* the pixels that are in the image to fill in for the pixels that -> are missing from the kernel. -> -> ~~~ -> x x x 5 x x x -> x x x 6 x x x -> x x x 5 x x x -> 2 9 5 4 5 9 2 -> x x x 5 3 6 7 -> x x x 6 5 7 8 -> x x x 5 4 5 3 -> ~~~ -> {: .output} -> -> A similar process would be used to fill in all of the other missing pixels from -> the kernel. Other *border modes* are available; you can learn more about them -> in [the skimage documentation](https://scikit-image.org/docs/dev/user_guide). -> -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Image edges + +Something different needs to happen for pixels near the outer limits of the image, +since the kernel for the filter may be partially off the image. +For example, what happens when the filter is applied to +the upper-left pixel of the image? +Here are the blue channel pixel values for the upper-left pixel of the cat image, +again assuming a seven-by-seven kernel: + +```output + x x x x x x x + x x x x x x x + x x x x x x x + x x x 4 5 9 2 + x x x 5 3 6 7 + x x x 6 5 7 8 + x x x 5 4 5 3 +``` + +The upper-left pixel is the one with value 4. +Since the pixel is at the upper-left corner, +there are no pixels underneath much of the kernel; +here, this is represented by x's. +So, what does the filter do in that situation? 
+ +The default mode is to fill in the *nearest* pixel value from the image. +For each of the missing x's the image value closest to the x is used. +If we fill in a few of the missing pixels, you will see how this works: + +```output + x x x 4 x x x + x x x 4 x x x + x x x 4 x x x + 4 4 4 4 5 9 2 + x x x 5 3 6 7 + x x x 6 5 7 8 + x x x 5 4 5 3 +``` + +Another strategy to fill those missing values is +to *reflect* the pixels that are in the image to fill in for the pixels that +are missing from the kernel. + +```output + x x x 5 x x x + x x x 6 x x x + x x x 5 x x x + 2 9 5 4 5 9 2 + x x x 5 3 6 7 + x x x 6 5 7 8 + x x x 5 4 5 3 +``` + +A similar process would be used to fill in all of the other missing pixels from +the kernel. Other *border modes* are available; you can learn more about them +in [the skimage documentation](https://scikit-image.org/docs/dev/user_guide). + +:::::::::::::::::::::::::::::::::::::::::::::::::: This animation shows how the blur kernel moves along in the original image in order to calculate the colour channel values for the blurred image. -![Blur demo animation](../fig/blur-demo.gif) +![](fig/blur-demo.gif){alt='Blur demo animation'} skimage has built-in functions to perform blurring for us, so we do not have to perform all of these mathematical operations ourselves. Let's work through @@ -249,36 +248,36 @@ an example of blurring an image with the skimage Gaussian blur function. 
First, import the packages needed for this episode -~~~ +```python import matplotlib.pyplot as plt import ipympl import imageio.v3 as iio import skimage import skimage.filters %matplotlib widget -~~~ -{: .language-python} +``` Then, we load the image, and display it: -~~~ + +```python image = iio.imread(uri="data/gaussian-original.png") # display the image fig, ax = plt.subplots() plt.imshow(image) -~~~ -{: .language-python} -![Original image](../data/gaussian-original.png) +``` + +![](data/gaussian-original.png){alt='Original image'} Next, we apply the gaussian blur: -~~~ + +```python sigma = 3.0 # apply Gaussian blur, creating a new image blurred = skimage.filters.gaussian( image, sigma=(sigma, sigma), truncate=3.5, channel_axis=-1) -~~~ -{: .language-python} +``` The first two arguments to `skimage.filters.gaussian()` are the image to blur, `image`, and a tuple defining the sigma to use in ry- and cx-direction, @@ -290,7 +289,7 @@ A Gaussian function is defined from -infinity to +infinity, but our kernel Therefore, we must choose a certain distance from the centre of the function where we stop this approximation, and set the final size of our kernel. In the above example, we set `truncate` to 3.5, -which means the kernel size will be 2 * sigma * 3.5. +which means the kernel size will be 2 \* sigma \* 3.5. For example, for a `sigma` of 1.0 the resulting kernel size would be 7, while for a `sigma` of 2.0 the kernel size would be 14. The default value for `truncate` in scikit-image is 4.0. @@ -302,84 +301,94 @@ recall that, in Python, the `-1` index refers to the last position. 
In this case, the last dimension is the third dimension (index `2`), since our image has three dimensions: -~~~ +```python print(image.ndim) -~~~ -{: .language-python} -~~~ +``` + +```output 3 -~~~ -{: .output } +``` Finally, we display the blurred image: -~~~ +```python +# display blurred image +fig, ax = plt.subplots() +plt.imshow(blurred) +``` + +![](fig/gaussian-blurred.png){alt='Original image'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Experimenting with sigma values (10 min) + +The size and shape of the kernel used to blur an image can have a +significant effect on the result of the blurring and any downstream analysis +carried out on the blurred image. +The next two exercises ask you to experiment with the sigma values of the kernel, +which is a good way to develop your understanding of how the choice of kernel +can influence the result of blurring. + +First, try running the code above with a range of smaller and larger sigma values. +Generally speaking, what effect does the sigma value have on the +blurred image? + +::::::::::::::: solution + +## Solution + +Generally speaking, the larger the sigma value, the more blurry the result. +A larger sigma will tend to get rid of more noise in the image, which will +help for other operations we will cover soon, such as thresholding. +However, a larger sigma also tends to eliminate some of the detail from +the image. So, we must strike a balance with the sigma value used for +blur filters. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Experimenting with kernel shape (10 min - optional, not included in timing) + +Now, what is the effect of applying an asymmetric kernel to blurring an image? +Try running the code above with different sigmas in the ry and cx direction. +For example, a sigma of 1.0 in the ry direction, and 6.0 in the cx direction. 
+ +::::::::::::::: solution + +## Solution + +```python +# apply Gaussian blur, with a sigma of 1.0 in the ry direction, and 6.0 in the cx direction +blurred = skimage.filters.gaussian( + image, sigma=(1.0, 6.0), truncate=3.5, channel_axis=-1 +) + # display blurred image fig, ax = plt.subplots() plt.imshow(blurred) -~~~ -{: .language-python} -![Original image](../fig/gaussian-blurred.png) - -> ## Experimenting with sigma values (10 min) -> -> The size and shape of the kernel used to blur an image can have a -> significant effect on the result of the blurring and any downstream analysis -> carried out on the blurred image. -> The next two exercises ask you to experiment with the sigma values of the kernel, -> which is a good way to develop your understanding of how the choice of kernel -> can influence the result of blurring. -> -> First, try running the code above with a range of smaller and larger sigma values. -> Generally speaking, what effect does the sigma value have on the -> blurred image? -> -> > ## Solution -> > -> > Generally speaking, the larger the sigma value, the more blurry the result. -> > A larger sigma will tend to get rid of more noise in the image, which will -> > help for other operations we will cover soon, such as thresholding. -> > However, a larger sigma also tends to eliminate some of the detail from -> > the image. So, we must strike a balance with the sigma value used for -> > blur filters. -> {: .solution} -{: .challenge} - -> ## Experimenting with kernel shape (10 min - optional, not included in timing) -> -> Now, what is the effect of applying an asymmetric kernel to blurring an image? -> Try running the code above with different sigmas in the ry and cx direction. -> For example, a sigma of 1.0 in the ry direction, and 6.0 in the cx direction. 
-> -> > ## Solution -> > -> > ~~~ -> > # apply Gaussian blur, with a sigma of 1.0 in the ry direction, and 6.0 in the cx direction -> > blurred = skimage.filters.gaussian( -> > image, sigma=(1.0, 6.0), truncate=3.5, channel_axis=-1 -> > ) -> > -> > # display blurred image -> > fig, ax = plt.subplots() -> > plt.imshow(blurred) -> > ~~~ -> > {: .language-python} -> > -> > ![Rectangular kernel blurred image](../fig/rectangle-gaussian-blurred.png) -> > -> > These unequal sigma values produce a kernel that is rectangular instead of square. -> > The result is an image that is much more blurred in the x direction than the -> > y direction. -> > For most use cases, a uniform blurring effect is desirable and -> > this kind of asymmetric blurring should be avoided. -> > However, it can be helpful in specific circumstances e.g. when noise is present in -> > your image in a particular pattern or orientation, such as vertical lines, -> > or when you want to -> > [remove uniform noise without blurring edges present in the image in a particular orientation](https://www.researchgate.net/publication/228567435_An_edge_detection_algorithm_based_on_rectangular_Gaussian_kernels_for_machine_vision_applications). -> > -> > -> {: .solution} -{: .challenge} +``` + +![](fig/rectangle-gaussian-blurred.png){alt='Rectangular kernel blurred image'} + +These unequal sigma values produce a kernel that is rectangular instead of square. +The result is an image that is much more blurred in the x direction than the +y direction. +For most use cases, a uniform blurring effect is desirable and +this kind of asymmetric blurring should be avoided. +However, it can be helpful in specific circumstances e.g. 
when noise is present in +your image in a particular pattern or orientation, such as vertical lines, +or when you want to +[remove uniform noise without blurring edges present in the image in a particular orientation](https://www.researchgate.net/publication/228567435_An_edge_detection_algorithm_based_on_rectangular_Gaussian_kernels_for_machine_vision_applications). + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Other methods of blurring @@ -389,3 +398,14 @@ For other kinds of noise, e.g. "salt and pepper", a median filter is typically used. See [the `skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html#module-skimage.filters) for a list of available filters. + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Applying a low-pass blurring filter smooths edges and removes noise from an image. +- Blurring is often used as a first step before we perform thresholding or edge detection. +- The Gaussian blur can be applied to an image with the `skimage.filters.gaussian()` function. +- Larger sigma values may remove more noise, but they will also remove detail from an image. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/07-thresholding.md b/episodes/07-thresholding.md index f841cc9fa..45564c397 100644 --- a/episodes/07-thresholding.md +++ b/episodes/07-thresholding.md @@ -1,33 +1,28 @@ --- -title: "Thresholding" +title: Thresholding teaching: 60 exercises: 50 -questions: -- "How can we use thresholding to produce a binary image?" -objectives: -- "Explain what thresholding is and how it can be used." -- "Use histograms to determine appropriate threshold values to use for the -thresholding process." -- "Apply simple, fixed-level binary thresholding to an image." -- "Explain the difference between using the operator `>` or the operator `<` to -threshold an image represented by a numpy array." 
-- "Describe the shape of a binary image produced by thresholding via `>` or `<`." -- "Explain when Otsu's method for automatic thresholding is appropriate." -- "Apply automatic thresholding to an image using Otsu's method." -- "Use the `np.count_nonzero()` function to count the number of non-zero pixels -in an image." -keypoints: -- "Thresholding produces a binary image, where all pixels with intensities -above (or below) a threshold value are turned on, while all other pixels are -turned off." -- "The binary images produced by thresholding are held in two-dimensional NumPy -arrays, since they have only one colour value channel. They are boolean, hence they contain -the values 0 (off) and 1 (on)." -- "Thresholding can be used to create masks that select only the interesting -parts of an image, or as the first step before -edge detection or finding contours." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Explain what thresholding is and how it can be used. +- Use histograms to determine appropriate threshold values to use for the thresholding process. +- Apply simple, fixed-level binary thresholding to an image. +- Explain the difference between using the operator `>` or the operator `<` to threshold an image represented by a numpy array. +- Describe the shape of a binary image produced by thresholding via `>` or `<`. +- Explain when Otsu's method for automatic thresholding is appropriate. +- Apply automatic thresholding to an image using Otsu's method. +- Use the `np.count_nonzero()` function to count the number of non-zero pixels in an image. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can we use thresholding to produce a binary image? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + In this episode, we will learn how to use skimage functions to apply thresholding to an image. 
Thresholding is a type of *image segmentation*, @@ -39,7 +34,7 @@ we use thresholding as a way to select areas of interest of an image, while ignoring the parts we are not concerned with. We have already done some simple thresholding, in the "Manipulating pixels" section of -[the _Image Representation in skimage_ episode]({{ page.root }}{% link _episodes/03-skimage-images.md %}). +[the *Image Representation in skimage* episode](03-skimage-images.md). In that case, we used a simple NumPy array manipulation to separate the pixels belonging to the root system of a plant from the black background. In this episode, we will learn how to use skimage functions to perform thresholding. @@ -48,7 +43,7 @@ select the parts of an image we are interested in. ## First, import the packages needed for this episode -~~~ +```python import numpy as np import glob import matplotlib.pyplot as plt @@ -57,25 +52,23 @@ import imageio.v3 as iio import skimage.color import skimage.filters %matplotlib widget -~~~ -{: .language-python} +``` ## Simple thresholding Consider the image `data/shapes-01.jpg` with a series of crudely cut shapes set against a white background. -~~~ +```python # load the image shapes01 = iio.imread(uri="data/shapes-01.jpg") fig, ax = plt.subplots() plt.imshow(shapes01) -~~~ -{: .language-python} +``` -![Image with geometric shapes on white background](../data/shapes-01.jpg) -{: .image-with-shadow} +![](data/shapes-01.jpg) +{alt='Image with geometric shapes on white background' .image-with-shadow} Now suppose we want to select only the shapes from the image. In other words, we want to leave the pixels belonging to the shapes "on," @@ -89,9 +82,9 @@ we have to provide a threshold value `t`. The process works like this. First, we will load the original image, convert it to grayscale, -and de-noise it as in [the _Blurring Images_ episode]({{ page.root }}{% link _episodes/06-blurring.md %}). +and de-noise it as in [the *Blurring Images* episode](06-blurring.md). 
-~~~ +```python # convert the image to grayscale gray_shapes = skimage.color.rgb2gray(shapes01) @@ -100,18 +93,17 @@ blurred_shapes = skimage.filters.gaussian(gray_shapes, sigma=1.0) fig, ax = plt.subplots() plt.imshow(blurred_shapes, cmap="gray") -~~~ -{: .language-python} +``` -![Grayscale image of the geometric shapes](../fig/shapes-01-grayscale.png) -{: .image-with-shadow} +![](fig/shapes-01-grayscale.png) +{alt='Grayscale image of the geometric shapes' .image-with-shadow} Next, we would like to apply the threshold `t` such that pixels with grayscale values on one side of `t` will be turned "on", while pixels with grayscale values on the other side will be turned "off". How might we do that? Remember that grayscale images contain pixel values in the range from 0 to 1, -so we are looking for a threshold `t` in the closed range [0.0, 1.0]. +so we are looking for a threshold `t` in the closed range [0\.0, 1.0]. We see in the image that the geometric shapes are "darker" than the white background but there is also some light gray noise on the background. One way to determine a "good" value for `t` is @@ -120,9 +112,9 @@ and try to identify what grayscale ranges correspond to the shapes in the image or the background. The histogram for the shapes image shown above can be produced as in -[the _Creating Histograms_ episode]({{ page.root }}{% link _episodes/05-creating-histograms.md %}). +[the *Creating Histograms* episode](05-creating-histograms.md). 
-~~~ +```python # create a histogram of the blurred grayscale image histogram, bin_edges = np.histogram(blurred_shapes, bins=256, range=(0.0, 1.0)) @@ -132,10 +124,9 @@ plt.title("Grayscale Histogram") plt.xlabel("grayscale value") plt.ylabel("pixels") plt.xlim(0, 1.0) -~~~ -{: .language-python} +``` -![Grayscale histogram of the geometric shapes image](../fig/shapes-01-histogram.png) +![](fig/shapes-01-histogram.png){alt='Grayscale histogram of the geometric shapes image'} Since the image has a white background, most of the pixels in the image are white. @@ -159,136 +150,147 @@ where the `False` entries are shown as black pixels (0-valued) and the `True` entries are shown as white pixels (1-valued). -~~~ +```python # create a mask based on the threshold t = 0.8 binary_mask = blurred_shapes < t fig, ax = plt.subplots() plt.imshow(binary_mask, cmap="gray") -~~~ -{: .language-python} +``` -![Binary mask of the geometric shapes created by thresholding](../fig/shapes-01-mask.png) +![](fig/shapes-01-mask.png){alt='Binary mask of the geometric shapes created by thresholding'} You can see that the areas where the shapes were in the original area are now white, while the rest of the mask image is black. -> ## What makes a good threshold? -> -> As is often the case, the answer to this question is "it depends". -> In the example above, we could have just switched off all -> the white background pixels by choosing `t=1.0`, -> but this would leave us with some background noise in the mask image. -> On the other hand, if we choose too low a value for the threshold, -> we could lose some of the shapes that are too bright. -> You can experiment with the threshold by re-running the above code lines with -> different values for `t`. -> In practice, it is a matter of domain knowledge and -> experience to interpret the peaks in the histogram so to determine -> an appropriate threshold. 
-> The process often involves trial and error, -> which is a drawback of the simple thresholding method. -> Below we will introduce automatic thresholding, -> which uses a quantitative, mathematical definition for a good threshold that -> allows us to determine the value of `t` automatically. -> It is worth noting that the principle for simple and automatic thresholding -> can also be used for images with pixel ranges other than [0.0, 1.0]. -> For example, we could perform thresholding on pixel intensity values -> in the range [0, 255] as we have already seen in -> [the _Image Representation in skimage_ episode]({{ page.root}}{% link _episodes/03-skimage-images.md %}). -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## What makes a good threshold? + +As is often the case, the answer to this question is "it depends". +In the example above, we could have just switched off all +the white background pixels by choosing `t=1.0`, +but this would leave us with some background noise in the mask image. +On the other hand, if we choose too low a value for the threshold, +we could lose some of the shapes that are too bright. +You can experiment with the threshold by re-running the above code lines with +different values for `t`. +In practice, it is a matter of domain knowledge and +experience to interpret the peaks in the histogram so to determine +an appropriate threshold. +The process often involves trial and error, +which is a drawback of the simple thresholding method. +Below we will introduce automatic thresholding, +which uses a quantitative, mathematical definition for a good threshold that +allows us to determine the value of `t` automatically. +It is worth noting that the principle for simple and automatic thresholding +can also be used for images with pixel ranges other than [0\.0, 1.0]. 
+For example, we could perform thresholding on pixel intensity values +in the range [0, 255] as we have already seen in +[the *Image Representation in skimage* episode](03-skimage-images.md). + + +:::::::::::::::::::::::::::::::::::::::::::::::::: We can now apply the `binary_mask` to the original coloured image as we -have learned in [the _Drawing and Bitwise Operations_ episode]({{page.root}}{% link _episodes/04-drawing.md %}). +have learned in [the *Drawing and Bitwise Operations* episode](04-drawing.md). What we are left with is only the coloured shapes from the original. -~~~ +```python # use the binary_mask to select the "interesting" part of the image selection = shapes01.copy() selection[~binary_mask] = 0 fig, ax = plt.subplots() plt.imshow(selection) -~~~ -{: .language-python} - -![Selected shapes after applying binary mask](../fig/shapes-01-selected.png) - -> ## More practice with simple thresholding (15 min) -> -> Now, it is your turn to practice. Suppose we want to use simple thresholding -> to select only the coloured shapes (in this particular case we consider grayish to be a colour, too) from the image `data/shapes-02.jpg`: -> -> ![Another image with geometric shapes on white background](../data/shapes-02.jpg) -> -> First, plot the grayscale histogram as in the [Creating -> Histogram]({{ page.root }}/05-creating-histograms/) episode and -> examine the distribution of grayscale values in the image. What do -> you think would be a good value for the threshold `t`? 
-> -> > ## Solution -> > -> > The histogram for the `data/shapes-02.jpg` image can be shown with -> > -> > ~~~ -> > gray_shapes = iio.imread(uri="data/shapes-02.jpg", mode="L") -> > histogram, bin_edges = np.histogram(gray_shapes, bins=256, range=(0.0, 1.0)) -> > -> > fig, ax = plt.subplots() -> > plt.plot(bin_edges[0:-1], histogram) -> > plt.title("Graylevel histogram") -> > plt.xlabel("gray value") -> > plt.ylabel("pixel count") -> > plt.xlim(0, 1.0) -> > ~~~ -> > {: .language-python} -> > -> > ![Grayscale histogram of the second geometric shapes image](../fig/shapes-02-histogram.png) -> > -> > We can see a large spike around 0.3, and a smaller spike around 0.7. The -> > spike near 0.3 represents the darker background, so it seems like a value -> > close to `t=0.5` would be a good choice. -> {: .solution} -> -> Next, create a mask to turn the pixels above the threshold `t` on -> and pixels below the threshold `t` off. Note that unlike the image -> with a white background we used above, here the peak for the -> background colour is at a lower gray level than the -> shapes. Therefore, change the comparison operator less `<` to -> greater `>` to create the appropriate mask. Then apply the mask to -> the image and view the thresholded image. If everything works as it -> should, your output should show only the coloured shapes on a black -> background. 
-> > ## Solution -> > -> > Here are the commands to create and view the binary mask -> > ~~~ -> > t = 0.5 -> > binary_mask = gray_shapes > t -> > -> > fig, ax = plt.subplots() -> > plt.imshow(binary_mask, cmap="gray") -> > ~~~ -> > {: .language-python} -> > -> > ![Binary mask created by thresholding the second geometric shapes image](../fig/shapes-02-mask.png) -> > -> > And here are the commands to apply the mask and view the thresholded image -> > ~~~ -> > shapes02 = iio.imread(uri="data/shapes-02.jpg") -> > selection = shapes02.copy() -> > selection[~binary_mask] = 0 -> > -> > fig, ax = plt.subplots() -> > plt.imshow(selection) -> > ~~~ -> > {: .language-python} -> > -> > ![Selected shapes after applying binary mask to the second geometric shapes image](../fig/shapes-02-selected.png) -> > -> {: .solution} -{: .challenge} +``` + +![](fig/shapes-01-selected.png){alt='Selected shapes after applying binary mask'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## More practice with simple thresholding (15 min) + +Now, it is your turn to practice. Suppose we want to use simple thresholding +to select only the coloured shapes (in this particular case we consider grayish to be a colour, too) from the image `data/shapes-02.jpg`: + +![](data/shapes-02.jpg){alt='Another image with geometric shapes on white background'} + +First, plot the grayscale histogram as in the [Creating +Histogram](05-creating-histograms.md) episode and +examine the distribution of grayscale values in the image. What do +you think would be a good value for the threshold `t`? 
+ +::::::::::::::: solution + +## Solution + +The histogram for the `data/shapes-02.jpg` image can be shown with + +```python +gray_shapes = iio.imread(uri="data/shapes-02.jpg", mode="L") +histogram, bin_edges = np.histogram(gray_shapes, bins=256, range=(0.0, 1.0)) + +fig, ax = plt.subplots() +plt.plot(bin_edges[0:-1], histogram) +plt.title("Graylevel histogram") +plt.xlabel("gray value") +plt.ylabel("pixel count") +plt.xlim(0, 1.0) +``` + +![](fig/shapes-02-histogram.png){alt='Grayscale histogram of the second geometric shapes image'} + +We can see a large spike around 0.3, and a smaller spike around 0.7. The +spike near 0.3 represents the darker background, so it seems like a value +close to `t=0.5` would be a good choice. + + +::::::::::::::::::::::::: + +Next, create a mask to turn the pixels above the threshold `t` on +and pixels below the threshold `t` off. Note that unlike the image +with a white background we used above, here the peak for the +background colour is at a lower gray level than the +shapes. Therefore, change the comparison operator less `<` to +greater `>` to create the appropriate mask. Then apply the mask to +the image and view the thresholded image. If everything works as it +should, your output should show only the coloured shapes on a black +background. 
+ +::::::::::::::: solution + +## Solution + +Here are the commands to create and view the binary mask + +```python +t = 0.5 +binary_mask = gray_shapes > t + +fig, ax = plt.subplots() +plt.imshow(binary_mask, cmap="gray") +``` + +![](fig/shapes-02-mask.png){alt='Binary mask created by thresholding the second geometric shapes image'} + +And here are the commands to apply the mask and view the thresholded image + +```python +shapes02 = iio.imread(uri="data/shapes-02.jpg") +selection = shapes02.copy() +selection[~binary_mask] = 0 + +fig, ax = plt.subplots() +plt.imshow(selection) +``` + +![](fig/shapes-02-selected.png){alt='Selected shapes after applying binary mask to the second geometric shapes image'} + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Automatic thresholding @@ -300,31 +302,34 @@ One such method is *[Otsu's method](https://en.wikipedia.org/wiki/Otsu%27s_metho It is particularly useful for situations where the grayscale histogram of an image has two peaks that correspond to background and objects of interest. -> ## Denoising an image before thresholding -> -> In practice, it is often necessary to denoise the image before -> thresholding, which can be done with one of the methods from -> [the _Blurring Images_ episode]({{ page.root }}{% link _episodes/06-blurring.md %}). -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Denoising an image before thresholding + +In practice, it is often necessary to denoise the image before +thresholding, which can be done with one of the methods from +[the *Blurring Images* episode](06-blurring.md). + + +:::::::::::::::::::::::::::::::::::::::::::::::::: Consider the image `data/maize-root-cluster.jpg` of a maize root system which we have seen before in -[the _Image Representation in skimage_ episode]({{ page.root }}{% link _episodes/03-skimage-images.md %}). +[the *Image Representation in skimage* episode](03-skimage-images.md). 
-~~~ +```python maize_roots = iio.imread(uri="data/maize-root-cluster.jpg") fig, ax = plt.subplots() plt.imshow(maize_roots) -~~~ -{: .language-python} +``` -![Image of a maize root](../data/maize-root-cluster.jpg) +![](data/maize-root-cluster.jpg){alt='Image of a maize root'} We use Gaussian blur with a sigma of 1.0 to denoise the root image. Let us look at the grayscale histogram of the denoised image. -~~~ +```python # convert the image to grayscale gray_image = skimage.color.rgb2gray(maize_roots) @@ -339,10 +344,9 @@ plt.title("Graylevel histogram") plt.xlabel("gray value") plt.ylabel("pixel count") plt.xlim(0, 1.0) -~~~ -{: .language-python} +``` -![Grayscale histogram of the maize root image](../fig/maize-root-cluster-histogram.png) +![](fig/maize-root-cluster-histogram.png){alt='Grayscale histogram of the maize root image'} The histogram has a significant peak around 0.2, and a second, smaller peak very near 1.0. @@ -358,17 +362,15 @@ the threshold automatically via Otsu's method. Then numpy comparison operators can be used to apply it as before. Here are the Python commands to determine the threshold `t` with Otsu's method. -~~~ +```python # perform automatic thresholding t = skimage.filters.threshold_otsu(blurred_image) print("Found automatic threshold t = {}.".format(t)) -~~~ -{: .language-python} +``` -~~~ +```output Found automatic threshold t = 0.4172454549881862. -~~~ -{: .output} +``` For this root image and a Gaussian blur with the chosen sigma of 1.0, the computed threshold value is 0.42. @@ -376,30 +378,28 @@ No we can create a binary mask with the comparison operator `>`. As we have seen before, pixels above the threshold value will be turned on, those below the threshold will be turned off. 
-~~~ +```python # create a binary mask with the threshold found by Otsu's method binary_mask = blurred_image > t fig, ax = plt.subplots() plt.imshow(binary_mask, cmap="gray") -~~~ -{: .language-python} +``` -![Binary mask of the maize root system](../fig/maize-root-cluster-mask.png) +![](fig/maize-root-cluster-mask.png){alt='Binary mask of the maize root system'} Finally, we use the mask to select the foreground: -~~~ +```python # apply the binary mask to select the foreground selection = maize_roots.copy() selection[~binary_mask] = 0 fig, ax = plt.subplots() plt.imshow(selection) -~~~ -{: .language-python} +``` -![Masked selection of the maize root system](../fig/maize-root-cluster-selected.png) +![](fig/maize-root-cluster-selected.png){alt='Masked selection of the maize root system'} ## Application: measuring root mass @@ -412,7 +412,7 @@ which you can find in the files `data/trial-216.jpg`, and `data/trial-293.jpg`. -![Four images of maize roots](../fig/four-maize-roots.jpg) +![](fig/four-maize-roots.jpg){alt='Four images of maize roots'} Suppose we are interested in the amount of plant material in each image, and in particular how that amount changes from image to image. @@ -425,15 +425,15 @@ We will first construct a Python program to measure this value for a single imag Our strategy will be this: 1. Read the image, converting it to grayscale as it is read. For this -application we do not need the colour image. + application we do not need the colour image. 2. Blur the image. 3. Use Otsu's method of thresholding to create a binary image, where -the pixels that were part of the maize plant are white, and everything -else is black. + the pixels that were part of the maize plant are white, and everything + else is black. 4. Save the binary image so it can be examined later. 5. Count the white pixels in the binary image, and divide by the -number of pixels in the image. This ratio will be a measure of the -root mass of the plant in the image. 
+ number of pixels in the image. This ratio will be a measure of the + root mass of the plant in the image. 6. Output the name of the image processed and the root mass ratio. Our intent is to perform these steps and produce the numeric result - @@ -449,7 +449,7 @@ Almost all of the commands should be familiar, and in fact, it may seem simpler than the code we have worked on thus far, because we are not displaying any of the images. -~~~ +```python def measure_root_mass(filename, sigma=1.0): # read the original image, converting to grayscale on the fly @@ -469,8 +469,7 @@ def measure_root_mass(filename, sigma=1.0): density = rootPixels / (w * h) return density -~~~ -{: .language-python} +``` The function begins with reading the original image from the file `filename`. We use `iio.imread()` with the optional argument `mode="L"` to @@ -499,15 +498,13 @@ If no sigma value is provided, the default value 1.0 will be used. For example, for the file `data/trial-016.jpg` and a sigma value of 1.5, we would call the function like this: -~~~ +```python measure_root_mass(filename="data/trial-016.jpg", sigma=1.5) -~~~ -{: .language-python} +``` -~~~ +```output 0.0482436835106383` -~~~ -{: .output} +``` Now we can use the function to process the series of four images shown above. In a real-world scientific situation, there might be dozens, hundreds, @@ -518,212 +515,236 @@ The following code block assumes that the files are located in the same director and the filenames all start with the **trial-** prefix and end with the **.jpg** suffix. 
-~~~ +```python all_files = glob.glob("data/trial-*.jpg") for filename in all_files: density = measure_root_mass(filename=filename, sigma=1.5) # output in format suitable for .csv print(filename, density, sep=",") -~~~ -{: .language-python} +``` -~~~ +```output data/trial-016.jpg,0.0482436835106383 data/trial-020.jpg,0.06346941489361702 data/trial-216.jpg,0.14073969414893617 data/trial-293.jpg,0.13607895611702128 -~~~ -{: .output} - -> ## Ignoring more of the images -- brainstorming (10 min) -> -> Let us take a closer look at the binary masks produced by the `measure_root_mass` function. -> -> ![Binary masks of the four maize root images](../fig/four-maize-roots-binary.jpg) -> -> You may have noticed in the section on automatic thresholding that -> the thresholded image does include regions of the image aside of the -> plant root: the numbered labels and the white circles in each image -> are preserved during the thresholding, because their grayscale -> values are above the threshold. -> Therefore, our calculated root mass ratios include the white pixels -> of the label and white circle that are not part of the plant root. -> Those extra pixels affect how accurate the root mass calculation is! -> -> How might we remove the labels and circles before calculating the ratio, -> so that our results are more accurate? -> Think about some options given what we have learned so far. -> -> > ## Solution -> > -> > One approach we might take is to try to completely mask out a region -> > from each image, particularly, -> > the area containing the white circle and the numbered label. -> > If we had coordinates for a rectangular area on the image -> > that contained the circle and the label, -> > we could mask the area out by using techniques we learned in -> > [the _Drawing and Bitwise Operations_ episode]({{ page.root }}{% link _episodes/04-drawing.md %}). -> > -> > However, a closer inspection of the binary images raises some issues with -> > that approach. 
-> > Since the roots are not always constrained to a certain area in the image, -> > and since the circles and labels are in different locations each time, -> > we would have difficulties coming up with a single rectangle that would -> > work for *every* image. -> > We could create a different masking rectangle for each image, -> > but that is not a practicable approach -> > if we have hundreds or thousands of images to process. -> > -> > Another approach we could take is -> > to apply two thresholding steps to the image. -> > Look at the graylevel histogram of the file `data/trial-016.jpg` shown -> > above again: -> > Notice the peak near 1.0? -> > Recall that a grayscale value of 1.0 corresponds to white pixels: -> > the peak corresponds to the white label and circle. -> > So, we could use simple binary thresholding to mask the white circle and -> > label from the image, -> > and then we could use Otsu's method to select the pixels in -> > the plant portion of the image. -> > -> > Note that most of this extra work in processing the image could have been -> > avoided during the experimental design stage, -> > with some careful consideration of how the resulting images would be used. -> > For example, all of the following measures could have made the images easier -> > to process, by helping us predict and/or detect where the label is in the image -> > and subsequently mask it from further processing: -> > -> > * Using labels with a consistent size and shape -> > * Placing all the labels in the same position, relative to the sample -> > * Using a non-white label, with non-black writing -> > -> {: .solution} -{: .challenge} - -> ## Ignoring more of the images -- implementation (30 min - optional, not included in timing) -> -> Implement an enhanced version of the function `measure_root_mass` -> that applies simple binary thresholding to remove the white circle -> and label from the image before applying Otsu's method. 
-> -> > ## Solution -> > -> > We can apply a simple binary thresholding with a threshold -> > `t=0.95` to remove the label and circle from the image. We use the -> > binary mask to set the pixels in the blurred image to zero -> > (black). -> > -> > ~~~ -> > def enhanced_root_mass(filename, sigma): -> > -> > # read the original image, converting to grayscale on the fly -> > image = iio.imread(uri=filename, mode="L") -> > -> > # blur before thresholding -> > blurred_image = skimage.filters.gaussian(image, sigma=sigma) -> > -> > # perform binary thresholding to mask the white label and circle -> > binary_mask = blurred_image < 0.95 -> > # use the mask to remove the circle and label from the blurred image -> > blurred_image[~binary_mask] = 0 -> > -> > # perform automatic thresholding to produce a binary image -> > t = skimage.filters.threshold_otsu(blurred_image) -> > binary_mask = blurred_image > t -> > -> > # determine root mass ratio -> > rootPixels = np.count_nonzero(binary_mask) -> > w = binary_mask.shape[1] -> > h = binary_mask.shape[0] -> > density = rootPixels / (w * h) -> > -> > return density -> > -> > all_files = glob.glob("data/trial-*.jpg") -> > for filename in all_files: -> > density = enhanced_root_mass(filename=filename, sigma=1.5) -> > # output in format suitable for .csv -> > print(filename, density, sep=",") -> > ~~~ -> > {: .language-python} -> > -> > The output of the improved program does illustrate that the white circles -> > and labels were skewing our root mass ratios: -> > -> > ~~~ -> > data/trial-016.jpg,0.045935837765957444 -> > data/trial-020.jpg,0.058800033244680854 -> > data/trial-216.jpg,0.13705003324468085 -> > data/trial-293.jpg,0.13164461436170213 -> > ~~~ -> > {: .output} -> > -> > Here are the binary images produced by the additional thresholding. -> > Note that we have not completely removed the offending white pixels. -> > Outlines still remain. 
-> > However, we have reduced the number of extraneous pixels, -> > which should make the output more accurate. -> > -> > ![Improved binary masks of the four maize root images](../fig/four-maize-roots-binary-improved.jpg) -> > -> {: .solution} -{: .challenge} - -> ## Thresholding a bacteria colony image (15 min) -> -> In the images directory `data/`, you will find an image named `colonies-01.tif`. -> -> ![Image of bacteria colonies in a petri dish](../fig/colonies-01.jpg) -> -> This is one of the images you will be working with in the -> morphometric challenge at the end of the workshop. -> -> 1. Plot and inspect the grayscale histogram of the image to -> determine a good threshold value for the image. -> 2. Create a binary mask that leaves the pixels in the bacteria -> colonies "on" while turning the rest of the pixels in the image -> "off". -> -> > ## Solution -> > Here is the code to create the grayscale histogram: -> > ~~~ -> > bacteria = iio.imread(uri="data/colonies-01.tif") -> > gray_image = skimage.color.rgb2gray(bacteria) -> > blurred_image = skimage.filters.gaussian(gray_image, sigma=1.0) -> > histogram, bin_edges = np.histogram(blurred_image, bins=256, range=(0.0, 1.0)) -> > fig, ax = plt.subplots() -> > plt.plot(bin_edges[0:-1], histogram) -> > plt.title("Graylevel histogram") -> > plt.xlabel("gray value") -> > plt.ylabel("pixel count") -> > plt.xlim(0, 1.0) -> > ~~~ -> > {: .language-python} -> > -> > ![Grayscale histogram of the bacteria colonies image](../fig/colonies-01-histogram.png) -> > -> > The peak near one corresponds to the white image background, -> > and the broader peak around 0.5 corresponds to the yellow/brown -> > culture medium in the dish. -> > The small peak near zero is what we are after: the dark bacteria colonies. -> > A reasonable choice thus might be to leave pixels below `t=0.2` on. 
-> > -> > Here is the code to create and show the binarized image using the -> > `<` operator with a threshold `t=0.2`: -> > -> > ~~~ -> > t = 0.2 -> > binary_mask = blurred_image < t -> > -> > fig, ax = plt.subplots() -> > plt.imshow(binary_mask, cmap="gray") -> > ~~~ -> > {: .language-python} -> > -> > ![Binary mask of the bacteria colonies image](../fig/colonies-01-mask.png) -> > -> > When you experiment with the threshold a bit, you can see that in -> > particular the size of the bacteria colony near the edge of the -> > dish in the top right is affected by the choice of the threshold. -> {: .solution} -{: .challenge} +``` + +::::::::::::::::::::::::::::::::::::::: challenge + +## Ignoring more of the images -- brainstorming (10 min) + +Let us take a closer look at the binary masks produced by the `measure_root_mass` function. + +![](fig/four-maize-roots-binary.jpg){alt='Binary masks of the four maize root images'} + +You may have noticed in the section on automatic thresholding that +the thresholded image does include regions of the image aside of the +plant root: the numbered labels and the white circles in each image +are preserved during the thresholding, because their grayscale +values are above the threshold. +Therefore, our calculated root mass ratios include the white pixels +of the label and white circle that are not part of the plant root. +Those extra pixels affect how accurate the root mass calculation is! + +How might we remove the labels and circles before calculating the ratio, +so that our results are more accurate? +Think about some options given what we have learned so far. + +::::::::::::::: solution + +## Solution + +One approach we might take is to try to completely mask out a region +from each image, particularly, +the area containing the white circle and the numbered label. 
+If we had coordinates for a rectangular area on the image +that contained the circle and the label, +we could mask the area out by using techniques we learned in +[the *Drawing and Bitwise Operations* episode](04-drawing.md). + +However, a closer inspection of the binary images raises some issues with +that approach. +Since the roots are not always constrained to a certain area in the image, +and since the circles and labels are in different locations each time, +we would have difficulties coming up with a single rectangle that would +work for *every* image. +We could create a different masking rectangle for each image, +but that is not a practicable approach +if we have hundreds or thousands of images to process. + +Another approach we could take is +to apply two thresholding steps to the image. +Look at the graylevel histogram of the file `data/trial-016.jpg` shown +above again: +Notice the peak near 1.0? +Recall that a grayscale value of 1.0 corresponds to white pixels: +the peak corresponds to the white label and circle. +So, we could use simple binary thresholding to mask the white circle and +label from the image, +and then we could use Otsu's method to select the pixels in +the plant portion of the image. + +Note that most of this extra work in processing the image could have been +avoided during the experimental design stage, +with some careful consideration of how the resulting images would be used. 
+For example, all of the following measures could have made the images easier +to process, by helping us predict and/or detect where the label is in the image +and subsequently mask it from further processing: + +- Using labels with a consistent size and shape +- Placing all the labels in the same position, relative to the sample +- Using a non-white label, with non-black writing + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Ignoring more of the images -- implementation (30 min - optional, not included in timing) + +Implement an enhanced version of the function `measure_root_mass` +that applies simple binary thresholding to remove the white circle +and label from the image before applying Otsu's method. + +::::::::::::::: solution + +## Solution + +We can apply a simple binary thresholding with a threshold +`t=0.95` to remove the label and circle from the image. We use the +binary mask to set the pixels in the blurred image to zero +(black). 
+ +```python +def enhanced_root_mass(filename, sigma): + + # read the original image, converting to grayscale on the fly + image = iio.imread(uri=filename, mode="L") + + # blur before thresholding + blurred_image = skimage.filters.gaussian(image, sigma=sigma) + + # perform binary thresholding to mask the white label and circle + binary_mask = blurred_image < 0.95 + # use the mask to remove the circle and label from the blurred image + blurred_image[~binary_mask] = 0 + + # perform automatic thresholding to produce a binary image + t = skimage.filters.threshold_otsu(blurred_image) + binary_mask = blurred_image > t + + # determine root mass ratio + rootPixels = np.count_nonzero(binary_mask) + w = binary_mask.shape[1] + h = binary_mask.shape[0] + density = rootPixels / (w * h) + + return density + +all_files = glob.glob("data/trial-*.jpg") +for filename in all_files: + density = enhanced_root_mass(filename=filename, sigma=1.5) + # output in format suitable for .csv + print(filename, density, sep=",") +``` + +The output of the improved program does illustrate that the white circles +and labels were skewing our root mass ratios: + +```output +data/trial-016.jpg,0.045935837765957444 +data/trial-020.jpg,0.058800033244680854 +data/trial-216.jpg,0.13705003324468085 +data/trial-293.jpg,0.13164461436170213 +``` + +Here are the binary images produced by the additional thresholding. +Note that we have not completely removed the offending white pixels. +Outlines still remain. +However, we have reduced the number of extraneous pixels, +which should make the output more accurate. + +![](fig/four-maize-roots-binary-improved.jpg){alt='Improved binary masks of the four maize root images'} + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Thresholding a bacteria colony image (15 min) + +In the images directory `data/`, you will find an image named `colonies-01.tif`. 
+ +![](fig/colonies-01.jpg){alt='Image of bacteria colonies in a petri dish'} + +This is one of the images you will be working with in the +morphometric challenge at the end of the workshop. + +1. Plot and inspect the grayscale histogram of the image to + determine a good threshold value for the image. +2. Create a binary mask that leaves the pixels in the bacteria + colonies "on" while turning the rest of the pixels in the image + "off". + +::::::::::::::: solution + +## Solution + +Here is the code to create the grayscale histogram: + +```python +bacteria = iio.imread(uri="data/colonies-01.tif") +gray_image = skimage.color.rgb2gray(bacteria) +blurred_image = skimage.filters.gaussian(gray_image, sigma=1.0) +histogram, bin_edges = np.histogram(blurred_image, bins=256, range=(0.0, 1.0)) +fig, ax = plt.subplots() +plt.plot(bin_edges[0:-1], histogram) +plt.title("Graylevel histogram") +plt.xlabel("gray value") +plt.ylabel("pixel count") +plt.xlim(0, 1.0) +``` + +![](fig/colonies-01-histogram.png){alt='Grayscale histogram of the bacteria colonies image'} + +The peak near one corresponds to the white image background, +and the broader peak around 0.5 corresponds to the yellow/brown +culture medium in the dish. +The small peak near zero is what we are after: the dark bacteria colonies. +A reasonable choice thus might be to leave pixels below `t=0.2` on. + +Here is the code to create and show the binarized image using the +`<` operator with a threshold `t=0.2`: + +```python +t = 0.2 +binary_mask = blurred_image < t + +fig, ax = plt.subplots() +plt.imshow(binary_mask, cmap="gray") +``` + +![](fig/colonies-01-mask.png){alt='Binary mask of the bacteria colonies image'} + +When you experiment with the threshold a bit, you can see that in +particular the size of the bacteria colony near the edge of the +dish in the top right is affected by the choice of the threshold. 
+ + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Thresholding produces a binary image, where all pixels with intensities above (or below) a threshold value are turned on, while all other pixels are turned off. +- The binary images produced by thresholding are held in two-dimensional NumPy arrays, since they have only one colour value channel. They are boolean, hence they contain the values 0 (off) and 1 (on). +- Thresholding can be used to create masks that select only the interesting parts of an image, or as the first step before edge detection or finding contours. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/08-connected-components.md b/episodes/08-connected-components.md index 3d71f73b9..f987731b1 100644 --- a/episodes/08-connected-components.md +++ b/episodes/08-connected-components.md @@ -1,38 +1,41 @@ --- -title: "Connected Component Analysis" +title: Connected Component Analysis teaching: 70 exercises: 55 -questions: -- "How to extract separate objects from an image and describe these objects quantitatively." -objectives: -- "Understand the term object in the context of images." -- "Learn about pixel connectivity." -- "Learn how Connected Component Analysis (CCA) works." -- "Use CCA to produce an image that highlights every object in a different colour." -- "Characterise each object with numbers that describe its appearance." -keypoints: -- "We can use `skimage.measure.label` to find and label connected objects in an image." -- "We can use `skimage.measure.regionprops` to measure properties of labeled objects." -- "We can use `skimage.morphology.remove_small_objects` to mask small objects and remove artifacts from an image." -- "We can display the labeled image to view the objects coloured by label." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Understand the term object in the context of images. 
+- Learn about pixel connectivity. +- Learn how Connected Component Analysis (CCA) works. +- Use CCA to produce an image that highlights every object in a different colour. +- Characterise each object with numbers that describe its appearance. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How to extract separate objects from an image and describe these objects quantitatively. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + ## Objects -In [the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}) +In [the *Thresholding* episode](07-thresholding.md) we have covered dividing an image into foreground and background pixels. In the shapes example image, -we considered the coloured shapes as foreground _objects_ on a white background. +we considered the coloured shapes as foreground *objects* on a white background. -![Original shapes image](../data/shapes-01.jpg) -{: .image-with-shadow} +![](data/shapes-01.jpg) +{alt='Original shapes image' .image-with-shadow} In thresholding we went from the original image to this version: -![Mask created by thresholding](../fig/shapes-01-mask.png) +![](fig/shapes-01-mask.png){alt='Mask created by thresholding'} Here, we created a mask that only highlights the parts of the image -that we find interesting, the _objects_. +that we find interesting, the *objects*. All objects have pixel value of `True` while the background pixels are `False`. By looking at the mask image, @@ -57,15 +60,14 @@ For the purpose of illustration, the digit `0` is used to represent background pixels, and the letter `X` is used to represent object pixels foreground). -~~~ +```output 0 0 0 0 0 0 0 0 0 X X 0 0 0 0 0 0 X X 0 0 0 0 0 0 0 0 X X X 0 0 0 0 0 X X X X 0 0 0 0 0 0 0 0 0 -~~~ -{: .output} +``` The pixels are organised in a rectangular grid. In order to understand pixel neighborhoods @@ -76,19 +78,18 @@ Diagonal jumps are not allowed. 
So, from a centre pixel, denoted with `o`, only the pixels indicated with a `1` are reachable: -~~~ +```output - 1 - 1 o 1 - 1 - -~~~ -{: .output} +``` The pixels on the diagonal (from `o`) are not reachable with a single jump, which is denoted by the `-`. -The pixels reachable with a single jump form the __1-jump__ neighborhood. +The pixels reachable with a single jump form the **1-jump** neighborhood. The second rule states that in a sequence of jumps, -one may only jump in row and column direction once -> they have to be _orthogonal_. +one may only jump in row and column direction once -> they have to be *orthogonal*. An example of a sequence of orthogonal jumps is shown below. Starting from `o` the first jump goes along the row to the right. The second jump then goes along the column direction up. @@ -96,39 +97,36 @@ After this, the sequence cannot be continued as a jump has already been made in both row and column direction. -~~~ +```output - - 2 - o 1 - - - -~~~ -{: .output} +``` -All pixels reachable with one, or two jumps form the __2-jump__ neighborhood. +All pixels reachable with one, or two jumps form the **2-jump** neighborhood. The grid below illustrates the pixels reachable from the centre pixel `o` with a single jump, highlighted with a `1`, and the pixels reachable with 2 jumps with a `2`. -~~~ +```output 2 1 2 1 o 1 2 1 2 -~~~ -{: .output} +``` We want to revisit our example image mask from above and apply the two different neighborhood rules. With a single jump connectivity for each pixel, we get two resulting objects, highlighted in the image with `A`'s and `B`'s. -~~~ +```output 0 0 0 0 0 0 0 0 0 A A 0 0 0 0 0 0 A A 0 0 0 0 0 0 0 0 B B B 0 0 0 0 0 B B B B 0 0 0 0 0 0 0 0 0 -~~~ -{: .output} +``` In the 1-jump version, only pixels that have direct neighbors along rows or columns are considered connected. @@ -136,65 +134,79 @@ Diagonal connections are not included in the 1-jump neighborhood. 
With two jumps, however, we only get a single object `A` because pixels are also considered connected along the diagonals. -~~~ +```output 0 0 0 0 0 0 0 0 0 A A 0 0 0 0 0 0 A A 0 0 0 0 0 0 0 0 A A A 0 0 0 0 0 A A A A 0 0 0 0 0 0 0 0 0 -~~~ -{: .output} - - -> ## Object counting (optional, not included in timing) -> -> How many objects with 1 orthogonal jump, how many with 2 orthogonal jumps? -> -> ~~~ -> 0 0 0 0 0 0 0 0 -> 0 X 0 0 0 X X 0 -> 0 0 X 0 0 0 0 0 -> 0 X 0 X X X 0 0 -> 0 X 0 X X 0 0 0 -> 0 0 0 0 0 0 0 0 -> ~~~ -> {: .output} -> -> 1 jump -> -> a) 1 -> b) 5 -> c) 2 -> -> > ## Solution -> > b) 5 -> {: .solution} -> -> 2 jumps -> -> a) 2 -> b) 3 -> c) 5 -> -> > ## Solution -> > a) 2 -> {: .solution} -{: .challenge} - - -> ## Jumps and neighborhoods -> -> We have just introduced how you can reach different neighboring -> pixels by performing one or more orthogonal jumps. We have used the -> terms 1-jump and 2-jump neighborhood. There is also a different way -> of referring to these neighborhoods: the 4- and 8-neighborhood. -> With a single jump you can reach four pixels from a given starting -> pixel. Hence, the 1-jump neighborhood corresponds to the -> 4-neighborhood. When two orthogonal jumps are allowed, eight pixels -> can be reached, so the 2-jump neighborhood corresponds to the -> 8-neighborhood. -{: .callout} +``` + +::::::::::::::::::::::::::::::::::::::: challenge + +## Object counting (optional, not included in timing) + +How many objects with 1 orthogonal jump, how many with 2 orthogonal jumps? 
+ +```output +0 0 0 0 0 0 0 0 +0 X 0 0 0 X X 0 +0 0 X 0 0 0 0 0 +0 X 0 X X X 0 0 +0 X 0 X X 0 0 0 +0 0 0 0 0 0 0 0 +``` + +1 jump + +a) 1 +b) 5 +c) 2 + +::::::::::::::: solution + +## Solution + +b) 5 + + +::::::::::::::::::::::::: + +2 jumps + +a) 2 +b) 3 +c) 5 + +::::::::::::::: solution + +## Solution + +a) 2 + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Jumps and neighborhoods + +We have just introduced how you can reach different neighboring +pixels by performing one or more orthogonal jumps. We have used the +terms 1-jump and 2-jump neighborhood. There is also a different way +of referring to these neighborhoods: the 4- and 8-neighborhood. +With a single jump you can reach four pixels from a given starting +pixel. Hence, the 1-jump neighborhood corresponds to the +4-neighborhood. When two orthogonal jumps are allowed, eight pixels +can be reached, so the 2-jump neighborhood corresponds to the +8-neighborhood. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Connected Component Analysis @@ -205,14 +217,14 @@ Usually, the `False` value in this image is associated with background pixels, and the `True` value indicates foreground, or object pixels. Such an image can be produced, e.g., with thresholding. Given a thresholded image, -the connected component analysis produces a new _labeled_ image with integer pixel values. +the connected component analysis produces a new *labeled* image with integer pixel values. Pixels with the same value, belong to the same object. Skimage provides connected component analysis in the function `skimage.measure.label()`. Let us add this function to the already familiar steps of thresholding an image. 
First, import the packages needed for this episode -~~~ +```python import numpy as np import matplotlib.pyplot as plt import ipympl @@ -221,15 +233,14 @@ import skimage.color import skimage.filters import skimage.measure %matplotlib widget -~~~ -{: .language-python} +``` Note the new import of `skimage.measure` in order to use the `skimage.measure.label` function that performs the CCA. Next, we define a reusable Python function `connected_components`: -~~~ +```python def connected_components(filename, sigma=1.0, t=0.5, connectivity=2): # load the image image = iio.imread(filename) @@ -243,11 +254,10 @@ def connected_components(filename, sigma=1.0, t=0.5, connectivity=2): labeled_image, count = skimage.measure.label(binary_mask, connectivity=connectivity, return_num=True) return labeled_image, count -~~~ -{: .language-python} +``` The first four lines of code are familiar from -[the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}). +[the *Thresholding* episode](07-thresholding.md). @@ -263,98 +273,101 @@ a unique value corresponding to the object it belongs to. In addition, we pass the optional parameter `return_num=True` to return the maximum label index as `count`. -> ## Optional parameters and return values -> -> The optional parameter `return_num` changes the data type that is -> returned by the function `skimage.measure.label`. -> The number of labels is only returned if `return_num` is _True_. -> Otherwise, the function only returns the labeled image. -> This means that we have to pay attention when assigning -> the return value to a variable. 
-> If we omit the optional parameter `return_num` or pass `return_num=False`, -> we can call the function as -> -> ~~~ -> labeled_image = skimage.measure.label(binary_mask) -> ~~~ -> {: .language-python} -> -> If we pass `return_num=True`, the function returns a tuple and we -> can assign it as -> -> ~~~ -> labeled_image, count = skimage.measure.label(binary_mask, return_num=True) -> ~~~ -> {: .language-python} -> -> If we used the same assignment as in the first case, -> the variable `labeled_image` would become a tuple, -> in which `labeled_image[0]` is the image -> and `labeled_image[1]` is the number of labels. -> This could cause confusion if we assume that `labeled_image` -> only contains the image and pass it to other functions. -> If you get an -> `AttributeError: 'tuple' object has no attribute 'shape'` -> or similar, check if you have assigned the return values consistently -> with the optional parameters. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Optional parameters and return values + +The optional parameter `return_num` changes the data type that is +returned by the function `skimage.measure.label`. +The number of labels is only returned if `return_num` is *True*. +Otherwise, the function only returns the labeled image. +This means that we have to pay attention when assigning +the return value to a variable. +If we omit the optional parameter `return_num` or pass `return_num=False`, +we can call the function as + +```python +labeled_image = skimage.measure.label(binary_mask) +``` + +If we pass `return_num=True`, the function returns a tuple and we +can assign it as + +```python +labeled_image, count = skimage.measure.label(binary_mask, return_num=True) +``` + +If we used the same assignment as in the first case, +the variable `labeled_image` would become a tuple, +in which `labeled_image[0]` is the image +and `labeled_image[1]` is the number of labels. 
+This could cause confusion if we assume that `labeled_image` +only contains the image and pass it to other functions. +If you get an +`AttributeError: 'tuple' object has no attribute 'shape'` +or similar, check if you have assigned the return values consistently +with the optional parameters. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: We can call the above function `connected_components` and display the labeled image like so: -~~~ +```python labeled_image, count = connected_components(filename="data/shapes-01.jpg", sigma=2.0, t=0.9, connectivity=2) fig, ax = plt.subplots() plt.imshow(labeled_image) plt.axis("off"); -~~~ -{: .language-python} - -> ## Color mappings -> -> Here you might get a warning -> `UserWarning: Low image data range; displaying image with stretched contrast.` -> or just see an all black image -> (Note: this behavior might change in future versions or -> not occur with a different image viewer). -> -> What went wrong? -> When you hover over the black image, -> the pixel values are shown as numbers in the lower corner of the viewer. -> You can see that some pixels have values different from `0`, -> so they are not actually pure black. -> Let's find out more by examining `labeled_image`. -> Properties that might be interesting in this context are `dtype`, -> the minimum and maximum value. -> We can print them with the following lines: -> -> ~~~ -> print("dtype:", labeled_image.dtype) -> print("min:", np.min(labeled_image)) -> print("max:", np.max(labeled_image)) -> ~~~ -> {: .language-python} -> -> Examining the output can give us a clue why the image appears black. -> -> ~~~ -> dtype: int32 -> min: 0 -> max: 11 -> ~~~ -> {: .output} -> -> The `dtype` of `labeled_image` is `int64`. -> This means that values in this image range from `-2 ** 63` to `2 ** 63 - 1`. -> Those are really big numbers. -> From this available space we only use the range from `0` to `11`. 
-> When showing this image in the viewer,
-> it squeezes the complete range into 256 gray values.
-> Therefore, the range of our numbers does not produce any visible change.
->
-> Fortunately, the skimage library has tools to cope with this situation.
-{: .solution }
+```
+
+:::::::::::::: solution
+
+## Color mappings
+
+Here you might get a warning
+`UserWarning: Low image data range; displaying image with stretched contrast.`
+or just see an all black image
+(Note: this behavior might change in future versions or
+not occur with a different image viewer).
+
+What went wrong?
+When you hover over the black image,
+the pixel values are shown as numbers in the lower corner of the viewer.
+You can see that some pixels have values different from `0`,
+so they are not actually pure black.
+Let's find out more by examining `labeled_image`.
+Properties that might be interesting in this context are `dtype`,
+the minimum and maximum value.
+We can print them with the following lines:
+
+```python
+print("dtype:", labeled_image.dtype)
+print("min:", np.min(labeled_image))
+print("max:", np.max(labeled_image))
+```
+
+Examining the output can give us a clue why the image appears black.
+
+```output
+dtype: int32
+min: 0
+max: 11
+```
+
+The `dtype` of `labeled_image` is `int32`.
+This means that values in this image range from `-2 ** 31` to `2 ** 31 - 1`.
+Those are really big numbers.
+From this available space we only use the range from `0` to `11`.
+When showing this image in the viewer,
+it squeezes the complete range into 256 gray values.
+Therefore, the range of our numbers does not produce any visible change.
+
+Fortunately, the skimage library has tools to cope with this situation.
+
+
+:::::::::::::::::::::::::
 
 We can use the function `skimage.color.label2rgb()`
 to convert the colours in the image
@@ -364,76 +377,78 @@ With `skimage.color.label2rgb()`,
 all objects are coloured according to a list of colours that can be customised.
We can use the following commands to convert and show the image: -~~~ +```python # convert the label image to color image colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) fig, ax = plt.subplots() plt.imshow(colored_label_image) plt.axis("off"); -~~~ -{: .language-python} - -![Labeled objects](../fig/shapes-01-labeled.png) - - -> ## How many objects are in that image (15 min) -> -> -> Now, it is your turn to practice. -> Using the function `connected_components`, -> find two ways of printing out the number of objects found in the image. -> -> What number of objects would you expect to get? -> -> How does changing the `sigma` and `threshold` values influence the result? -> -> > ## Solution -> > -> > As you might have guessed, the return value `count` already -> > contains the number of found images. So it can simply be printed -> > with -> > -> > ~~~ -> > print("Found", count, "objects in the image.") -> > ~~~ -> > {: .language-python} -> > -> > But there is also a way to obtain the number of found objects from -> > the labeled image itself. -> > Recall that all pixels that belong to a single object -> > are assigned the same integer value. -> > The connected component algorithm produces consecutive numbers. -> > The background gets the value `0`, -> > the first object gets the value `1`, -> > the second object the value `2`, and so on. -> > This means that by finding the object with the maximum value, -> > we also know how many objects there are in the image. -> > We can thus use the `np.max` function from Numpy to -> > find the maximum value that equals the number of found objects: -> > -> > ~~~ -> > num_objects = np.max(labeled_image) -> > print("Found", num_objects, "objects in the image.") -> > ~~~ -> > {: .language-python} -> > -> > Invoking the function with `sigma=2.0`, and `threshold=0.9`, -> > both methods will print -> > -> > ~~~ -> > Found 11 objects in the image. 
-> > ~~~
-> > {: .output}
-> >
-> > Lowering the threshold will result in fewer objects.
-> > The higher the threshold is set, the more objects are found.
-> > More and more background noise gets picked up as objects.
-> > Larger sigmas produce binary masks with less noise and hence
-> > a smaller number of objects.
-> > Setting sigma too high bears the danger of merging objects.
-> {: .solution}
-{: .challenge}
+```
+
+![](fig/shapes-01-labeled.png){alt='Labeled objects'}
+
+::::::::::::::::::::::::::::::::::::::: challenge
+
+## How many objects are in that image (15 min)
+
+Now, it is your turn to practice.
+Using the function `connected_components`,
+find two ways of printing out the number of objects found in the image.
+
+What number of objects would you expect to get?
+
+How does changing the `sigma` and `threshold` values influence the result?
+
+::::::::::::::: solution
+
+## Solution
+
+As you might have guessed, the return value `count` already
+contains the number of found objects. So it can simply be printed
+with
+
+```python
+print("Found", count, "objects in the image.")
+```
+
+But there is also a way to obtain the number of found objects from
+the labeled image itself.
+Recall that all pixels that belong to a single object
+are assigned the same integer value.
+The connected component algorithm produces consecutive numbers.
+The background gets the value `0`,
+the first object gets the value `1`,
+the second object the value `2`, and so on.
+This means that by finding the object with the maximum value,
+we also know how many objects there are in the image.
+We can thus use the `np.max` function from Numpy to
+find the maximum value that equals the number of found objects:
+
+```python
+num_objects = np.max(labeled_image)
+print("Found", num_objects, "objects in the image.")
+```
+
+Invoking the function with `sigma=2.0`, and `threshold=0.9`,
+both methods will print
+
+```output
+Found 11 objects in the image.
+``` + +Lowering the threshold will result in fewer objects. +The higher the threshold is set, the more objects are found. +More and more background noise gets picked up as objects. +Larger sigmas produce binary masks with less noise and hence +a smaller number of objects. +Setting sigma too high bears the danger of merging objects. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: You might wonder why the connected component analysis with `sigma=2.0`, and `threshold=0.9` finds 11 objects, whereas we would expect only 7 objects. @@ -441,7 +456,7 @@ Where are the four additional objects? With a bit of detective work, we can spot some small objects in the image, for example, near the left border. -![shapes-01.jpg mask detail](../fig/shapes-01-cca-detail.png) +![](fig/shapes-01-cca-detail.png){alt='shapes-01.jpg mask detail'} For us it is clear that these small spots are artifacts and not objects we are interested in. @@ -466,7 +481,7 @@ So we could use a minimum area as a criterion for when an object should be detec To apply such a criterion, we need a way to calculate the area of objects found by connected components. Recall how we determined the root mass in -[the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}) +[the *Thresholding* episode](07-thresholding.md) by counting the pixels in the binary mask. But here we want to calculate the area of several objects in the labeled image. 
The skimage library provides the function `skimage.measure.regionprops` @@ -478,295 +493,334 @@ You can explore the skimage documentation to learn about other properties availa We can get a list of areas of the labeled objects as follows: -~~~ +```python # compute object features and extract object areas object_features = skimage.measure.regionprops(labeled_image) object_areas = [objf["area"] for objf in object_features] object_areas -~~~ -{: .language-python} +``` This will produce the output -~~~ +```output [318542, 1, 523204, 496613, 517331, 143, 256215, 1, 68, 338784, 265755] -~~~ -{: .output} - -> ## Plot a histogram of the object area distribution (10 min) -> -> Similar to how we determined a "good" threshold in -> [the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}), -> it is often helpful to inspect the histogram of an object property. -> For example, we want to look at the distribution of the object areas. -> -> 1. Create and examine a [histogram]({{ page.root }}/05-creating-histograms) -> of the object areas obtained with `skimage.measure.regionprops`. -> 2. What does the histogram tell you about the objects? -> -> > ## Solution -> > -> > The histogram can be plotted with -> > ~~~ -> > fig, ax = plt.subplots() -> > plt.hist(object_areas) -> > plt.xlabel("Area (pixels)") -> > plt.ylabel("Number of objects"); -> > ~~~ -> > {: .language-python} -> > -> > ![Histogram of object areas](../fig/shapes-01-areas-histogram.png) -> > -> > The histogram shows the number of objects (vertical axis) -> > whose area is within a certain range (horizontal axis). -> > The height of the bars in the histogram indicates -> > the prevalence of objects with a certain area. -> > The whole histogram tells us about the distribution of object sizes in the image. -> > It is often possible to identify gaps between groups of bars -> > (or peaks if we draw the histogram as a continuous curve) -> > that tell us about certain groups in the image. 
-> > -> > In this example, we can see that there are four small objects that -> > contain less than 50000 pixels. -> > Then there is a group of four (1+1+2) objects in -> > the range between 200000 and 400000, -> > and three objects with a size around 500000. -> > For our object count, we might want to disregard the small objects as artifacts, -> > i.e, we want to ignore the leftmost bar of the histogram. -> > We could use a threshold of 50000 as the minimum area to count. -> > In fact, the `object_areas` list already tells us that -> > there are fewer than 200 pixels in these objects. -> > Therefore, it is reasonable to require a minimum area of at least 200 pixels -> > for a detected object. -> > In practice, finding the "right" threshold can be tricky and -> > usually involves an educated guess based on domain knowledge. -> {: .solution} -{: .challenge} - -> ## Filter objects by area (10 min) -> -> Now we would like to use a minimum area criterion to obtain a more -> accurate count of the objects in the image. -> -> 1. Find a way to calculate the number of objects by only counting -> objects above a certain area. -> -> > ## Solution -> > -> > One way to count only objects above a certain area is to first -> > create a list of those objects, and then take the length of that -> > list as the object count. This can be done as follows: -> > -> > ~~~ -> > min_area = 200 -> > large_objects = [] -> > for objf in object_features: -> > if objf["area"] > min_area: -> > large_objects.append(objf["label"]) -> > print("Found", len(large_objects), "objects!") -> > ~~~ -> > {: .language-python} -> > -> > Another option is to use Numpy arrays to create the list of large objects. -> > We first create an array `object_areas` containing the object areas, -> > and an array `object_labels` containing the object labels. -> > The labels of the objects are also returned by `skimage.measure.regionprops`. 
-> > We have already seen that we can create boolean arrays using comparison operators. -> > Here we can use `object_areas > min_area` -> > to produce an array that has the same dimension as `object_labels`. -> > It can then used to select the labels of objects whose area is -> > greater than `min_area` by indexing: -> > -> > ~~~ -> > object_areas = np.array([objf["area"] for objf in object_features]) -> > object_labels = np.array([objf["label"] for objf in object_features]) -> > large_objects = object_labels[object_areas > min_area] -> > print("Found", len(large_objects), "objects!") -> > ~~~ -> > {: .language-python} -> > -> > The advantage of using Numpy arrays is that -> > `for` loops and `if` statements in Python can be slow, -> > and in practice the first approach may not be feasible -> > if the image contains a large number of objects. -> > In that case, Numpy array functions turn out to be very useful because -> > they are much faster. -> > -> > In this example, we can also use the `np.count_nonzero` function -> > that we have seen earlier together with the `>` operator to count -> > the objects whose area is above `min_area`. -> > -> > ~~~ -> > n = np.count_nonzero(object_areas > min_area) -> > print("Found", n, "objects!") -> > ~~~ -> > {: .language-python} -> > -> > For all three alternatives, the output is the same and gives the -> > expected count of 7 objects. -> {: .solution} -{: .challenge} - -> ## Using functions from Numpy and other Python packages -> Functions from Python packages such as Numpy are often more efficient and -> require less code to write. -> It is a good idea to browse the reference pages of `numpy` and `skimage` to -> look for an availabe function that can solve a given task. -{: .callout} - -> ## Remove small objects (20 min) -> -> We might also want to exclude (mask) the small objects when plotting -> the labeled image. -> -> 2. 
Enhance the `connected_components` function such that -> it automatically removes objects that are below a certain area that is -> passed to the function as an optional parameter. -> -> > ## Solution -> > -> > -> > To remove the small objects from the labeled image, -> > we change the value of all pixels that belong to the small objects to -> > the background label 0. -> > One way to do this is to loop over all objects and -> > set the pixels that match the label of the object to 0. -> > -> > ~~~ -> > for object_id, objf in enumerate(object_features, start=1): -> > if objf["area"] < min_area: -> > labeled_image[labeled_image == objf["label"]] = 0 -> > ~~~ -> > {: .language-python} -> > -> > Here Numpy functions can also be used to eliminate -> > `for` loops and `if` statements. -> > Like above, we can create an array of the small object labels with -> > the comparison `object_areas < min_area`. -> > We can use another Numpy function, `np.isin`, -> > to set the pixels of all small objects to 0. -> > `np.isin` takes two arrays and returns a boolean array with values -> > `True` if the entry of the first array is found in the second array, -> > and `False` otherwise. -> > This array can then be used to index the `labeled_image` and -> > set the entries that belong to small objects to `0`. -> > -> > ~~~ -> > object_areas = np.array([objf["area"] for objf in object_features]) -> > object_labels = np.array([objf["label"] for objf in object_features]) -> > small_objects = object_labels[object_areas < min_area] -> > labeled_image[np.isin(labeled_image,small_objects)] = 0 -> > ~~~ -> > {: .language-python} -> > -> > An even more elegant way to remove small objects from the image is -> > to leverage the `skimage.morphology` module. -> > It provides a function `skimage.morphology.remove_small_objects` that -> > does exactly what we are looking for. 
-> > It can be applied to a binary image and -> > returns a mask in which all objects smaller than `min_area` are excluded, -> > i.e, their pixel values are set to `False`. -> > We can then apply `skimage.measure.label` to the masked image: -> > -> > ~~~ -> > object_mask = skimage.morphology.remove_small_objects(binary_mask,min_area) -> > labeled_image, n = skimage.measure.label(object_mask, -> > connectivity=connectivity, return_num=True) -> > ~~~ -> > {: .language-python} -> > -> > Using the `skimage` features, we can implement -> > the `enhanced_connected_component` as follows: -> > -> > ~~~ -> > def enhanced_connected_components(filename, sigma=1.0, t=0.5, connectivity=2, min_area=0): -> > image = iio.imread(filename) -> > gray_image = skimage.color.rgb2gray(image) -> > blurred_image = skimage.filters.gaussian(gray_image, sigma=sigma) -> > binary_mask = blurred_image < t -> > object_mask = skimage.morphology.remove_small_objects(binary_mask,min_area) -> > labeled_image, count = skimage.measure.label(object_mask, -> > connectivity=connectivity, return_num=True) -> > return labeled_image, count -> > ~~~ -> > {: .language-python} -> > -> > We can now call the function with a chosen `min_area` and -> > display the resulting labeled image: -> > -> > ~~~ -> > labeled_image, count = enhanced_connected_components(filename="data/shapes-01.jpg", sigma=2.0, t=0.9, -> > connectivity=2, min_area=min_area) -> > colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) -> > -> > fig, ax = plt.subplots() -> > plt.imshow(colored_label_image) -> > plt.axis("off"); -> > -> > print("Found", count, "objects in the image.") -> > ~~~ -> > {: .language-python} -> > -> > ![Objects filtered by area](../fig/shapes-01-filtered-objects.png) -> > -> > ~~~ -> > Found 7 objects in the image. -> > ~~~ -> > {: .output} -> > -> > Note that the small objects are "gone" and we obtain the correct -> > number of 7 objects in the image. 
-> {: .solution} -{: .challenge} - -> ## Colour objects by area (optional, not included in timing) -> -> Finally, we would like to display the image with the objects coloured -> according to the magnitude of their area. -> In practice, this can be used with other properties to give -> visual cues of the object properties. -> -> > ## Solution -> > -> > We already know how to get the areas of the objects from the `regionprops`. -> > We just need to insert a zero area value for the background -> > (to colour it like a zero size object). -> > The background is also labeled `0` in the `labeled_image`, -> > so we insert the zero area value in front of the first element of -> > `object_areas` with `np.insert`. -> > Then we can create a `colored_area_image` where we assign each pixel value -> > the area by indexing the `object_areas` with the label values in `labeled_image`. -> > -> > ~~~ -> > object_areas = np.array([objf["area"] for objf in skimage.measure.regionprops(labeled_image)]) -> > object_areas = np.insert(0,1,object_areas) -> > colored_area_image = object_areas[labeled_image] -> > -> > fig, ax = plt.subplots() -> > im = plt.imshow(colored_area_image) -> > cbar = fig.colorbar(im, ax=ax, shrink=0.85) -> > cbar.ax.set_title("Area") -> > plt.axis("off"); -> > ~~~ -> > {: .language-python} -> > -> > ![Objects colored by area](../fig/shapes-01-objects-coloured-by-area.png) -> > -> > > You may have noticed that in the solution, we have used the -> > > `labeled_image` to index the array `object_areas`. This is an -> > > example of [advanced indexing in -> > > Numpy](https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing) -> > > The result is an array of the same shape as the `labeled_image` -> > > whose pixel values are selected from `object_areas` according to -> > > the object label. Hence the objects will be colored by area when -> > > the result is displayed. 
Note that advanced indexing with an -> > > integer array works slightly different than the indexing with a -> > > Boolean array that we have used for masking. While Boolean array -> > > indexing returns only the entries corresponding to the `True` -> > > values of the index, integer array indexing returns an array -> > > with the same shape as the index. You can read more about advanced -> > > indexing in the [Numpy -> > > documentation](https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing). -> > {: .callout} -> {: .solution} -{: .challenge} +``` + +::::::::::::::::::::::::::::::::::::::: challenge + +## Plot a histogram of the object area distribution (10 min) + +Similar to how we determined a "good" threshold in +[the *Thresholding* episode](07-thresholding.md), +it is often helpful to inspect the histogram of an object property. +For example, we want to look at the distribution of the object areas. + +1. Create and examine a [histogram](05-creating-histograms.md) + of the object areas obtained with `skimage.measure.regionprops`. +2. What does the histogram tell you about the objects? + +::::::::::::::: solution + +## Solution + +The histogram can be plotted with + +```python +fig, ax = plt.subplots() +plt.hist(object_areas) +plt.xlabel("Area (pixels)") +plt.ylabel("Number of objects"); +``` + +![](fig/shapes-01-areas-histogram.png){alt='Histogram of object areas'} + +The histogram shows the number of objects (vertical axis) +whose area is within a certain range (horizontal axis). +The height of the bars in the histogram indicates +the prevalence of objects with a certain area. +The whole histogram tells us about the distribution of object sizes in the image. +It is often possible to identify gaps between groups of bars +(or peaks if we draw the histogram as a continuous curve) +that tell us about certain groups in the image. + +In this example, we can see that there are four small objects that +contain less than 50000 pixels. 
+Then there is a group of four (1+1+2) objects in +the range between 200000 and 400000, +and three objects with a size around 500000. +For our object count, we might want to disregard the small objects as artifacts, +i.e, we want to ignore the leftmost bar of the histogram. +We could use a threshold of 50000 as the minimum area to count. +In fact, the `object_areas` list already tells us that +there are fewer than 200 pixels in these objects. +Therefore, it is reasonable to require a minimum area of at least 200 pixels +for a detected object. +In practice, finding the "right" threshold can be tricky and +usually involves an educated guess based on domain knowledge. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Filter objects by area (10 min) + +Now we would like to use a minimum area criterion to obtain a more +accurate count of the objects in the image. + +1. Find a way to calculate the number of objects by only counting + objects above a certain area. + +::::::::::::::: solution + +## Solution + +One way to count only objects above a certain area is to first +create a list of those objects, and then take the length of that +list as the object count. This can be done as follows: + +```python +min_area = 200 +large_objects = [] +for objf in object_features: + if objf["area"] > min_area: + large_objects.append(objf["label"]) +print("Found", len(large_objects), "objects!") +``` + +Another option is to use Numpy arrays to create the list of large objects. +We first create an array `object_areas` containing the object areas, +and an array `object_labels` containing the object labels. +The labels of the objects are also returned by `skimage.measure.regionprops`. +We have already seen that we can create boolean arrays using comparison operators. +Here we can use `object_areas > min_area` +to produce an array that has the same dimension as `object_labels`. 
+It can then be used to select the labels of objects whose area is +greater than `min_area` by indexing: + +```python +object_areas = np.array([objf["area"] for objf in object_features]) +object_labels = np.array([objf["label"] for objf in object_features]) +large_objects = object_labels[object_areas > min_area] +print("Found", len(large_objects), "objects!") +``` + +The advantage of using Numpy arrays is that +`for` loops and `if` statements in Python can be slow, +and in practice the first approach may not be feasible +if the image contains a large number of objects. +In that case, Numpy array functions turn out to be very useful because +they are much faster. + +In this example, we can also use the `np.count_nonzero` function +that we have seen earlier together with the `>` operator to count +the objects whose area is above `min_area`. + +```python +n = np.count_nonzero(object_areas > min_area) +print("Found", n, "objects!") +``` + +For all three alternatives, the output is the same and gives the +expected count of 7 objects. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Using functions from Numpy and other Python packages + +Functions from Python packages such as Numpy are often more efficient and +require less code to write. +It is a good idea to browse the reference pages of `numpy` and `skimage` to +look for an available function that can solve a given task. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Remove small objects (20 min) + +We might also want to exclude (mask) the small objects when plotting +the labeled image. + +2. Enhance the `connected_components` function such that + it automatically removes objects that are below a certain area that is + passed to the function as an optional parameter. 
+ +::::::::::::::: solution + +## Solution + +To remove the small objects from the labeled image, +we change the value of all pixels that belong to the small objects to +the background label 0. +One way to do this is to loop over all objects and +set the pixels that match the label of the object to 0. + +```python +for object_id, objf in enumerate(object_features, start=1): + if objf["area"] < min_area: + labeled_image[labeled_image == objf["label"]] = 0 +``` + +Here Numpy functions can also be used to eliminate +`for` loops and `if` statements. +Like above, we can create an array of the small object labels with +the comparison `object_areas < min_area`. +We can use another Numpy function, `np.isin`, +to set the pixels of all small objects to 0. +`np.isin` takes two arrays and returns a boolean array with values +`True` if the entry of the first array is found in the second array, +and `False` otherwise. +This array can then be used to index the `labeled_image` and +set the entries that belong to small objects to `0`. + +```python +object_areas = np.array([objf["area"] for objf in object_features]) +object_labels = np.array([objf["label"] for objf in object_features]) +small_objects = object_labels[object_areas < min_area] +labeled_image[np.isin(labeled_image,small_objects)] = 0 +``` + +An even more elegant way to remove small objects from the image is +to leverage the `skimage.morphology` module. +It provides a function `skimage.morphology.remove_small_objects` that +does exactly what we are looking for. +It can be applied to a binary image and +returns a mask in which all objects smaller than `min_area` are excluded, +i.e, their pixel values are set to `False`. 
+We can then apply `skimage.measure.label` to the masked image: + +```python +object_mask = skimage.morphology.remove_small_objects(binary_mask,min_area) +labeled_image, n = skimage.measure.label(object_mask, + connectivity=connectivity, return_num=True) +``` + +Using the `skimage` features, we can implement +the `enhanced_connected_components` as follows: + +```python +def enhanced_connected_components(filename, sigma=1.0, t=0.5, connectivity=2, min_area=0): + image = iio.imread(filename) + gray_image = skimage.color.rgb2gray(image) + blurred_image = skimage.filters.gaussian(gray_image, sigma=sigma) + binary_mask = blurred_image < t + object_mask = skimage.morphology.remove_small_objects(binary_mask,min_area) + labeled_image, count = skimage.measure.label(object_mask, + connectivity=connectivity, return_num=True) + return labeled_image, count +``` + +We can now call the function with a chosen `min_area` and +display the resulting labeled image: + +```python +labeled_image, count = enhanced_connected_components(filename="data/shapes-01.jpg", sigma=2.0, t=0.9, + connectivity=2, min_area=min_area) +colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) + +fig, ax = plt.subplots() +plt.imshow(colored_label_image) +plt.axis("off"); + +print("Found", count, "objects in the image.") +``` + +![](fig/shapes-01-filtered-objects.png){alt='Objects filtered by area'} + +```output +Found 7 objects in the image. +``` + +Note that the small objects are "gone" and we obtain the correct +number of 7 objects in the image. + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Colour objects by area (optional, not included in timing) + +Finally, we would like to display the image with the objects coloured +according to the magnitude of their area. +In practice, this can be used with other properties to give +visual cues of the object properties. 
+ +::::::::::::::: solution + +## Solution + +We already know how to get the areas of the objects from the `regionprops`. +We just need to insert a zero area value for the background +(to colour it like a zero size object). +The background is also labeled `0` in the `labeled_image`, +so we insert the zero area value in front of the first element of +`object_areas` with `np.insert`. +Then we can create a `colored_area_image` where we assign each pixel value +the area by indexing the `object_areas` with the label values in `labeled_image`. + +```python +object_areas = np.array([objf["area"] for objf in skimage.measure.regionprops(labeled_image)]) +object_areas = np.insert(0,1,object_areas) +colored_area_image = object_areas[labeled_image] + +fig, ax = plt.subplots() +im = plt.imshow(colored_area_image) +cbar = fig.colorbar(im, ax=ax, shrink=0.85) +cbar.ax.set_title("Area") +plt.axis("off"); +``` + +![](fig/shapes-01-objects-coloured-by-area.png){alt='Objects colored by area'} + +::::::::::::::::::::::::::::::::::::::::: callout + +You may have noticed that in the solution, we have used the +`labeled_image` to index the array `object_areas`. This is an +example of [advanced indexing in +Numpy](https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing). +The result is an array of the same shape as the `labeled_image` +whose pixel values are selected from `object_areas` according to +the object label. Hence the objects will be colored by area when +the result is displayed. Note that advanced indexing with an +integer array works slightly differently from the indexing with a +Boolean array that we have used for masking. While Boolean array +indexing returns only the entries corresponding to the `True` +values of the index, integer array indexing returns an array +with the same shape as the index. You can read more about advanced +indexing in the [Numpy +documentation](https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing). 
+ + + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- We can use `skimage.measure.label` to find and label connected objects in an image. +- We can use `skimage.measure.regionprops` to measure properties of labeled objects. +- We can use `skimage.morphology.remove_small_objects` to mask small objects and remove artifacts from an image. +- We can display the labeled image to view the objects coloured by label. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/09-challenges.md b/episodes/09-challenges.md index ec876ef17..58c0e466d 100644 --- a/episodes/09-challenges.md +++ b/episodes/09-challenges.md @@ -1,197 +1,209 @@ --- -title: "Capstone Challenge" +title: Capstone Challenge teaching: 10 exercises: 40 -questions: -- "How can we automatically count bacterial colonies with image analysis?" -objectives: -- "Bring together everything you've learnt so far to count bacterial colonies -in 3 images." -keypoints: -- "Using thresholding, connected component analysis and other tools we can automatically segment -images of bacterial colonies." -- "These methods are useful for many scientific problems, especially those involving -morphometrics." --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Bring together everything you've learnt so far to count bacterial colonies in 3 images. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can we automatically count bacterial colonies with image analysis? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + In this episode, we will provide a final challenge for you to attempt, based on all the skills you have acquired so far. This challenge will be related to the shape of objects in images (*morphometrics*). 
## Morphometrics: Bacteria Colony Counting -As mentioned in [the workshop introduction]({{ page.root }}{% link _episodes/01-introduction.md %}), +As mentioned in [the workshop introduction](01-introduction.md), your morphometric challenge is to determine how many bacteria colonies are in each of these images: -![Colony image 1](../fig/colonies-01.jpg) +![](fig/colonies-01.jpg){alt='Colony image 1'} -![Colony image 2](../fig/colonies-02.jpg) +![](fig/colonies-02.jpg){alt='Colony image 2'} -![Colony image 3](../fig/colonies-03.jpg) +![](fig/colonies-03.jpg){alt='Colony image 3'} The image files can be found at `data/colonies-01.tif`, `data/colonies-02.tif`, and `data/colonies-03.tif`. -> ## Morphometrics for bacterial colonies -> -> Write a Python program that uses skimage to -> count the number of bacteria colonies in each image, -> and for each, produce a new image that highlights the colonies. -> The image should look similar to this one: -> -> ![Sample morphometric output](../fig/colonies-01-summary.png) -> -> Additionally, print out the number of colonies for each image. -> -> Use what you have learnt about [histograms]({{ page.root }}{% link _episodes/05-creating-histograms.md %}), -> [thresholding]({{ page.root }}{% link _episodes/07-thresholding.md %}) and -> [connected component analysis]({{ page.root }}{% link _episodes/08-connected-components.md %}). -> Try to put your code into a re-usable function, -> so that it can be applied conveniently to any image file. 
-> -> > ## Solution -> > -> > First, let's work through the process for one image: -> > ~~~ -> > import numpy as np -> > import imageio.v3 as iio -> > import skimage.color -> > import skimage.filters -> > import matplotlib.pyplot as plt -> > import ipympl -> > %matplotlib widget -> > -> > bacteria_image = iio.imread(uri="data/colonies-01.tif") -> > -> > # display the image -> > fig, ax = plt.subplots() -> > plt.imshow(bacteria_image) -> > ~~~ -> > {: .language-python} -> > -> > ![Colony image 1](../fig/colonies-01.jpg) -> > -> > Next, we need to threshold the image to create a mask that covers only -> > the dark bacterial colonies. -> > This is easier using a grayscale image, so we convert it here: -> > -> > ~~~ -> > gray_bacteria = skimage.color.rgb2gray(bacteria_image) -> > -> > # display the gray image -> > fig, ax = plt.subplots() -> > plt.imshow(gray_bacteria, cmap="gray") -> > ~~~ -> > {: .language-python} -> > -> > ![Gray Colonies](../fig/colonies-01-gray.png) -> > -> > Next, we blur the image and create a histogram: -> > -> > ~~~ -> > blurred_image = skimage.filters.gaussian(gray_bacteria, sigma=1.0) -> > histogram, bin_edges = np.histogram(blurred_image, bins=256, range=(0.0, 1.0)) -> > fig, ax = plt.subplots() -> > plt.plot(bin_edges[0:-1], histogram) -> > plt.title("Graylevel histogram") -> > plt.xlabel("gray value") -> > plt.ylabel("pixel count") -> > plt.xlim(0, 1.0) -> > ~~~ -> > {: .language-python} -> > -> > ![Histogram image](../fig/colonies-01-histogram.png) -> > -> > In this histogram, we see three peaks - -> > the left one (i.e. the darkest pixels) is our colonies, -> > the central peak is the yellow/brown culture medium in the dish, -> > and the right one (i.e. the brightest pixels) is the white image background. 
-> > Therefore, we choose a threshold that selects the small left peak: -> > -> > ~~~ -> > mask = blurred_image < 0.2 -> > fig, ax = plt.subplots() -> > plt.imshow(mask, cmap="gray") -> > ~~~ -> > {: .language-python} -> > -> > ![Colony mask image](../fig/colonies-01-mask.png) -> > -> > This mask shows us where the colonies are in the image - -> > but how can we count how many there are? -> > This requires connected component analysis: -> > -> > ~~~ -> > labeled_image, count = skimage.measure.label(mask, return_num=True) -> > print(count) -> > ~~~ -> > {: .language-python} -> > -> > Finally, we create the summary image of the coloured colonies on top of -> > the grayscale image: -> > -> > ~~~ -> > # color each of the colonies a different color -> > colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) -> > # give our grayscale image rgb channels, so we can add the colored colonies -> > summary_image = skimage.color.gray2rgb(gray_bacteria) -> > summary_image[mask] = colored_label_image[mask] -> > -> > # plot overlay -> > fig, ax = plt.subplots() -> > plt.imshow(summary_image) -> > ~~~ -> > {: .language-python} -> > -> > ![Sample morphometric output](../fig/colonies-01-summary.png) -> > -> > Now that we've completed the task for one image, -> > we need to repeat this for the remaining two images. 
-> > This is a good point to collect the lines above into a re-usable function: -> > -> > ~~~ -> > def count_colonies(image_filename): -> > bacteria_image = iio.imread(image_filename) -> > gray_bacteria = skimage.color.rgb2gray(bacteria_image) -> > blurred_image = skimage.filters.gaussian(gray_bacteria, sigma=1.0) -> > mask = blurred_image < 0.2 -> > labeled_image, count = skimage.measure.label(mask, return_num=True) -> > print(f"There are {count} colonies in {image_filename}") -> > -> > colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) -> > summary_image = skimage.color.gray2rgb(gray_bacteria) -> > summary_image[mask] = colored_label_image[mask] -> > fig, ax = plt.subplots() -> > plt.imshow(summary_image) -> > ~~~ -> > {: .language-python} -> > -> > Now we can do this analysis on all the images via a for loop: -> > -> > ~~~ -> > for image_filename in ["data/colonies-01.tif", "data/colonies-02.tif", "data/colonies-03.tif"]: -> > count_colonies(image_filename=image_filename) -> > ~~~ -> > {: .language-python} -> > -> > ![Colony 1 output](../fig/colonies-01-summary.png) -> > ![Colony 2 output](../fig/colonies-02-summary.png) -> > ![Colony 3 output](../fig/colonies-03-summary.png) -> > -> > You'll notice that for the images with more colonies, the results aren't perfect. -> > For example, some small colonies are missing, -> > and there are likely some small black spots being labelled incorrectly as colonies. -> > You could expand this solution to, for example, -> > use an automatically determined threshold for each image, -> > which may fit each better. -> > Also, you could filter out colonies below a certain size -> > (as we did in [the _Connected Component Analysis_ episode]({{ page.root }}{% link _episodes/08-connected-components.md %})). -> > You'll also see that some touching colonies are merged into one big colony. 
-> > This could be fixed with more complicated segmentation methods -> > (outside of the scope of this lesson) like -> > [watershed](https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html). -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +## Morphometrics for bacterial colonies + +Write a Python program that uses skimage to +count the number of bacteria colonies in each image, +and for each, produce a new image that highlights the colonies. +The image should look similar to this one: + +![](fig/colonies-01-summary.png){alt='Sample morphometric output'} + +Additionally, print out the number of colonies for each image. + +Use what you have learnt about [histograms](05-creating-histograms.md), +[thresholding](07-thresholding.md) and +[connected component analysis](08-connected-components.md). +Try to put your code into a re-usable function, +so that it can be applied conveniently to any image file. + +::::::::::::::: solution + +## Solution + +First, let's work through the process for one image: + +```python +import numpy as np +import imageio.v3 as iio +import skimage.color +import skimage.filters +import matplotlib.pyplot as plt +import ipympl +%matplotlib widget + +bacteria_image = iio.imread(uri="data/colonies-01.tif") + +# display the image +fig, ax = plt.subplots() +plt.imshow(bacteria_image) +``` + +![](fig/colonies-01.jpg){alt='Colony image 1'} + +Next, we need to threshold the image to create a mask that covers only +the dark bacterial colonies. 
+This is easier using a grayscale image, so we convert it here: + +```python +gray_bacteria = skimage.color.rgb2gray(bacteria_image) + +# display the gray image +fig, ax = plt.subplots() +plt.imshow(gray_bacteria, cmap="gray") +``` + +![](fig/colonies-01-gray.png){alt='Gray Colonies'} + +Next, we blur the image and create a histogram: + +```python +blurred_image = skimage.filters.gaussian(gray_bacteria, sigma=1.0) +histogram, bin_edges = np.histogram(blurred_image, bins=256, range=(0.0, 1.0)) +fig, ax = plt.subplots() +plt.plot(bin_edges[0:-1], histogram) +plt.title("Graylevel histogram") +plt.xlabel("gray value") +plt.ylabel("pixel count") +plt.xlim(0, 1.0) +``` + +![](fig/colonies-01-histogram.png){alt='Histogram image'} + +In this histogram, we see three peaks - +the left one (i.e. the darkest pixels) is our colonies, +the central peak is the yellow/brown culture medium in the dish, +and the right one (i.e. the brightest pixels) is the white image background. +Therefore, we choose a threshold that selects the small left peak: + +```python +mask = blurred_image < 0.2 +fig, ax = plt.subplots() +plt.imshow(mask, cmap="gray") +``` + +![](fig/colonies-01-mask.png){alt='Colony mask image'} + +This mask shows us where the colonies are in the image - +but how can we count how many there are? 
+This requires connected component analysis: + +```python +labeled_image, count = skimage.measure.label(mask, return_num=True) +print(count) +``` + +Finally, we create the summary image of the coloured colonies on top of +the grayscale image: + +```python +# color each of the colonies a different color +colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) +# give our grayscale image rgb channels, so we can add the colored colonies +summary_image = skimage.color.gray2rgb(gray_bacteria) +summary_image[mask] = colored_label_image[mask] + +# plot overlay +fig, ax = plt.subplots() +plt.imshow(summary_image) +``` + +![](fig/colonies-01-summary.png){alt='Sample morphometric output'} + +Now that we've completed the task for one image, +we need to repeat this for the remaining two images. +This is a good point to collect the lines above into a re-usable function: + +```python +def count_colonies(image_filename): + bacteria_image = iio.imread(image_filename) + gray_bacteria = skimage.color.rgb2gray(bacteria_image) + blurred_image = skimage.filters.gaussian(gray_bacteria, sigma=1.0) + mask = blurred_image < 0.2 + labeled_image, count = skimage.measure.label(mask, return_num=True) + print(f"There are {count} colonies in {image_filename}") + + colored_label_image = skimage.color.label2rgb(labeled_image, bg_label=0) + summary_image = skimage.color.gray2rgb(gray_bacteria) + summary_image[mask] = colored_label_image[mask] + fig, ax = plt.subplots() + plt.imshow(summary_image) +``` + +Now we can do this analysis on all the images via a for loop: + +```python +for image_filename in ["data/colonies-01.tif", "data/colonies-02.tif", "data/colonies-03.tif"]: + count_colonies(image_filename=image_filename) +``` + +![](fig/colonies-01-summary.png){alt='Colony 1 output'} +![](fig/colonies-02-summary.png){alt='Colony 2 output'} +![](fig/colonies-03-summary.png){alt='Colony 3 output'} + +You'll notice that for the images with more colonies, the results aren't perfect. 
+For example, some small colonies are missing, +and there are likely some small black spots being labelled incorrectly as colonies. +You could expand this solution to, for example, +use an automatically determined threshold for each image, +which may fit each better. +Also, you could filter out colonies below a certain size +(as we did in [the *Connected Component Analysis* episode](08-connected-components.md)). +You'll also see that some touching colonies are merged into one big colony. +This could be fixed with more complicated segmentation methods +(outside of the scope of this lesson) like +[watershed](https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html). + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Using thresholding, connected component analysis and other tools we can automatically segment images of bacterial colonies. +- These methods are useful for many scientific problems, especially those involving morphometrics. 
+ +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/data/beads.jpg b/episodes/data/beads.jpg similarity index 100% rename from data/beads.jpg rename to episodes/data/beads.jpg diff --git a/data/board.jpg b/episodes/data/board.jpg similarity index 100% rename from data/board.jpg rename to episodes/data/board.jpg diff --git a/data/centers.txt b/episodes/data/centers.txt similarity index 100% rename from data/centers.txt rename to episodes/data/centers.txt diff --git a/data/chair.jpg b/episodes/data/chair.jpg similarity index 100% rename from data/chair.jpg rename to episodes/data/chair.jpg diff --git a/data/colonies-01.tif b/episodes/data/colonies-01.tif similarity index 100% rename from data/colonies-01.tif rename to episodes/data/colonies-01.tif diff --git a/data/colonies-02.tif b/episodes/data/colonies-02.tif similarity index 100% rename from data/colonies-02.tif rename to episodes/data/colonies-02.tif diff --git a/data/colonies-03.tif b/episodes/data/colonies-03.tif similarity index 100% rename from data/colonies-03.tif rename to episodes/data/colonies-03.tif diff --git a/data/eight.tif b/episodes/data/eight.tif similarity index 100% rename from data/eight.tif rename to episodes/data/eight.tif diff --git a/data/gaussian-original.png b/episodes/data/gaussian-original.png similarity index 100% rename from data/gaussian-original.png rename to episodes/data/gaussian-original.png diff --git a/data/maize-root-cluster.jpg b/episodes/data/maize-root-cluster.jpg similarity index 100% rename from data/maize-root-cluster.jpg rename to episodes/data/maize-root-cluster.jpg diff --git a/data/maize-roots-grayscale.jpg b/episodes/data/maize-roots-grayscale.jpg similarity index 100% rename from data/maize-roots-grayscale.jpg rename to episodes/data/maize-roots-grayscale.jpg diff --git a/data/maize-seedlings.tif b/episodes/data/maize-seedlings.tif similarity index 100% rename from data/maize-seedlings.tif rename to episodes/data/maize-seedlings.tif diff --git 
a/data/plant-seedling.jpg b/episodes/data/plant-seedling.jpg similarity index 100% rename from data/plant-seedling.jpg rename to episodes/data/plant-seedling.jpg diff --git a/data/remote-control.jpg b/episodes/data/remote-control.jpg similarity index 100% rename from data/remote-control.jpg rename to episodes/data/remote-control.jpg diff --git a/data/shapes-01.jpg b/episodes/data/shapes-01.jpg similarity index 100% rename from data/shapes-01.jpg rename to episodes/data/shapes-01.jpg diff --git a/data/shapes-02.jpg b/episodes/data/shapes-02.jpg similarity index 100% rename from data/shapes-02.jpg rename to episodes/data/shapes-02.jpg diff --git a/data/sudoku.png b/episodes/data/sudoku.png similarity index 100% rename from data/sudoku.png rename to episodes/data/sudoku.png diff --git a/data/tree.jpg b/episodes/data/tree.jpg similarity index 100% rename from data/tree.jpg rename to episodes/data/tree.jpg diff --git a/data/trial-016.jpg b/episodes/data/trial-016.jpg similarity index 100% rename from data/trial-016.jpg rename to episodes/data/trial-016.jpg diff --git a/data/trial-020.jpg b/episodes/data/trial-020.jpg similarity index 100% rename from data/trial-020.jpg rename to episodes/data/trial-020.jpg diff --git a/data/trial-216.jpg b/episodes/data/trial-216.jpg similarity index 100% rename from data/trial-216.jpg rename to episodes/data/trial-216.jpg diff --git a/data/trial-293.jpg b/episodes/data/trial-293.jpg similarity index 100% rename from data/trial-293.jpg rename to episodes/data/trial-293.jpg diff --git a/data/wellplate-01.jpg b/episodes/data/wellplate-01.jpg similarity index 100% rename from data/wellplate-01.jpg rename to episodes/data/wellplate-01.jpg diff --git a/data/wellplate-02.tif b/episodes/data/wellplate-02.tif similarity index 100% rename from data/wellplate-02.tif rename to episodes/data/wellplate-02.tif diff --git a/fig/Gaussian_2D.png b/episodes/fig/Gaussian_2D.png similarity index 100% rename from fig/Gaussian_2D.png rename to 
episodes/fig/Gaussian_2D.png diff --git a/fig/Normal_Distribution_PDF.svg b/episodes/fig/Normal_Distribution_PDF.svg similarity index 100% rename from fig/Normal_Distribution_PDF.svg rename to episodes/fig/Normal_Distribution_PDF.svg diff --git a/fig/beads-canny-ui.png b/episodes/fig/beads-canny-ui.png similarity index 100% rename from fig/beads-canny-ui.png rename to episodes/fig/beads-canny-ui.png diff --git a/fig/beads-out.png b/episodes/fig/beads-out.png similarity index 100% rename from fig/beads-out.png rename to episodes/fig/beads-out.png diff --git a/fig/black-and-white-edge-pixels.jpg b/episodes/fig/black-and-white-edge-pixels.jpg similarity index 100% rename from fig/black-and-white-edge-pixels.jpg rename to episodes/fig/black-and-white-edge-pixels.jpg diff --git a/fig/black-and-white-gradient.png b/episodes/fig/black-and-white-gradient.png similarity index 100% rename from fig/black-and-white-gradient.png rename to episodes/fig/black-and-white-gradient.png diff --git a/fig/black-and-white.jpg b/episodes/fig/black-and-white.jpg similarity index 100% rename from fig/black-and-white.jpg rename to episodes/fig/black-and-white.jpg diff --git a/fig/blur-demo.gif b/episodes/fig/blur-demo.gif similarity index 100% rename from fig/blur-demo.gif rename to episodes/fig/blur-demo.gif diff --git a/fig/board-coordinates.jpg b/episodes/fig/board-coordinates.jpg similarity index 100% rename from fig/board-coordinates.jpg rename to episodes/fig/board-coordinates.jpg diff --git a/fig/board-final.jpg b/episodes/fig/board-final.jpg similarity index 100% rename from fig/board-final.jpg rename to episodes/fig/board-final.jpg diff --git a/fig/cartesian-coordinates.png b/episodes/fig/cartesian-coordinates.png similarity index 100% rename from fig/cartesian-coordinates.png rename to episodes/fig/cartesian-coordinates.png diff --git a/fig/cat-corner-blue.png b/episodes/fig/cat-corner-blue.png similarity index 100% rename from fig/cat-corner-blue.png rename to 
episodes/fig/cat-corner-blue.png diff --git a/fig/cat-eye-pixels.jpg b/episodes/fig/cat-eye-pixels.jpg similarity index 100% rename from fig/cat-eye-pixels.jpg rename to episodes/fig/cat-eye-pixels.jpg diff --git a/fig/cat.jpg b/episodes/fig/cat.jpg similarity index 100% rename from fig/cat.jpg rename to episodes/fig/cat.jpg diff --git a/fig/chair-layers-rgb.png b/episodes/fig/chair-layers-rgb.png similarity index 100% rename from fig/chair-layers-rgb.png rename to episodes/fig/chair-layers-rgb.png diff --git a/fig/chair-original.jpg b/episodes/fig/chair-original.jpg similarity index 100% rename from fig/chair-original.jpg rename to episodes/fig/chair-original.jpg diff --git a/fig/checkerboard-blue-channel.png b/episodes/fig/checkerboard-blue-channel.png similarity index 100% rename from fig/checkerboard-blue-channel.png rename to episodes/fig/checkerboard-blue-channel.png diff --git a/fig/checkerboard-green-channel.png b/episodes/fig/checkerboard-green-channel.png similarity index 100% rename from fig/checkerboard-green-channel.png rename to episodes/fig/checkerboard-green-channel.png diff --git a/fig/checkerboard-red-channel.png b/episodes/fig/checkerboard-red-channel.png similarity index 100% rename from fig/checkerboard-red-channel.png rename to episodes/fig/checkerboard-red-channel.png diff --git a/fig/checkerboard.png b/episodes/fig/checkerboard.png similarity index 100% rename from fig/checkerboard.png rename to episodes/fig/checkerboard.png diff --git a/fig/colonies-01-gray.png b/episodes/fig/colonies-01-gray.png similarity index 100% rename from fig/colonies-01-gray.png rename to episodes/fig/colonies-01-gray.png diff --git a/fig/colonies-01-histogram.png b/episodes/fig/colonies-01-histogram.png similarity index 100% rename from fig/colonies-01-histogram.png rename to episodes/fig/colonies-01-histogram.png diff --git a/fig/colonies-01-mask.png b/episodes/fig/colonies-01-mask.png similarity index 100% rename from fig/colonies-01-mask.png rename to 
episodes/fig/colonies-01-mask.png diff --git a/fig/colonies-01-summary.png b/episodes/fig/colonies-01-summary.png similarity index 100% rename from fig/colonies-01-summary.png rename to episodes/fig/colonies-01-summary.png diff --git a/fig/colonies-01.jpg b/episodes/fig/colonies-01.jpg similarity index 100% rename from fig/colonies-01.jpg rename to episodes/fig/colonies-01.jpg diff --git a/fig/colonies-02-summary.png b/episodes/fig/colonies-02-summary.png similarity index 100% rename from fig/colonies-02-summary.png rename to episodes/fig/colonies-02-summary.png diff --git a/fig/colonies-02.jpg b/episodes/fig/colonies-02.jpg similarity index 100% rename from fig/colonies-02.jpg rename to episodes/fig/colonies-02.jpg diff --git a/fig/colonies-03-summary.png b/episodes/fig/colonies-03-summary.png similarity index 100% rename from fig/colonies-03-summary.png rename to episodes/fig/colonies-03-summary.png diff --git a/fig/colonies-03.jpg b/episodes/fig/colonies-03.jpg similarity index 100% rename from fig/colonies-03.jpg rename to episodes/fig/colonies-03.jpg diff --git a/fig/colonies01.png b/episodes/fig/colonies01.png similarity index 100% rename from fig/colonies01.png rename to episodes/fig/colonies01.png diff --git a/fig/colony-mask.png b/episodes/fig/colony-mask.png similarity index 100% rename from fig/colony-mask.png rename to episodes/fig/colony-mask.png diff --git a/fig/colour-table.png b/episodes/fig/colour-table.png similarity index 100% rename from fig/colour-table.png rename to episodes/fig/colour-table.png diff --git a/fig/combination.png b/episodes/fig/combination.png similarity index 100% rename from fig/combination.png rename to episodes/fig/combination.png diff --git a/fig/drawing-practice.jpg b/episodes/fig/drawing-practice.jpg similarity index 100% rename from fig/drawing-practice.jpg rename to episodes/fig/drawing-practice.jpg diff --git a/fig/eight.png b/episodes/fig/eight.png similarity index 100% rename from fig/eight.png rename to 
episodes/fig/eight.png diff --git a/fig/five.png b/episodes/fig/five.png similarity index 100% rename from fig/five.png rename to episodes/fig/five.png diff --git a/fig/four-maize-roots-binary-improved.jpg b/episodes/fig/four-maize-roots-binary-improved.jpg similarity index 100% rename from fig/four-maize-roots-binary-improved.jpg rename to episodes/fig/four-maize-roots-binary-improved.jpg diff --git a/fig/four-maize-roots-binary.jpg b/episodes/fig/four-maize-roots-binary.jpg similarity index 100% rename from fig/four-maize-roots-binary.jpg rename to episodes/fig/four-maize-roots-binary.jpg diff --git a/fig/four-maize-roots.jpg b/episodes/fig/four-maize-roots.jpg similarity index 100% rename from fig/four-maize-roots.jpg rename to episodes/fig/four-maize-roots.jpg diff --git a/fig/gaussian-blurred.png b/episodes/fig/gaussian-blurred.png similarity index 100% rename from fig/gaussian-blurred.png rename to episodes/fig/gaussian-blurred.png diff --git a/fig/gaussian-kernel.png b/episodes/fig/gaussian-kernel.png similarity index 100% rename from fig/gaussian-kernel.png rename to episodes/fig/gaussian-kernel.png diff --git a/fig/grayscale.png b/episodes/fig/grayscale.png similarity index 100% rename from fig/grayscale.png rename to episodes/fig/grayscale.png diff --git a/fig/image-coordinates.png b/episodes/fig/image-coordinates.png similarity index 100% rename from fig/image-coordinates.png rename to episodes/fig/image-coordinates.png diff --git a/fig/jupyter_overview.png b/episodes/fig/jupyter_overview.png similarity index 100% rename from fig/jupyter_overview.png rename to episodes/fig/jupyter_overview.png diff --git a/fig/left-hand-coordinates.png b/episodes/fig/left-hand-coordinates.png similarity index 100% rename from fig/left-hand-coordinates.png rename to episodes/fig/left-hand-coordinates.png diff --git a/fig/maize-root-cluster-histogram.png b/episodes/fig/maize-root-cluster-histogram.png similarity index 100% rename from fig/maize-root-cluster-histogram.png 
rename to episodes/fig/maize-root-cluster-histogram.png diff --git a/fig/maize-root-cluster-mask.png b/episodes/fig/maize-root-cluster-mask.png similarity index 100% rename from fig/maize-root-cluster-mask.png rename to episodes/fig/maize-root-cluster-mask.png diff --git a/fig/maize-root-cluster-selected.png b/episodes/fig/maize-root-cluster-selected.png similarity index 100% rename from fig/maize-root-cluster-selected.png rename to episodes/fig/maize-root-cluster-selected.png diff --git a/fig/maize-root-cluster-threshold.jpg b/episodes/fig/maize-root-cluster-threshold.jpg similarity index 100% rename from fig/maize-root-cluster-threshold.jpg rename to episodes/fig/maize-root-cluster-threshold.jpg diff --git a/fig/maize-roots-threshold.png b/episodes/fig/maize-roots-threshold.png similarity index 100% rename from fig/maize-roots-threshold.png rename to episodes/fig/maize-roots-threshold.png diff --git a/fig/maize-seedling-enlarged.jpg b/episodes/fig/maize-seedling-enlarged.jpg similarity index 100% rename from fig/maize-seedling-enlarged.jpg rename to episodes/fig/maize-seedling-enlarged.jpg diff --git a/fig/maize-seedling-original.jpg b/episodes/fig/maize-seedling-original.jpg similarity index 100% rename from fig/maize-seedling-original.jpg rename to episodes/fig/maize-seedling-original.jpg diff --git a/fig/maize-seedlings-mask.png b/episodes/fig/maize-seedlings-mask.png similarity index 100% rename from fig/maize-seedlings-mask.png rename to episodes/fig/maize-seedlings-mask.png diff --git a/fig/maize-seedlings-masked.jpg b/episodes/fig/maize-seedlings-masked.jpg similarity index 100% rename from fig/maize-seedlings-masked.jpg rename to episodes/fig/maize-seedlings-masked.jpg diff --git a/fig/maize-seedlings.jpg b/episodes/fig/maize-seedlings.jpg similarity index 100% rename from fig/maize-seedlings.jpg rename to episodes/fig/maize-seedlings.jpg diff --git a/fig/plant-seedling-colour-histogram.png b/episodes/fig/plant-seedling-colour-histogram.png similarity 
index 100% rename from fig/plant-seedling-colour-histogram.png rename to episodes/fig/plant-seedling-colour-histogram.png diff --git a/fig/plant-seedling-grayscale-histogram-mask.png b/episodes/fig/plant-seedling-grayscale-histogram-mask.png similarity index 100% rename from fig/plant-seedling-grayscale-histogram-mask.png rename to episodes/fig/plant-seedling-grayscale-histogram-mask.png diff --git a/fig/plant-seedling-grayscale-histogram.png b/episodes/fig/plant-seedling-grayscale-histogram.png similarity index 100% rename from fig/plant-seedling-grayscale-histogram.png rename to episodes/fig/plant-seedling-grayscale-histogram.png diff --git a/fig/plant-seedling-grayscale.png b/episodes/fig/plant-seedling-grayscale.png similarity index 100% rename from fig/plant-seedling-grayscale.png rename to episodes/fig/plant-seedling-grayscale.png diff --git a/fig/quality-histogram.jpg b/episodes/fig/quality-histogram.jpg similarity index 100% rename from fig/quality-histogram.jpg rename to episodes/fig/quality-histogram.jpg diff --git a/fig/quality-jpg.jpg b/episodes/fig/quality-jpg.jpg similarity index 100% rename from fig/quality-jpg.jpg rename to episodes/fig/quality-jpg.jpg diff --git a/fig/quality-original.jpg b/episodes/fig/quality-original.jpg similarity index 100% rename from fig/quality-original.jpg rename to episodes/fig/quality-original.jpg diff --git a/fig/quality-tif.jpg b/episodes/fig/quality-tif.jpg similarity index 100% rename from fig/quality-tif.jpg rename to episodes/fig/quality-tif.jpg diff --git a/fig/rectangle-gaussian-blurred.png b/episodes/fig/rectangle-gaussian-blurred.png similarity index 100% rename from fig/rectangle-gaussian-blurred.png rename to episodes/fig/rectangle-gaussian-blurred.png diff --git a/fig/remote-control-masked.jpg b/episodes/fig/remote-control-masked.jpg similarity index 100% rename from fig/remote-control-masked.jpg rename to episodes/fig/remote-control-masked.jpg diff --git a/fig/shapes-01-areas-histogram.png 
b/episodes/fig/shapes-01-areas-histogram.png similarity index 100% rename from fig/shapes-01-areas-histogram.png rename to episodes/fig/shapes-01-areas-histogram.png diff --git a/fig/shapes-01-canny-edge-output.png b/episodes/fig/shapes-01-canny-edge-output.png similarity index 100% rename from fig/shapes-01-canny-edge-output.png rename to episodes/fig/shapes-01-canny-edge-output.png diff --git a/fig/shapes-01-canny-edges.png b/episodes/fig/shapes-01-canny-edges.png similarity index 100% rename from fig/shapes-01-canny-edges.png rename to episodes/fig/shapes-01-canny-edges.png diff --git a/fig/shapes-01-canny-track-edges.png b/episodes/fig/shapes-01-canny-track-edges.png similarity index 100% rename from fig/shapes-01-canny-track-edges.png rename to episodes/fig/shapes-01-canny-track-edges.png diff --git a/fig/shapes-01-cca-detail.png b/episodes/fig/shapes-01-cca-detail.png similarity index 100% rename from fig/shapes-01-cca-detail.png rename to episodes/fig/shapes-01-cca-detail.png diff --git a/fig/shapes-01-filtered-objects.png b/episodes/fig/shapes-01-filtered-objects.png similarity index 100% rename from fig/shapes-01-filtered-objects.png rename to episodes/fig/shapes-01-filtered-objects.png diff --git a/fig/shapes-01-grayscale.png b/episodes/fig/shapes-01-grayscale.png similarity index 100% rename from fig/shapes-01-grayscale.png rename to episodes/fig/shapes-01-grayscale.png diff --git a/fig/shapes-01-histogram.png b/episodes/fig/shapes-01-histogram.png similarity index 100% rename from fig/shapes-01-histogram.png rename to episodes/fig/shapes-01-histogram.png diff --git a/fig/shapes-01-labeled.png b/episodes/fig/shapes-01-labeled.png similarity index 100% rename from fig/shapes-01-labeled.png rename to episodes/fig/shapes-01-labeled.png diff --git a/fig/shapes-01-mask.png b/episodes/fig/shapes-01-mask.png similarity index 100% rename from fig/shapes-01-mask.png rename to episodes/fig/shapes-01-mask.png diff --git a/fig/shapes-01-objects-coloured-by-area.png 
b/episodes/fig/shapes-01-objects-coloured-by-area.png similarity index 100% rename from fig/shapes-01-objects-coloured-by-area.png rename to episodes/fig/shapes-01-objects-coloured-by-area.png diff --git a/fig/shapes-01-selected.png b/episodes/fig/shapes-01-selected.png similarity index 100% rename from fig/shapes-01-selected.png rename to episodes/fig/shapes-01-selected.png diff --git a/fig/shapes-02-histogram.png b/episodes/fig/shapes-02-histogram.png similarity index 100% rename from fig/shapes-02-histogram.png rename to episodes/fig/shapes-02-histogram.png diff --git a/fig/shapes-02-mask.png b/episodes/fig/shapes-02-mask.png similarity index 100% rename from fig/shapes-02-mask.png rename to episodes/fig/shapes-02-mask.png diff --git a/fig/shapes-02-selected.png b/episodes/fig/shapes-02-selected.png similarity index 100% rename from fig/shapes-02-selected.png rename to episodes/fig/shapes-02-selected.png diff --git a/fig/sudoku-gray.png b/episodes/fig/sudoku-gray.png similarity index 100% rename from fig/sudoku-gray.png rename to episodes/fig/sudoku-gray.png diff --git a/fig/three-colours.png b/episodes/fig/three-colours.png similarity index 100% rename from fig/three-colours.png rename to episodes/fig/three-colours.png diff --git a/fig/wellplate-01-masked.jpg b/episodes/fig/wellplate-01-masked.jpg similarity index 100% rename from fig/wellplate-01-masked.jpg rename to episodes/fig/wellplate-01-masked.jpg diff --git a/fig/wellplate-02-histogram.png b/episodes/fig/wellplate-02-histogram.png similarity index 100% rename from fig/wellplate-02-histogram.png rename to episodes/fig/wellplate-02-histogram.png diff --git a/fig/wellplate-02-masked.jpg b/episodes/fig/wellplate-02-masked.jpg similarity index 100% rename from fig/wellplate-02-masked.jpg rename to episodes/fig/wellplate-02-masked.jpg diff --git a/fig/wellplate-02.jpg b/episodes/fig/wellplate-02.jpg similarity index 100% rename from fig/wellplate-02.jpg rename to episodes/fig/wellplate-02.jpg diff --git 
a/fig/zero.png b/episodes/fig/zero.png similarity index 100% rename from fig/zero.png rename to episodes/fig/zero.png diff --git a/index.md b/index.md index 93e1856bb..27d4cf7f9 100644 --- a/index.md +++ b/index.md @@ -1,21 +1,26 @@ --- -layout: lesson -root: . +site: sandpaper::sandpaper_site --- This lesson shows how to use Python and skimage to do basic image processing. -> ## Prerequisites -> -> This lesson assumes you have a working knowledge of Python and some previous exposure to the Bash shell. -> These requirements can be fulfilled by: -> a) completing a Software Carpentry Python workshop **or** -> b) completing a Data Carpentry Ecology workshop (with Python) **and** a Data Carpentry Genomics workshop **or** -> c) independent exposure to both Python and the Bash shell. -> -> If you're unsure whether you have enough experience to participate in this workshop, please read over -> [this detailed list]({{ page.root }}{% link _extras/prereqs.md %}), which gives all of the functions, operators, and other concepts you will need -> to be familiar with. -{: .prereq} - -Before following the lesson, please [make sure you have the software and data required]({{ page.root }}{% link setup.md %}). +:::::::::::::::::::::::::::::::::::::::::: prereq + +## Prerequisites + +This lesson assumes you have a working knowledge of Python and some previous exposure to the Bash shell. +These requirements can be fulfilled by: +a) completing a Software Carpentry Python workshop **or** +b) completing a Data Carpentry Ecology workshop (with Python) **and** a Data Carpentry Genomics workshop **or** +c) independent exposure to both Python and the Bash shell. + +If you're unsure whether you have enough experience to participate in this workshop, please read over +[this detailed list](instructors/prereqs.md), which gives all of the functions, operators, and other concepts you will need +to be familiar with. 
+ + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +Before following the lesson, please [make sure you have the software and data required](learners/setup.md). + + diff --git a/_extras/edge-detection.md b/instructors/edge-detection.md similarity index 64% rename from _extras/edge-detection.md rename to instructors/edge-detection.md index c3df2b3ec..81ef3e144 100644 --- a/_extras/edge-detection.md +++ b/instructors/edge-detection.md @@ -1,19 +1,18 @@ --- -title: "Extra Episode: Edge Detection" -layout: episode +title: 'Extra Episode: Edge Detection' teaching: ?? exercises: ?? -questions: -- "How can we automatically detect the edges of the objects in an image?" +questions: How can we automatically detect the edges of the objects in an image? objectives: -- "Apply Canny edge detection to an image." -- "Explain how we can use sliders to expedite finding appropriate parameter -values for our skimage function calls." -- "Create skimage windows with sliders and associated callback functions." +- Apply Canny edge detection to an image. +- Explain how we can use sliders to expedite finding appropriate parameter values + for our skimage function calls. +- Create skimage windows with sliders and associated callback functions. keypoints: -- "The `skimage.viewer.ImageViewer` is extended using a `skimage.viewer.plugins.Plugin`." -- "We supply a filter function callback when creating a Plugin." -- "Parameters of the callback function are manipulated interactively by creating sliders with the `skimage.viewer.widgets.slider()` function and adding them to the plugin." +- The `skimage.viewer.ImageViewer` is extended using a `skimage.viewer.plugins.Plugin`. +- We supply a filter function callback when creating a Plugin. +- Parameters of the callback function are manipulated interactively by creating sliders + with the `skimage.viewer.widgets.slider()` function and adding them to the plugin. 
--- In this episode, we will learn how to use skimage functions to apply *edge @@ -32,7 +31,7 @@ For example, once we have found the edges of the objects in the image (or once we have converted the image to binary using thresholding), we can use that information to find the image *contours*, which we will learn about in -[the _Connected Component Analysis_ episode]({{ page.root }}{% link _episodes/08-connected-components.md %}). +[the *Connected Component Analysis* episode](../episodes/08-connected-components.md). With the contours, we can do things like counting the number of objects in the image, measure the size of the objects, classify the shapes of the objects, and so on. @@ -48,7 +47,7 @@ let us look at an image with a very simple edge - this grayscale image of two overlapped pieces of paper, one black and and one white: -![Black and white image](../fig/black-and-white.jpg) +![](fig/black-and-white.jpg){alt='Black and white image'} The obvious edge in the image is the vertical line between the black paper and the white paper. @@ -59,7 +58,7 @@ But, at a pixel-by-pixel level, is the transition really that sudden? If we zoom in on the edge more closely, as in this image, we can see that the edge between the black and white areas of the image is not a clear-cut line. -![Black and white edge pixels](../fig/black-and-white-edge-pixels.jpg) +![](fig/black-and-white-edge-pixels.jpg){alt='Black and white edge pixels'} We can learn more about the edge by examining the colour values of some of the pixels. Imagine a short line segment, @@ -68,7 +67,7 @@ This plot shows the pixel values (between 0 and 255, since this is a grayscale image) for forty pixels spanning the transition from black to white. -![Gradient near transition](../fig/black-and-white-gradient.png) +![](fig/black-and-white-gradient.png){alt='Gradient near transition'} It is obvious that the "edge" here is not so sudden! 
So, any skimage method to detect edges in an image must be able to @@ -82,35 +81,35 @@ This method uses a series of steps, some incorporating other types of edge detec The skimage `skimage.feature.canny()` function performs the following steps: 1. A Gaussian blur - (that is characterised by the `sigma` parameter, - see [_Blurring Images_]({{ page.root }}{% link _episodes/06-blurring.md %})) - is applied to remove noise from the image. - (So if we are doing edge detection via this function, - we should not perform our own blurring step.) + (that is characterised by the `sigma` parameter, + see [*Blurring Images*](../episodes/06-blurring.md) + is applied to remove noise from the image. + (So if we are doing edge detection via this function, + we should not perform our own blurring step.) 2. Sobel edge detection is performed on both the cx and ry dimensions, - to find the intensity gradients of the edges in the image. - Sobel edge detection computes - the derivative of a curve fitting the gradient between light and dark areas - in an image, and then finds the peak of the derivative, - which is interpreted as the location of an edge pixel. + to find the intensity gradients of the edges in the image. + Sobel edge detection computes + the derivative of a curve fitting the gradient between light and dark areas + in an image, and then finds the peak of the derivative, + which is interpreted as the location of an edge pixel. 3. Pixels that would be highlighted, but seem too far from any edge, - are removed. - This is called *non-maximum suppression*, and - the result is edge lines that are thinner than those produced by other methods. + are removed. + This is called *non-maximum suppression*, and + the result is edge lines that are thinner than those produced by other methods. 4. A double threshold is applied to determine potential edges. - Here extraneous pixels caused by noise or milder colour variation than desired - are eliminated. 
- If a pixel's gradient value - based on the Sobel differential - - is above the high threshold value, - it is considered a strong candidate for an edge. - If the gradient is below the low threshold value, it is turned off. - If the gradient is in between, - the pixel is considered a weak candidate for an edge pixel. + Here extraneous pixels caused by noise or milder colour variation than desired + are eliminated. + If a pixel's gradient value - based on the Sobel differential - + is above the high threshold value, + it is considered a strong candidate for an edge. + If the gradient is below the low threshold value, it is turned off. + If the gradient is in between, + the pixel is considered a weak candidate for an edge pixel. 5. Final detection of edges is performed using *hysteresis*. - Here, weak candidate pixels are examined, and - if they are connected to strong candidate pixels, - they are considered to be edge pixels; - the remaining, non-connected weak candidates are turned off. + Here, weak candidate pixels are examined, and + if they are connected to strong candidate pixels, + they are considered to be edge pixels; + the remaining, non-connected weak candidates are turned off. For a user of the `skimage.feature.canny()` edge detection function, there are three important parameters to pass in: @@ -123,9 +122,9 @@ The following program illustrates how the `skimage.feature.canny()` method can be used to detect the edges in an image. We will execute the program on the `data/shapes-01.jpg` image, which we used before in -[the _Thresholding_ episode]({{ page.root }}{% link _episodes/07-thresholding.md %}): +[the *Thresholding* episode](../episodes/07-thresholding.md): -![coloured shapes](../data/shapes-01.jpg) +![](data/shapes-01.jpg){alt='coloured shapes'} We are interested in finding the edges of the shapes in the image, and so the colours are not important. 
@@ -143,7 +142,7 @@ After the required libraries are imported, the program reads the command-line arguments and saves them in their respective variables. -~~~ +```python """ * Python script to demonstrate Canny edge detection. * @@ -159,29 +158,26 @@ filename = sys.argv[1] sigma = float(sys.argv[2]) low_threshold = float(sys.argv[3]) high_threshold = float(sys.argv[4]) -~~~ -{: .language-python} +``` Next, the original images is read, in grayscale, and displayed. -~~~ +```python # load and display original image as grayscale image = iio.imread(uri=filename, mode="L") plt.imshow(image) -~~~ -{: .language-python} +``` Then, we apply Canny edge detection with this function call: -~~~ +```python edges = skimage.feature.canny( image=image, sigma=sigma, low_threshold=low_threshold, high_threshold=high_threshold, ) -~~~ -{: .language-python} +``` As we are using it here, the `skimage.feature.canny()` function takes four parameters. The first parameter is the input image. @@ -197,16 +193,15 @@ while everything else is black. Finally, the program displays the `edges` image, showing the edges that were found in the original. -~~~ +```python # display edges skimage.io.imshow(edges) -~~~ -{: .language-python} +``` Here is the result, for the coloured shape image above, with sigma value 2.0, low threshold value 0.1 and high threshold value 0.3: -![Output file of Canny edge detection](../fig/shapes-01-canny-edges.png) +![](fig/shapes-01-canny-edges.png){alt='Output file of Canny edge detection'} Note that the edge output shown in an skimage window may look significantly worse than the image would look @@ -214,8 +209,7 @@ if it were saved to a file due to resampling artefacts in the interactive image The image above is the edges of the junk image, saved in a PNG file. 
Here is how the same image looks when displayed in an skimage output window: -![Output window of Canny edge detection](../fig/shapes-01-canny-edge-output.png) - +![](fig/shapes-01-canny-edge-output.png){alt='Output window of Canny edge detection'} ## Interacting with the image viewer using viewer plugins @@ -228,7 +222,7 @@ based on the contents of the image(s) to be processed. Here is an image of some glass beads that we can use as input into a Canny edge detection program: -![Beads image](../data/beads.jpg) +![](data/beads.jpg){alt='Beads image'} We could use the `code/edge-detection/CannyEdge.py` program above to find edges in this image. @@ -242,7 +236,7 @@ create a viewer plugin that uses skimage *sliders*, that allow us to vary the function parameters while the program is running. In other words, we can write a program that presents us with a window like this: -![Canny UI](../fig/beads-canny-ui.png) +![](fig/beads-canny-ui.png){alt='Canny UI'} Then, when we run the program, we can use the sliders to vary the values of the sigma and threshold parameters @@ -260,10 +254,10 @@ The added complexity comes from setting up the sliders for the parameters that were previously read from the command line: In particular, we have added -* The `canny()` filter function that returns an edge image, -* The `cannyPlugin` plugin object, to which we add -* The sliders for *sigma*, and *low* and *high threshold* values, and -* The main program, i.e., the code that is executed when the program runs. +- The `canny()` filter function that returns an edge image, +- The `cannyPlugin` plugin object, to which we add +- The sliders for *sigma*, and *low* and *high threshold* values, and +- The main program, i.e., the code that is executed when the program runs. We will look at the main program part first, and then return to writing the plugin. 
The first several lines of the main program are easily recognizable at this point: @@ -271,7 +265,7 @@ saving the command-line argument, reading the image in grayscale, and creating a window. -~~~ +```python """ * Python script to demonstrate Canny edge detection * with sliders to adjust the thresholds. @@ -288,8 +282,7 @@ import sys filename = sys.argv[1] image = iio.imread(uri=filename, mode="L") viewer = plt.imshow(image) -~~~ -{: .language-python} +``` The `skimage.viewer.plugins.Plugin` class is designed to manipulate images. It takes an `image_filter` argument in the constructor that should be a function. @@ -297,12 +290,11 @@ This function should produce a new image as an output, given an image as the first argument, which then will be automatically displayed in the image viewer. -~~~ +```python # Create the plugin and give it a name canny_plugin = skimage.viewer.plugins.Plugin(image_filter=skimage.feature.canny) canny_plugin.name = "Canny Filter Plugin" -~~~ -{: .language-python} +``` We want to interactively modify the parameters of the filter function interactively. Skimage allows us to further enrich the plugin by adding widgets, like @@ -314,7 +306,7 @@ the filter function is called with the updated parameters. This function is also called a callback function. The following code adds sliders for `sigma`, `low_threshold` and `high_thresholds`. -~~~ +```python # Add sliders for the parameters canny_plugin += skimage.viewer.widgets.Slider( name="sigma", low=0.0, high=7.0, value=2.0 @@ -325,8 +317,7 @@ canny_plugin += skimage.viewer.widgets.Slider( canny_plugin += skimage.viewer.widgets.Slider( name="high_threshold", low=0.0, high=1.0, value=0.2 ) -~~~ -{: .language-python} +``` A slider is a widget that lets you choose a number by dragging a handle along a line. On the left side of the line, we have the lowest value, @@ -341,121 +332,139 @@ so where the handle is located when the plugin is started. 
Adding the slider to the plugin makes the values available as parameters to the `filter_function`. -> ## How does the plugin know how to call the filter function with the parameters? -> -> The filter function will be called with the slider parameters -> according to their *names* as *keyword* arguments. -> So it is very important to name the sliders appropriately. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## How does the plugin know how to call the filter function with the parameters? + +The filter function will be called with the slider parameters +according to their *names* as *keyword* arguments. +So it is very important to name the sliders appropriately. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: Finally, we add the plugin the viewer and display the resulting user interface: -~~~ +```python # add the plugin to the viewer and show the window viewer += canny_plugin viewer.show() -~~~ -{: .language-python} +``` Here is the result of running the preceding program on the beads image, with a sigma value 1.0, low threshold value 0.1 and high threshold value 0.3. The image shows the edges in an output file. -![Beads edges (file)](../fig/beads-out.png) - -> ## Applying Canny edge detection to another image (5 min) -> -> Now, run the program above on the image of coloured shapes, -> `data/shapes-01.jpg`. -> Use a sigma of 1.0 and adjust low and high threshold sliders -> to produce an edge image that looks like this: -> -> ![coloured shape edges](../fig/shapes-01-canny-track-edges.png) -> -> What values for the low and high threshold values did you use to -> produce an image similar to the one above? -> -> > ## Solution -> > -> > The coloured shape edge image above was produced with a low threshold -> > value of 0.05 and a high threshold value of 0.07. -> > You may be able to achieve similar results with other threshold values. 
-> {: .solution} -{: .challenge} - -> ## Using sliders for thresholding (30 min) -> -> Now, let us apply what we know about creating sliders to another, -> similar situation. -> Consider this image of a collection of maize seedlings, -> and suppose we wish to use simple fixed-level thresholding to -> mask out everything that is not part of one of the plants. -> -> ![Maize roots image](../data/maize-roots-grayscale.jpg) -> -> To perform the thresholding, we could first create a histogram, -> then examine it, and select an appropriate threshold value. -> Here, however, let us create an application with a slider to set the threshold value. -> Create a program that reads in the image, -> displays it in a window with a slider, -> and allows the slider value to vary the threshold value used. -> You will find the image at `data/maize-roots-grayscale.jpg`. -> -> > ## Solution -> > -> > Here is a program that uses a slider to vary the threshold value used in -> > a simple, fixed-level thresholding process. -> > -> > ~~~ -> > """ -> > * Python program to use a slider to control fixed-level -> > * thresholding value. 
-> > * -> > * usage: python interactive_thresholding.py -> > """ -> > -> > import imageio.v3 as iio -> > import skimage -> > import skimage.viewer -> > import sys -> > -> > filename = sys.argv[1] -> > -> > -> > def filter_function(image, sigma, threshold): -> > masked = image.copy() -> > masked[skimage.filters.gaussian(image, sigma=sigma) <= threshold] = 0 -> > return masked -> > -> > -> > smooth_threshold_plugin = skimage.viewer.plugins.Plugin( -> > image_filter=filter_function -> > ) -> > -> > smooth_threshold_plugin.name = "Smooth and Threshold Plugin" -> > -> > smooth_threshold_plugin += skimage.viewer.widgets.Slider( -> > "sigma", low=0.0, high=7.0, value=1.0 -> > ) -> > smooth_threshold_plugin += skimage.viewer.widgets.Slider( -> > "threshold", low=0.0, high=1.0, value=0.5 -> > ) -> > -> > image = iio.imread(uri=filename, mode="L") -> > -> > viewer = skimage.viewer.ImageViewer(image=image) -> > viewer += smooth_threshold_plugin -> > viewer.show() -> > ~~~ -> > {: .language-python} -> > -> > Here is the output of the program, -> > blurring with a sigma of 1.5 and a threshold value of 0.45: -> > -> > ![Thresholded maize roots](../fig/maize-roots-threshold.png) -> {: .solution} -{: .challenge} +![](fig/beads-out.png){alt='Beads edges (file)'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Applying Canny edge detection to another image (5 min) + +Now, run the program above on the image of coloured shapes, +`data/shapes-01.jpg`. +Use a sigma of 1.0 and adjust low and high threshold sliders +to produce an edge image that looks like this: + +![](fig/shapes-01-canny-track-edges.png){alt='coloured shape edges'} + +What values for the low and high threshold values did you use to +produce an image similar to the one above? + +::::::::::::::: solution + +## Solution + +The coloured shape edge image above was produced with a low threshold +value of 0.05 and a high threshold value of 0.07. +You may be able to achieve similar results with other threshold values. 
+ + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::: challenge + +## Using sliders for thresholding (30 min) + +Now, let us apply what we know about creating sliders to another, +similar situation. +Consider this image of a collection of maize seedlings, +and suppose we wish to use simple fixed-level thresholding to +mask out everything that is not part of one of the plants. + +![](data/maize-roots-grayscale.jpg){alt='Maize roots image'} + +To perform the thresholding, we could first create a histogram, +then examine it, and select an appropriate threshold value. +Here, however, let us create an application with a slider to set the threshold value. +Create a program that reads in the image, +displays it in a window with a slider, +and allows the slider value to vary the threshold value used. +You will find the image at `data/maize-roots-grayscale.jpg`. + +::::::::::::::: solution + +## Solution + +Here is a program that uses a slider to vary the threshold value used in +a simple, fixed-level thresholding process. + +```python +""" + * Python program to use a slider to control fixed-level + * thresholding value. 
+ * + * usage: python interactive_thresholding.py +""" + +import imageio.v3 as iio +import skimage +import skimage.viewer +import sys + +filename = sys.argv[1] + + +def filter_function(image, sigma, threshold): + masked = image.copy() + masked[skimage.filters.gaussian(image, sigma=sigma) <= threshold] = 0 + return masked + + +smooth_threshold_plugin = skimage.viewer.plugins.Plugin( + image_filter=filter_function +) + +smooth_threshold_plugin.name = "Smooth and Threshold Plugin" + +smooth_threshold_plugin += skimage.viewer.widgets.Slider( + "sigma", low=0.0, high=7.0, value=1.0 +) +smooth_threshold_plugin += skimage.viewer.widgets.Slider( + "threshold", low=0.0, high=1.0, value=0.5 +) + +image = iio.imread(uri=filename, mode="L") + +viewer = skimage.viewer.ImageViewer(image=image) +viewer += smooth_threshold_plugin +viewer.show() +``` + +Here is the output of the program, +blurring with a sigma of 1.5 and a threshold value of 0.45: + +![](fig/maize-roots-threshold.png){alt='Thresholded maize roots'} + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: Keep this plugin technique in your image processing "toolbox." You can use sliders (or other interactive elements, @@ -474,3 +483,5 @@ As with blurring, there are other options for finding edges in skimage. These include `skimage.filters.sobel()`, which you will recognise as part of the Canny method. Another choice is `skimage.filters.laplace()`. + + diff --git a/_extras/guide.md b/instructors/instructor-notes.md similarity index 67% rename from _extras/guide.md rename to instructors/instructor-notes.md index 12bbd3d34..4201a2de9 100644 --- a/_extras/guide.md +++ b/instructors/instructor-notes.md @@ -1,57 +1,66 @@ --- -title: "Instructor Notes" +title: Instructor Notes --- ## Working with Jupyter notebooks -* This lesson is designed to be taught using Jupyter notebooks. We recommend that instructors guide learners to create a new Jupyter notebook for each episode. 
+- This lesson is designed to be taught using Jupyter notebooks. We recommend that instructors guide learners to create a new Jupyter notebook for each episode. -* Python `import` statements typically appear in the first code block near the top of each episode. In some cases, the purpose of specific libraries is briefly explained as part of the exercises. +- Python `import` statements typically appear in the first code block near the top of each episode. In some cases, the purpose of specific libraries is briefly explained as part of the exercises. -* The possibility of executing the code cells in a notebook in arbitrary order can cause confusion. Using the "restart kernel and run all cells" feature is one way to accomplish linear execution of the notebook and may help locate and identify coding issues. +- The possibility of executing the code cells in a notebook in arbitrary order can cause confusion. Using the "restart kernel and run all cells" feature is one way to accomplish linear execution of the notebook and may help locate and identify coding issues. -* Many episodes in this lesson load image files from disk. To avoid name clashes in episodes that load multiple image files, we have used unique variable names (instead of generic names such as `image` or `img`). When copying code snippets between exercises, the variable names may have to be changed. The maintainers are keen to receive feedback on whether this convention proves practical in workshops. +- Many episodes in this lesson load image files from disk. To avoid name clashes in episodes that load multiple image files, we have used unique variable names (instead of generic names such as `image` or `img`). When copying code snippets between exercises, the variable names may have to be changed. The maintainers are keen to receive feedback on whether this convention proves practical in workshops. 
## Working with imageio and skimage -* `imageio.v3` allows to load images in different modes by passing the `mode=` argument to `imread()`. Depending on the image file and mode, the `dtype` of the resulting Numpy array can be different (e.g., `dtype('uint8')` or `dtype('float64')`. In the lesson, `skimage.util.img_as_ubyte()` and `skimage.util.img_as_float()` are used to convert the data type when necessary. +- `imageio.v3` allows to load images in different modes by passing the `mode=` argument to `imread()`. Depending on the image file and mode, the `dtype` of the resulting Numpy array can be different (e.g., `dtype('uint8')` or `dtype('float64')`. In the lesson, `skimage.util.img_as_ubyte()` and `skimage.util.img_as_float()` are used to convert the data type when necessary. -* Some `skimage` functions implicitly convert the pixel values to floating-point numbers. Several callout boxes have been added throughout the lesson to raise awareness, but this may still prompt questions from learners. +- Some `skimage` functions implicitly convert the pixel values to floating-point numbers. Several callout boxes have been added throughout the lesson to raise awareness, but this may still prompt questions from learners. -* In certain situations, `imread()` returns a read-only array. This depends on the image file type and on the backend (e.g., Pillow). If a read-only error is encountered, `image = np.array(image)` can be used to create a writable copy of the array before manipulating its pixel values. +- In certain situations, `imread()` returns a read-only array. This depends on the image file type and on the backend (e.g., Pillow). If a read-only error is encountered, `image = np.array(image)` can be used to create a writable copy of the array before manipulating its pixel values. -* Be aware that learners might get surprising results in the *Keeping only low intensity pixels* exercise, if `plt.imshow` is called without the `vmax` parameter. 
+- Be aware that learners might get surprising results in the *Keeping only low intensity pixels* exercise, if `plt.imshow` is called without the `vmax` parameter. A detailed explanation is given in the *Plotting single channel images (cmap, vmin, vmax)* callout box. ## Blurring -* Take care to avoid mixing up the term "edge" to describe the edges of objects - _within_ an image and the outer boundaries of the images themselves. Lack of a clear distinction here may be confusing for learners. +- Take care to avoid mixing up the term "edge" to describe the edges of objects + *within* an image and the outer boundaries of the images themselves. Lack of a clear distinction here may be confusing for learners. ## Questions from Learners ### Q: Where would I find out that coordinates are `x,y` not `r,c`? + A: In an image viewer, hover your cursor over top-left (origin) the move down and see which number increases. ### Q: Why does saving the image take such a long time? (skimage-images/saving images PNG example) + A: It is a large image. ### Q: Are the coordinates represented `x,y` or `r,c` in the code (e.g. in `array.shape`)? -A: Always `r,c` with numpy arrays, unless clearly specified otherwise - only represented `x,y` when image is displayed by a viewer. -Take home is don’t rely on it - always check! + +A: Always `r,c` with numpy arrays, unless clearly specified otherwise - only represented `x,y` when image is displayed by a viewer. +Take home is don't rely on it - always check! ### Q: What if I want to increase size? How does `skimage` upsample? (image resizing) -A: When resizing or rescaling an image, `skimage` performs interpolation to up-size or down-size the image. Technically, this is done by fitting a [spline](https://en.wikipedia.org/wiki/Spline_(mathematics)) function to the image data. The spline function is based on the intensity values in the original image and can be used to approximate the intensity at any given coordinate in the resized/rescaled image. 
Note that the intensity values in the new image are an approximation of the original values but should not be treated as the actual, observed data. `skimage.transform.resize` has a number of optional parameters that allow the user to control, e.g., the order of the spline interpolation. The [scikit-image documentation](https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.resize) provides additional information on other parameters. -### Q: Why are some lines missing from the sudoku image when it is displayed inline in a Jupyter Notebook? (skimage-images/low intensity pixels exercise) +A: When resizing or rescaling an image, `skimage` performs interpolation to up-size or down-size the image. Technically, this is done by fitting a [spline](https://en.wikipedia.org/wiki/Spline_\(mathematics\)) function to the image data. The spline function is based on the intensity values in the original image and can be used to approximate the intensity at any given coordinate in the resized/rescaled image. Note that the intensity values in the new image are an approximation of the original values but should not be treated as the actual, observed data. `skimage.transform.resize` has a number of optional parameters that allow the user to control, e.g., the order of the spline interpolation. The [scikit-image documentation](https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.resize) provides additional information on other parameters. + +### Q: Why are some lines missing from the sudoku image when it is displayed inline in a Jupyter Notebook? (skimage-images/low intensity pixels exercise) + A: They are actually present in image but not shown due to interpolation. ### Q: Does blurring take values of pixels already blurred, or is blurring done on original pixel values only? + A: Blurring is done on original pixel values only. ### Q: Can you blur while retaining edges? 
+ A: Yes, many different filters/kernels exist, some of which are designed to be edge-preserving. ## Troubleshooting -Learners reported a problem on some operating systems, that Shift+Enter is prevented from running a cell in Jupyter when the caps lock key is active. +Learners reported a problem on some operating systems, that Shift\+Enter is prevented from running a cell in Jupyter when the caps lock key is active. + + diff --git a/instructors/prereqs.md b/instructors/prereqs.md new file mode 100644 index 000000000..a8e190f31 --- /dev/null +++ b/instructors/prereqs.md @@ -0,0 +1,55 @@ +--- +title: Prerequisites +--- + +This lesson assumes you have a working knowledge of Python and some previous exposure to the Bash shell. + +These requirements can be fulfilled by: + +1. completing a Software Carpentry Python workshop **or** +2. completing a Data Carpentry Ecology workshop (with Python) **and** a Data Carpentry Genomics workshop **or** +3. coursework in or independent learning of both Python and the Bash shell. + +### Bash shell skills + +The skill set listed below is covered in any Software Carpentry workshop, as well +as in Data Carpentry's Genomics workshop. These skills can also be learned +through coursework or independent learning. + +Be able to: + +- Identify and navigate to your home directory. +- Identify your current working directory. +- Navigating directories using `pwd`, `ls`, `cd `, and `cd ..` +- Run a Python script from the command line. + +### Python skills + +This skill set listed below is covered in both Software Carpentry's Python workshop and +in Data Carpentry's Ecology workshop with Python. These skills can also be learned +through coursework or independent learning. + +Be able to: + +- Use the assignment operator to create `int`, `float`, and `str` variables. +- Perform basic arithmetic operations (e.g. addition, subtraction) on variables. +- Convert strings to ints or floats where appropriate. 
+- Create a `list` and alter lists by appending, inserting, or removing values. +- Use indexing and slicing to access elements of strings, lists, and Numpy arrays. +- Use good coding practices to comment your code and choose appropriate variable names. +- Write a `for` loop that increments a variable. +- Write conditional statements using `if`, `elif`, and `else`. +- Use comparison operators (`==`, `!=`, `<`, `<=`, `>`, `>=`) in conditional statements. +- Read data from a file using `read()`, `readline()`, and `readlines()`. +- Open, read from, write to, and close input and output files. +- Use `print()` and `len()` to inspect variables. + +The following skills are useful, but not required: + +- Apply a function to an entire Numpy array or to a single array axis. +- Write a user-defined function. + +If you are signed up, or considering signing up for a workshop, and aren't sure whether you meet these reqirements, please +get in touch with the workshop instructors or host. + + diff --git a/_extras/discuss.md b/learners/discuss.md similarity index 95% rename from _extras/discuss.md rename to learners/discuss.md index cea0a4ef3..c4e82490e 100644 --- a/_extras/discuss.md +++ b/learners/discuss.md @@ -24,12 +24,11 @@ GUI](https://matplotlib.org/stable/users/interactive.html). The [`ipympl` package](https://github.com/matplotlib/ipympl) is required to enable the interactive features of Matplotlib in Jupyter notebooks and in Jupyter Lab. This package is included in the setup -instructions, and the backend can be enabled using the `%matplotlib -widget` magic. +instructions, and the backend can be enabled using the `%matplotlib widget` magic. 
The maintainers discussed the possibility of using [napari](https://napari.org/) as an image viewer in the lesson, acknowledging its growing popularity -and some of the advantages it holds over the `matplotlib`-based +and some of the advantages it holds over the `matplotlib`\-based approach, especially for working with image data in more than two dimensions. However, at the time of discussion, napari was still in an alpha state of development, and could not be relied on for easy and @@ -39,3 +38,5 @@ well-suited to use in an official Data Carpentry curriculum. The lesson Maintainers and/or Curriculum Advisory Committee (when it exists) will monitor the progress of napari and other image viewers, and may opt to adopt a new platform in future. + + diff --git a/reference.md b/learners/reference.md similarity index 98% rename from reference.md rename to learners/reference.md index dd18c2466..a41011cf5 100644 --- a/reference.md +++ b/learners/reference.md @@ -1,150 +1,120 @@ --- -layout: reference +title: 'FIXME' --- ## Glossary +## Glossary + (Some definitions are taken from [Glosario](https://glosario.carpentries.org). Follow the links from terms to see definitions in languages other than English.) -{:auto_ids} +{:auto\_ids} adaptive thresholding : thresholding that uses a cut-off value that varies for pixels in different regions of the image. - additive colour model : a colour model that predicts the appearance of colours by summing the numeric representations of the component colours. - bacterial colony : a visible cluster of bacteria growing on the surface of or within a solid medium, presumably cultured from a single cell. - binary image : an image of pixels with only two possible values, 0 and 1. Typically, the two colours used for a binary image are black and white. - [bit](https://glosario.carpentries.org/en/#bit) : a unit of information representing alternatives, yes/no, true/false. In computing a state of either 0 or 1. 
- blur : the averaging of pixel intensities within a neighbourhood. This has the effect of "softening" the features of the image, reducing noise and finer detail. - BMP (bitmap image file) : a raster graphics image file format used to store bitmap digital images, independently of the display device. - bounding box : the smallest enclosing box for a set of points. - [byte](https://glosario.carpentries.org/en/#byte) : a unit of digital information that typically consists of eight binary digits, or bits. - colorimetrics : the processing and analysis of objects based on their colour. - compression : a class of data encoding methods that aims to reduce the size of a file while retaining some or all of the information it contains. - channel : a set of pixel intensities within an image that were measured in the same way e.g. at a given wavelength. - crop : the removal of unwanted outer areas from an image. - colour histogram : a representation of the number of pixels that have colours in each of a fixed list of colour ranges. - edge detection : a variety of methods that attempt to automatically identify the boundaries of objects within an image. - fixed-level thresholding : thresholding that uses a single, constant cut-off value for every pixel in the image. - grayscale : an image in which the value of each pixel is a single value representing only the amount of light (or intensity) of that pixel. - [histogram](https://glosario.carpentries.org/en/#histogram) : a graphical representation of the distribution of a set of numeric data, usually a vertical bar graph. - image segmentation : the process of dividing an image into multiple sections, to be processed or analysed independently. - intensity : the value measured at a given pixel in the image. 
- JPEG : a commonly used method of lossy compression for digital images, particularly for those images produced by digital photography - kernel : a matrix, usually relatively small, defining a neighbourhood of pixel intensities that will be considered during blurring, edge detection, and other operations. - left-hand coordinate system : a system of coordinates where the origin is at the top-left extreme of the image, and coordinates increase as you move down the y axis. - lossy compression : a class of data compression methods that uses inexact approximations and partial data discarding to represent the content. - lossless compression : a class of data compression methods that allows the original data to be perfectly reconstructed from the compressed data. - maize : a common crop plant grown in many regions of the world. Also known as corn. - mask : a binary matrix, usually of the same dimensions as the target image, representing which pixels should be included and excluded in further processing and analysis. - morphometrics : the processing and analysis of objects based on their size and shape. - noise : random variation of brightness or colour information in images. An undesirable by-product of image capture that obscures the desired information. - pixel : the individual units of intensity that make up an image. - [raster graphics](https://glosario.carpentries.org/en/#raster_image) : images stored as a matrix of pixels. - RGB colour model : an additive colour model describing colour in a image with a combination of pixel intensities in three channels: red, green, and blue. - thresholding : the process of creating a binary version of a grayscale image, based on whether pixel values fall above or below a given limit or cut-off value. 
- TIFF (Tagged Image File Format) : a computer file format for storing raster graphics images; also - abbreviated TIF - +abbreviated TIF titration : a common laboratory method of quantitative chemical analysis to determine the concentration of an identified analyte (a substance to be analyzed) + + diff --git a/learners/setup.md b/learners/setup.md new file mode 100644 index 000000000..334f596bc --- /dev/null +++ b/learners/setup.md @@ -0,0 +1,137 @@ +--- +title: Setup +permalink: /setup/ +--- + +Before joining the workshop or following the lesson, please complete the data and software setup described in this page. + +## Data + +The example images used in this lesson are available on [FigShare](https://figshare.com/). +To download the data, please visit [the dataset page for this workshop][figshare-data] +and click the "Download all" button. +Unzip the downloaded file, and save the contents as a folder called `data` somewhere you will easily find it again, +e.g. your Desktop or a folder you have created for using in this workshop. +(The name `data` is optional but recommended, as this is the name we will use to refer to the folder throughout the lesson.) + +## Software + +1. Download and install the latest [Anaconda + distribution](https://www.anaconda.com/distribution/) for your + operating system. Make sure to choose the Python 3 version (as + opposed to the one with Python 2). If you wish to use an existing + installation, be sure to upgrade your scikit-image to at least 0.19. + You can upgrade to the latest scikit-image using the shell command that follows. + + ::::::::::::::::::::::::::::::::::::::::: callout + + ## Updating scikit-image in an existing Anaconda distribution + + ```shell + conda upgrade -y scikit-image + ``` + + :::::::::::::::::::::::::::::::::::::::::::::::::: + +2. This lesson uses Matplotlib features to display images, and some + interactive features will be valuable. 
To enable the interactive + tools in JupyterLab, the `ipympl` package is required. The package + can be installed with the command + + ```shell + conda install -c conda-forge ipympl + ``` + + ::::::::::::::::::::::::::::::::::::::::: callout + + ## Enabling the `ipympl` backend in Jupyter notebooks + + The `ipympl` backend can be enabled with the `%matplotlib` Jupyter + magic. Put the following command in a cell in your notebooks + (e.g., at the top) and execute the cell before any plotting commands. + + ```python + %matplotlib widget + ``` + + :::::::::::::::::::::::::::::::::::::::::::::::::: + + ::::::::::::::::::::::::::::::::::::::::: callout + + ## Older JupyterLab versions + + If you are using an older version of JupyterLab, you may also need + to install the labextensions manually, as explained in the [README + file](https://github.com/matplotlib/ipympl#readme) for the `ipympl` + package. + + + :::::::::::::::::::::::::::::::::::::::::::::::::: + +3. Open a Jupyter notebook: + + :::::::::::::: solution + + ## Instructions for Linux \& Mac + + Open a terminal and type `jupyter lab`. + + + ::::::::::::::::::::::::: + + :::::::::::::: solution + + ## Instructions for Windows + + Launch the Anaconda Prompt program and type `jupyter lab`. + (Running this command on the standard Command Prompt will return an error: + `'conda' is not recognized as an internal or external command, operable program or batch file.`) + + + ::::::::::::::::::::::::: + + After Jupyter Lab has launched, click the "Python 3" button under "Notebook" in the launcher window, + or use the "File" menu, to open a new Python 3 notebook. + +4. 
To test your environment, run the following lines in a cell of the notebook: + + ```python + import imageio.v3 as iio + from skimage import transform + import matplotlib.pyplot as plt + %matplotlib widget + + # load an image + image = iio.imread(uri='data/colonies-01.tif') + + # rotate it by 45 degrees + rotated = transform.rotate(image=image, angle=45) + + # display the original image and its rotated version side by side + fig, ax = plt.subplots(1, 2) + ax[0].imshow(image) + ax[1].imshow(rotated) + ``` + + Upon execution of the cell, a figure with two images should be displayed in an interactive widget. When hovering over the images with the mouse pointer, the pixel coordinates and colour values are displayed below the image. + + :::::::::::::: solution + + ## Running Cells in a Notebook + + ![](fig/jupyter_overview.png){alt='Overview of the Jupyter Notebook graphical user interface'} + To run Python code in a Jupyter notebook cell, click on a cell in the notebook + (or add a new one by clicking the `+` button in the toolbar), + make sure that the cell type is set to "Code" (check the dropdown in the toolbar), + and add the Python code in that cell. + After you have added the code, + you can run the cell by selecting "Run" -> "Run selected cell" in the top menu, + or pressing Shift\+Enter. + + + ::::::::::::::::::::::::: + +[figshare-data]: https://figshare.com/articles/dataset/Data_Carpentry_Image_Processing_Data_beta_/19260677 + + + diff --git a/profiles/learner-profiles.md b/profiles/learner-profiles.md new file mode 100644 index 000000000..434e335aa --- /dev/null +++ b/profiles/learner-profiles.md @@ -0,0 +1,5 @@ +--- +title: FIXME +--- + +This is a placeholder file. Please add content here. 
diff --git a/setup.md b/setup.md deleted file mode 100644 index 2ae67e289..000000000 --- a/setup.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -layout: page -title: Setup -permalink: /setup/ ---- - -Before joining the workshop or following the lesson, please complete the data and software setup described in this page. - - -## Data - -The example images used in this lesson are available on [FigShare](https://figshare.com/). -To download the data, please visit [the dataset page for this workshop][figshare-data] -and click the "Download all" button. -Unzip the downloaded file, and save the contents as a folder called `data` somewhere you will easily find it again, -e.g. your Desktop or a folder you have created for using in this workshop. -(The name `data` is optional but recommended, as this is the name we will use to refer to the folder throughout the lesson.) - -[figshare-data]: https://figshare.com/articles/dataset/Data_Carpentry_Image_Processing_Data_beta_/19260677 - - -## Software - -1. Download and install the latest [Anaconda - distribution](https://www.anaconda.com/distribution/) for your - operating system. Make sure to choose the Python 3 version (as - opposed to the one with Python 2). If you wish to use an existing - installation, be sure to upgrade your scikit-image to at least 0.19. - You can upgrade to the latest scikit-image using the shell command that follows. - - > ## Updating scikit-image in an existing Anaconda distribution - > - > ~~~ - > conda upgrade -y scikit-image - > ~~~ - > {: .language-shell} - {: .callout} - -2. This lesson uses Matplotlib features to display images, and some - interactive features will be valuable. To enable the interactive - tools in JupyterLab, the `ipympl` package is required. 
The package - can be installed with the command - - ~~~ - conda install -c conda-forge ipympl - ~~~ - {: .language-shell} - - > ## Enabling the `ipympl` backend in Jupyter notebooks - > - > The `ipympl` backend can be enabled with the `%matplotlib` Jupyter - > magic. Put the following command in a cell in your notebooks - > (e.g., at the top) and execute the cell before any plotting commands. - > - > ~~~ - > %matplotlib widget - > ~~~ - > {: .language-python} - {: .callout} - - > ## Older JupyterLab versions - > - > If you are using an older version of JupyterLab, you may also need - > to install the labextensions manually, as explained in the [README - > file](https://github.com/matplotlib/ipympl#readme) for the `ipympl` - > package. - {: .callout} - -3. Open a Jupyter notebook: - - > ## Instructions for Linux & Mac - > - > Open a terminal and type `jupyter lab`. - {: .solution } - - > ## Instructions for Windows - > - > Launch the Anaconda Prompt program and type `jupyter lab`. - > (Running this command on the standard Command Prompt will return an error: - > `'conda' is not recognized as an internal or external command, operable program or batch file.`) - {: .solution } - - After Jupyter Lab has launched, click the "Python 3" button under "Notebook" in the launcher window, - or use the "File" menu, to open a new Python 3 notebook. - -4. To test your environment, run the following lines in a cell of the notebook: - ~~~ - import imageio.v3 as iio - from skimage import transform - import matplotlib.pyplot as plt - %matplotlib widget - - # load an image - image = iio.imread(uri='data/colonies-01.tif') - - # rotate it by 45 degrees - rotated = transform.rotate(image=image, angle=45) - - # display the original image and its rotated version side by side - fig, ax = plt.subplots(1, 2) - ax[0].imshow(image) - ax[1].imshow(rotated) - ~~~ - {: .language-python} - Upon execution of the cell, a figure with two images should be displayed in an interactive widget. 
When hovering over the images with the mouse pointer, the pixel coordinates and colour values are displayed below the image. - - > ## Running Cells in a Notebook - > - > - > ![Overview of the Jupyter Notebook graphical user interface](../fig/jupyter_overview.png) - > To run Python code in a Jupyter notebook cell, click on a cell in the notebook - > (or add a new one by clicking the `+` button in the toolbar), - > make sure that the cell type is set to "Code" (check the dropdown in the toolbar), - > and add the Python code in that cell. - > After you have added the code, - > you can run the cell by selecting "Run" -> "Run selected cell" in the top menu, - > or pressing Shift+Enter. - {: .solution } diff --git a/site/README.md b/site/README.md new file mode 100644 index 000000000..42997e3d0 --- /dev/null +++ b/site/README.md @@ -0,0 +1,2 @@ +This directory contains rendered lesson materials. Please do not edit files +here.