From ff22f47b90fe84ed31b44adbaddb9878f35e1fdb Mon Sep 17 00:00:00 2001
From: 0815Cracky <0815Cracky@gmail.com>
Date: Tue, 27 Feb 2024 11:46:37 +0100
Subject: [PATCH] update
---
.github/ISSUE_TEMPLATE/bug_report.yml | 79 ++
.github/ISSUE_TEMPLATE/config.yml | 1 +
.github/ISSUE_TEMPLATE/feature_request.yml | 31 +
.github/PULL_REQUEST_TEMPLATE.md | 26 +
.github/dependabot.yml | 6 +
.github/stale.yml | 24 +
.github/workflows/deploy-docker.yml | 61 ++
.../workflows/github-clone-count-badge.yml | 90 ++
.../workflows/github-traffic-count-badge.yml | 83 ++
.gitignore | 157 ++++
.pre-commit-config.yaml | 26 +
.vscode/launch.json | 16 +
CLONE.md | 13 +
CODE_OF_CONDUCT.md | 76 ++
CONTRIBUTING.md | 110 +++
DELETE_PYCACHE.bat | 5 +
Dockerfile | 43 +
README.md | 743 +++++++++++++++
TRAFFIC.md | 11 +
.../TwitchChannelPointsMiner.py | 496 ++++++++++
TwitchChannelPointsMiner/__init__.py | 7 +
.../classes/AnalyticsServer.py | 295 ++++++
TwitchChannelPointsMiner/classes/Chat.py | 105 +++
TwitchChannelPointsMiner/classes/Discord.py | 24 +
.../classes/Exceptions.py | 14 +
TwitchChannelPointsMiner/classes/Matrix.py | 40 +
TwitchChannelPointsMiner/classes/Pushover.py | 30 +
TwitchChannelPointsMiner/classes/Settings.py | 53 ++
TwitchChannelPointsMiner/classes/Telegram.py | 29 +
TwitchChannelPointsMiner/classes/Twitch.py | 859 ++++++++++++++++++
.../classes/TwitchLogin.py | 360 ++++++++
.../classes/TwitchWebSocket.py | 65 ++
.../classes/WebSocketsPool.py | 434 +++++++++
TwitchChannelPointsMiner/classes/Webhook.py | 26 +
TwitchChannelPointsMiner/classes/__init__.py | 0
.../classes/entities/Bet.py | 315 +++++++
.../classes/entities/Campaign.py | 74 ++
.../classes/entities/Drop.py | 103 +++
.../classes/entities/EventPrediction.py | 94 ++
.../classes/entities/Message.py | 69 ++
.../classes/entities/PubsubTopic.py | 16 +
.../classes/entities/Raid.py | 12 +
.../classes/entities/Stream.py | 107 +++
.../classes/entities/Streamer.py | 284 ++++++
.../classes/entities/__init__.py | 0
TwitchChannelPointsMiner/constants.py | 199 ++++
TwitchChannelPointsMiner/logger.py | 342 +++++++
TwitchChannelPointsMiner/utils.py | 212 +++++
assets/banner.png | Bin 0 -> 105907 bytes
assets/chart-analytics-dark.png | Bin 0 -> 130178 bytes
assets/chart-analytics-light.png | Bin 0 -> 125507 bytes
assets/charts.html | 209 +++++
assets/dark-theme.css | 39 +
assets/prediction.png | Bin 0 -> 30993 bytes
assets/script.js | 389 ++++++++
assets/style.css | 70 ++
example.py | 128 +++
pickle_view.py | 13 +
requirements.txt | 12 +
setup.py | 58 ++
60 files changed, 7183 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
create mode 100644 .github/ISSUE_TEMPLATE/config.yml
create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml
create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
create mode 100644 .github/dependabot.yml
create mode 100644 .github/stale.yml
create mode 100644 .github/workflows/deploy-docker.yml
create mode 100644 .github/workflows/github-clone-count-badge.yml
create mode 100644 .github/workflows/github-traffic-count-badge.yml
create mode 100644 .gitignore
create mode 100644 .pre-commit-config.yaml
create mode 100644 .vscode/launch.json
create mode 100644 CLONE.md
create mode 100644 CODE_OF_CONDUCT.md
create mode 100644 CONTRIBUTING.md
create mode 100644 DELETE_PYCACHE.bat
create mode 100644 Dockerfile
create mode 100644 README.md
create mode 100644 TRAFFIC.md
create mode 100644 TwitchChannelPointsMiner/TwitchChannelPointsMiner.py
create mode 100644 TwitchChannelPointsMiner/__init__.py
create mode 100644 TwitchChannelPointsMiner/classes/AnalyticsServer.py
create mode 100644 TwitchChannelPointsMiner/classes/Chat.py
create mode 100644 TwitchChannelPointsMiner/classes/Discord.py
create mode 100644 TwitchChannelPointsMiner/classes/Exceptions.py
create mode 100644 TwitchChannelPointsMiner/classes/Matrix.py
create mode 100644 TwitchChannelPointsMiner/classes/Pushover.py
create mode 100644 TwitchChannelPointsMiner/classes/Settings.py
create mode 100644 TwitchChannelPointsMiner/classes/Telegram.py
create mode 100644 TwitchChannelPointsMiner/classes/Twitch.py
create mode 100644 TwitchChannelPointsMiner/classes/TwitchLogin.py
create mode 100644 TwitchChannelPointsMiner/classes/TwitchWebSocket.py
create mode 100644 TwitchChannelPointsMiner/classes/WebSocketsPool.py
create mode 100644 TwitchChannelPointsMiner/classes/Webhook.py
create mode 100644 TwitchChannelPointsMiner/classes/__init__.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Bet.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Campaign.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Drop.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/EventPrediction.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Message.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/PubsubTopic.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Raid.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Stream.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/Streamer.py
create mode 100644 TwitchChannelPointsMiner/classes/entities/__init__.py
create mode 100644 TwitchChannelPointsMiner/constants.py
create mode 100644 TwitchChannelPointsMiner/logger.py
create mode 100644 TwitchChannelPointsMiner/utils.py
create mode 100644 assets/banner.png
create mode 100644 assets/chart-analytics-dark.png
create mode 100644 assets/chart-analytics-light.png
create mode 100644 assets/charts.html
create mode 100644 assets/dark-theme.css
create mode 100644 assets/prediction.png
create mode 100644 assets/script.js
create mode 100644 assets/style.css
create mode 100644 example.py
create mode 100644 pickle_view.py
create mode 100644 requirements.txt
create mode 100644 setup.py
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..72244af
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,79 @@
+name: Bug report
+description: Create a report to help us improve
+labels: bug
+
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Describe the bug
+ description: |
+ A clear and concise description of what the bug is.
+ validations:
+ required: true
+ - type: textarea
+ id: steps-to-reproduce
+ attributes:
+ label: Steps to reproduce
+ placeholder: |
+ 1. Go to '...'
+ 2. Click on '....'
+ 3. Scroll down to '....'
+ 4. See error
+ validations:
+ required: true
+ - type: textarea
+ id: expected-behavior
+ attributes:
+ label: Expected behavior
+ description: What do you expect to happen?
+ validations:
+ required: true
+ - type: input
+ id: operating-system
+ attributes:
+ label: Operating system
+ placeholder: Windows 11 Version 21H2 (OS Build 22000.1574)
+ validations:
+ required: true
+ - type: input
+ id: python-version
+ attributes:
+ label: Python version
+ placeholder: "3.11.1"
+ validations:
+ required: true
+ - type: input
+ id: miner-version
+ attributes:
+ label: Miner version
+ placeholder: "1.7.7"
+ validations:
+ required: true
+ - type: textarea
+ id: other-environment-info
+ attributes:
+ label: Other relevant software versions
+ - type: textarea
+ id: logs
+ attributes:
+ label: Logs
+ description: |
+ How to provide a DEBUG log:
+ 1. Set this in your runner script (`run.py`):
+ ```py
+ logger_settings=LoggerSettings(
+ save=True,
+ console_level=logging.INFO,
+ file_level=logging.DEBUG,
+ less=True,
+ ```
+ 2. Start the miner, wait for the error, then stop the miner and post the contents of the log file (`logs\username.log`) to https://gist.github.com/ and post a link here.
+ 3. Create another gist with your console output, just in case. Paste a link here as well.
+ validations:
+ required: true
+ - type: textarea
+ id: other-info
+ attributes:
+ label: Additional context
+ description: Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..3ba13e0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..d4f2a80
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,31 @@
+name: Feature request
+description: Suggest an idea for this project
+labels: enhancement
+
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Is your feature request related to a problem?
+ description: A clear and concise description of what the problem is.
+ placeholder: I'm always frustrated when [...]
+ - type: textarea
+ id: solution
+ attributes:
+ label: Proposed solution
+ description: |
+ Suggest your feature here. What benefit would it bring?
+
+ Do you have any ideas on how to implement it?
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives you've considered
+ description: Suggest any alternative solutions or features you've considered.
+ - type: textarea
+ id: other-info
+ attributes:
+ label: Additional context
+ description: Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..4aef640
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+# Description
+
+Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
+
+Fixes # (issue)
+
+## Type of change
+
+Please delete options that are not relevant.
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality not to work as expected)
+
+# How Has This Been Tested?
+
+Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.
+
+# Checklist:
+
+- [ ] My code follows the style guidelines of this project
+- [ ] I have performed a self-review of my code
+- [ ] I have commented on my code, particularly in hard-to-understand areas
+- [ ] I have made corresponding changes to the documentation (README.md)
+- [ ] My changes generate no new warnings
+- [ ] Any dependent changes have been updated in requirements.txt
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..1230149
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 0000000..073f80c
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,24 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 150
+
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 7
+
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+ - bug
+ - enhancement
+
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
diff --git a/.github/workflows/deploy-docker.yml b/.github/workflows/deploy-docker.yml
new file mode 100644
index 0000000..751b40d
--- /dev/null
+++ b/.github/workflows/deploy-docker.yml
@@ -0,0 +1,61 @@
+name: deploy-docker
+
+on:
+ push:
+ # branches: [master]
+ tags:
+ - '*'
+ workflow_dispatch:
+
+jobs:
+ deploy-docker:
+ name: Deploy Docker Hub
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout source
+ uses: actions/checkout@v4
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3.0.0
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3.0.0
+
+ - name: Login to DockerHub
+ uses: docker/login-action@v3.0.0
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: rdavidoff/twitch-channel-points-miner-v2
+ tags: |
+ type=semver,pattern={{version}},enable=${{ startsWith(github.ref, 'refs/tags') }}
+ type=raw,value=latest
+
+ - name: Build and push AMD64, ARM64, ARMv7
+ id: docker_build
+ uses: docker/build-push-action@v5.1.0
+ with:
+ context: .
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ platforms: linux/amd64,linux/arm64,linux/arm/v7
+ build-args: BUILDX_QEMU_ENV=true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
+ # File size exceeds the maximum allowed 25000 bytes
+ # - name: Docker Hub Description
+ # uses: peter-evans/dockerhub-description@v2
+ # with:
+ # username: ${{ secrets.DOCKER_USERNAME }}
+ # password: ${{ secrets.DOCKER_TOKEN }}
+ # repository: rdavidoff/twitch-channel-points-miner-v2
+
+ - name: Image digest AMD64, ARM64, ARMv7
+ run: echo ${{ steps.docker_build.outputs.digest }}
diff --git a/.github/workflows/github-clone-count-badge.yml b/.github/workflows/github-clone-count-badge.yml
new file mode 100644
index 0000000..00a7e88
--- /dev/null
+++ b/.github/workflows/github-clone-count-badge.yml
@@ -0,0 +1,90 @@
+name: GitHub Clone Count Update Everyday
+
+on:
+ schedule:
+ - cron: "0 */24 * * *"
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: gh login
+ run: echo "${{ secrets.SECRET_TOKEN }}" | gh auth login --with-token
+
+ - name: parse latest clone count
+ run: |
+ curl --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/${{ github.repository }}/traffic/clones \
+ > clone.json
+
+ - name: create gist and download previous count
+ id: set_id
+ run: |
+ if gh secret list | grep -q "GIST_ID"
+ then
+ echo "GIST_ID found"
+ echo ::set-output name=GIST::${{ secrets.GIST_ID }}
+ curl https://gist.githubusercontent.com/${{ github.actor }}/${{ secrets.GIST_ID }}/raw/clone.json > clone_before.json
+ if cat clone_before.json | grep '404: Not Found'; then
+ echo "GIST_ID not valid anymore. Creating another gist..."
+ gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
+ echo $gist_id | gh secret set GIST_ID
+ echo ::set-output name=GIST::$gist_id
+ cp clone.json clone_before.json
+ git rm --ignore-unmatch CLONE.md
+ fi
+ else
+ echo "GIST_ID not found. Creating a gist..."
+ gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
+ echo $gist_id | gh secret set GIST_ID
+ echo ::set-output name=GIST::$gist_id
+ cp clone.json clone_before.json
+ fi
+
+ - name: update clone.json
+ run: |
+ curl https://raw.githubusercontent.com/MShawon/github-clone-count-badge/master/main.py > main.py
+ python3 main.py
+
+ - name: Update gist with latest count
+ run: |
+ content=$(sed -e 's/\\/\\\\/g' -e 's/\t/\\t/g' -e 's/\"/\\"/g' -e 's/\r//g' "clone.json" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')
+ echo '{"description": "${{ github.repository }} clone statistics", "files": {"clone.json": {"content": "'"$content"'"}}}' > post_clone.json
+ curl -s -X PATCH \
+ --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
+ -H "Content-Type: application/json" \
+ -d @post_clone.json https://api.github.com/gists/${{ steps.set_id.outputs.GIST }} > /dev/null 2>&1
+
+ if [ ! -f CLONE.md ]; then
+ shields="https://img.shields.io/badge/dynamic/json?color=success&label=Clone&query=count&url="
+ url="https://gist.githubusercontent.com/${{ github.actor }}/${{ steps.set_id.outputs.GIST }}/raw/clone.json"
+ repo="https://github.com/MShawon/github-clone-count-badge"
+ echo ''> CLONE.md
+ echo '
+ **Markdown**
+
+ ```markdown' >> CLONE.md
+ echo "[]($repo)" >> CLONE.md
+ echo '
+ ```
+
+ **HTML**
+ ```html' >> CLONE.md
+ echo "
" >> CLONE.md
+ echo '```' >> CLONE.md
+
+ git add CLONE.md
+ git config --global user.name "GitHub Action"
+ git config --global user.email "action@github.com"
+ git commit -m "create clone count badge"
+ fi
+
+ - name: Push
+ uses: ad-m/github-push-action@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/github-traffic-count-badge.yml b/.github/workflows/github-traffic-count-badge.yml
new file mode 100644
index 0000000..07b54cd
--- /dev/null
+++ b/.github/workflows/github-traffic-count-badge.yml
@@ -0,0 +1,83 @@
+name: GitHub Traffic Count Update Everyday
+
+on:
+ schedule:
+ - cron: "0 */24 * * *"
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: gh login
+ run: echo "${{ secrets.SECRET_TOKEN }}" | gh auth login --with-token
+
+ - name: parse latest traffic count
+ run: |
+ curl --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/${{ github.repository }}/traffic/views \
+ > traffic.json
+ - name: create gist and download previous count
+ id: set_id
+ run: |
+ if gh secret list | grep -q "TRAFFIC_ID"
+ then
+ echo "TRAFFIC_ID found"
+ echo ::set-output name=GIST::${{ secrets.TRAFFIC_ID }}
+ curl https://gist.githubusercontent.com/${{ github.actor }}/${{ secrets.TRAFFIC_ID }}/raw/traffic.json > traffic_before.json
+ if cat traffic_before.json | grep '404: Not Found'; then
+ echo "TRAFFIC_ID not valid anymore. Creating another gist..."
+ traffic_id=$(gh gist create traffic.json | awk -F / '{print $NF}')
+ echo $traffic_id | gh secret set TRAFFIC_ID
+ echo ::set-output name=GIST::$traffic_id
+ cp traffic.json traffic_before.json
+ git rm --ignore-unmatch TRAFFIC.md
+ fi
+ else
+ echo "TRAFFIC_ID not found. Creating a gist..."
+ traffic_id=$(gh gist create traffic.json | awk -F / '{print $NF}')
+ echo $traffic_id | gh secret set TRAFFIC_ID
+ echo ::set-output name=GIST::$traffic_id
+ cp traffic.json traffic_before.json
+ fi
+ - name: update traffic.json
+ run: |
+ curl https://gist.githubusercontent.com/MShawon/d37c49ee4ce03f64b92ab58b0cec289f/raw/traffic.py > traffic.py
+ python3 traffic.py
+ - name: Update gist with latest count
+ run: |
+ content=$(sed -e 's/\\/\\\\/g' -e 's/\t/\\t/g' -e 's/\"/\\"/g' -e 's/\r//g' "traffic.json" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')
+ echo '{"description": "${{ github.repository }} traffic statistics", "files": {"traffic.json": {"content": "'"$content"'"}}}' > post_traffic.json
+ curl -s -X PATCH \
+ --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
+ -H "Content-Type: application/json" \
+ -d @post_traffic.json https://api.github.com/gists/${{ steps.set_id.outputs.GIST }} > /dev/null 2>&1
+ if [ ! -f TRAFFIC.md ]; then
+ shields="https://img.shields.io/badge/dynamic/json?color=success&label=Views&query=count&url="
+ url="https://gist.githubusercontent.com/${{ github.actor }}/${{ steps.set_id.outputs.GIST }}/raw/traffic.json"
+ repo="https://github.com/MShawon/github-clone-count-badge"
+ echo ''> TRAFFIC.md
+ echo '
+ **Markdown**
+ ```markdown' >> TRAFFIC.md
+ echo "[]($repo)" >> TRAFFIC.md
+ echo '
+ ```
+ **HTML**
+ ```html' >> TRAFFIC.md
+ echo "
" >> TRAFFIC.md
+ echo '```' >> TRAFFIC.md
+
+ git add TRAFFIC.md
+ git config --global user.name "GitHub Action"
+ git config --global user.email "action@github.com"
+ git commit -m "create traffic count badge"
+ fi
+ - name: Push
+ uses: ad-m/github-push-action@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5ed07d3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,157 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+.idea/
+
+# Custom files
+run.py
+chromedriver*
+
+# Folders
+cookies/*
+logs/*
+screenshots/*
+htmls/*
+analytics/*
+
+# Replit
+keep_replit_alive.py
+.replit
+replit.nix
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..5a07a6c
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,26 @@
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-added-large-files
+- repo: https://github.com/pycqa/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ files: ^TwitchChannelPointsMiner/
+ args: ["--profile", "black"]
+- repo: https://github.com/psf/black
+ rev: 22.3.0
+ hooks:
+ - id: black
+ files: ^TwitchChannelPointsMiner/
+- repo: https://github.com/pycqa/flake8
+ rev: 3.9.2
+ hooks:
+ - id: flake8
+ files: ^TwitchChannelPointsMiner/
+ args:
+ - "--max-line-length=88"
+ - "--extend-ignore=E501"
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..be31cde
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,16 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Python: run.py",
+ "type": "python",
+ "request": "launch",
+ "program": "${cwd}/run.py",
+ "console": "integratedTerminal",
+ "justMyCode": true
+ }
+ ]
+}
\ No newline at end of file
diff --git a/CLONE.md b/CLONE.md
new file mode 100644
index 0000000..48f7e98
--- /dev/null
+++ b/CLONE.md
@@ -0,0 +1,13 @@
+
+
+ **Markdown**
+
+ ```markdown
+[](https://github.com/MShawon/github-clone-count-badge)
+
+ ```
+
+ **HTML**
+ ```html
+
+```
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..2b46b5b
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at alex.tkd.alex@gmail.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..561e067
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,110 @@
+# Contributing to this repository
+
+## Getting started
+
+Before you begin:
+- Have you read the [code of conduct](CODE_OF_CONDUCT.md)?
+- Check out the [existing issues](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues) & see if there is already an opened issue.
+
+### Ready to make a change? Fork the repo
+
+Fork using GitHub Desktop:
+
+- [Getting started with GitHub Desktop](https://docs.github.com/en/desktop/installing-and-configuring-github-desktop/getting-started-with-github-desktop) will guide you through setting up Desktop.
+- Once Desktop is set up, you can use it to [fork the repo](https://docs.github.com/en/desktop/contributing-and-collaborating-using-github-desktop/cloning-and-forking-repositories-from-github-desktop)!
+
+Fork using the command line:
+
+- [Fork the repo](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo#fork-an-example-repository) so that you can make your changes without affecting the original project until you're ready to merge them.
+
+Fork with [GitHub Codespaces](https://github.com/features/codespaces):
+
+- [Fork, edit, and preview](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace) using [GitHub Codespaces](https://github.com/features/codespaces) without having to install and run the project locally.
+
+### Open a pull request
+When you're done making changes, and you'd like to propose them for review, use the [pull request template](#pull-request-template) to open your PR (pull request).
+
+### Submit your PR & get it reviewed
+- Once you submit your PR, other users from the community will review it with you. The first thing you're going to want to do is a [self review](#self-review).
+- After that, we may have questions. Check back on your PR to keep up with the conversation.
+- Did you have an issue, like a merge conflict? Check out our [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) on resolving merge conflicts and other issues.
+
+### Your PR is merged!
+Congratulations! The whole GitHub community thanks you. :sparkles:
+
+Once your PR is merged, you will be proudly listed as a contributor in the [contributor chart](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/graphs/contributors).
+
+### Keep contributing as you use GitHub Docs
+
+Now that you're a part of the GitHub Docs community, you can keep participating in many ways.
+
+**Learn more about contributing:**
+
+- [Types of contributions :memo:](#types-of-contributions-memo)
+ - [:beetle: Issues](#beetle-issues)
+ - [:hammer_and_wrench: Pull requests](#hammer_and_wrench-pull-requests)
+- [Starting with an issue](#starting-with-an-issue)
+ - [Labels](#labels)
+- [Opening a pull request](#opening-a-pull-request)
+- [Reviewing](#reviewing)
+ - [Self review](#self-review)
+ - [Pull request template](#pull-request-template)
+ - [Python Styleguide](#python-styleguide)
+ - [Suggested changes](#suggested-changes)
+
+## Types of contributions :memo:
+You can contribute to the Twitch-Channel-Points-Miner-v2 in several ways. Bug reporting, pull request, propose new features, fork, donate, and much more :muscle: .
+
+### :beetle: Issues
+[Issues](https://docs.github.com/en/github/managing-your-work-on-github/about-issues) are used to report a bug, propose new features, or ask for help. When you open an issue, please use the appropriate template and label.
+
+### :hammer_and_wrench: Pull requests
+A [pull request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests) is a way to suggest changes in our repository.
+
+When we merge those changes, they should be deployed to the live site within 24 hours. :earth_africa: To learn more about opening a pull request in this repo, see [Opening a pull request](#opening-a-pull-request) below.
+
+## Starting with an issue
+You can browse existing issues to find something that needs help!
+
+### Labels
+Labels can help you find an issue you'd like to help with.
+- The `bug` label is used when something isn't working
+- The `documentation` label is used when you suggest improvements or additions to documentation (README.md update)
+- The `duplicate` label is used when this issue or pull request already exists
+- The `enhancement` label is used when you ask for or propose a new feature or request
+- The `help wanted` label is used when you need help with something
+- The `improvements` label is used when you would suggest improvements on already existing features
+- The `invalid` label is used for a non-valid issue
+- The `question` label is used when further information is requested
+- The `wontfix` label is used if we will not work on it
+
+## Opening a pull request
+You can use the GitHub user interface :pencil2: for minor changes, like fixing a typo or updating a readme. You can also fork the repo and then clone it locally to view changes and run your tests on your machine.
+
+### Self review
+You should always review your own PR first.
+
+For content changes, make sure that you:
+- [ ] Confirm that the changes address every part of the content design plan from your issue (if there are differences, explain them).
+- [ ] Review the content for technical accuracy.
+- [ ] Review the entire pull request using the checklist present in the template.
+- [ ] Copy-edit the changes for grammar, spelling, and adherence to the style guide.
+- [ ] Check new or updated Liquid statements to confirm that versioning is correct.
+- [ ] Check that all of your changes render correctly in staging. Remember that lists and tables can be tricky.
+- [ ] If there are any failing checks in your PR, troubleshoot them until they're all passing.
+
+### Pull request template
+When you open a pull request, you must fill out the "Ready for review" template before we can review your PR. This template helps reviewers understand your changes and the purpose of your pull request.
+
+### Python Styleguide
+All Python code is formatted with [Black](https://github.com/psf/black) using the default settings. Your code will not be accepted if it is not blackened.
+You can use the pre-commit hook.
+```
+pip install pre-commit
+pre-commit install
+```
+
+### Suggested changes
+We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch.
+
+As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations).
diff --git a/DELETE_PYCACHE.bat b/DELETE_PYCACHE.bat
new file mode 100644
index 0000000..37901f4
--- /dev/null
+++ b/DELETE_PYCACHE.bat
@@ -0,0 +1,5 @@
+@echo off
+rmdir /s /q __pycache__
+rmdir /s /q TwitchChannelPointsMiner\__pycache__
+rmdir /s /q TwitchChannelPointsMiner\classes\__pycache__
+rmdir /s /q TwitchChannelPointsMiner\classes\entities\__pycache__
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..0bec4ff
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,43 @@
+FROM python:3.12
+
+ARG BUILDX_QEMU_ENV
+
+WORKDIR /usr/src/app
+
+COPY ./requirements.txt ./
+
+ENV CRYPTOGRAPHY_DONT_BUILD_RUST=1
+
+RUN pip install --upgrade pip
+
+RUN apt-get update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --fix-missing --no-install-recommends \
+ gcc \
+ libffi-dev \
+ rustc \
+ zlib1g-dev \
+ libjpeg-dev \
+ libssl-dev \
+ libblas-dev \
+ liblapack-dev \
+ make \
+ cmake \
+ automake \
+ ninja-build \
+ g++ \
+ subversion \
+ python3-dev \
+ && if [ "${BUILDX_QEMU_ENV}" = "true" ] && [ "$(getconf LONG_BIT)" = "32" ]; then \
+ pip install -U cryptography==3.3.2; \
+ fi \
+ && pip install -r requirements.txt \
+ && pip cache purge \
+ && apt-get remove -y gcc rustc \
+ && apt-get autoremove -y \
+ && apt-get autoclean -y \
+ && apt-get clean -y \
+ && rm -rf /var/lib/apt/lists/* \
+ && rm -rf /usr/share/doc/*
+
+ADD ./TwitchChannelPointsMiner ./TwitchChannelPointsMiner
+ENTRYPOINT [ "python", "run.py" ]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..5ca8960
--- /dev/null
+++ b/README.md
@@ -0,0 +1,743 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2
+
+**Credits**
+- Main idea: https://github.com/gottagofaster236/Twitch-Channel-Points-Miner
+- ~~Bet system (Selenium): https://github.com/ClementRoyer/TwitchAutoCollect-AutoBet~~
+- Based on: https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2
+
+> A simple script that will watch a stream for you and earn the channel points.
+
+> It can wait for a streamer to go live (+_450 points_ when the stream starts), it will automatically click the bonus button (_+50 points_), and it will follow raids (_+250 points_).
+
+Read more about the channel points [here](https://help.twitch.tv/s/article/channel-points-guide).
+
+# README Contents
+1. π€ [Community](#community)
+2. π [Main differences from the original repository](#main-differences-from-the-original-repository)
+3. π§Ύ [Logs feature](#logs-feature)
+ - [Full logs](#full-logs)
+ - [Less logs](#less-logs)
+ - [Final report](#final-report)
+4. π§ [How to use](#how-to-use)
+ - [Cloning](#by-cloning-the-repository)
+ - [Docker](#docker)
+ - [Docker Hub](#docker-hub)
+ - [Portainer](#portainer)
+ - [Replit](#replit)
+ - [Limits](#limits)
+5. π§ [Settings](#settings)
+ - [LoggerSettings](#loggersettings)
+ - [StreamerSettings](#streamersettings)
+ - [BetSettings](#betsettings)
+ - [Bet strategy](#bet-strategy)
+ - [FilterCondition](#filtercondition)
+ - [Example](#example)
+6. π [Analytics](#analytics)
+7. πͺ [Migrating from an old repository (the original one)](#migrating-from-an-old-repository-the-original-one)
+8. πͺ [Windows](#windows)
+9. π± [Termux](#termux)
+10. β οΈ [Disclaimer](#disclaimer)
+
+
+## Community
+If you want to help with this project, please leave a star π and share it with your friends! π
+
+If you want to offer me a coffee, I would be grateful! β€οΈ
+
+| | |
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|
+|
|`DAKzncwKkpfPCm1xVU7u2pConpXwX7HS3D` _(DOGE)_|
+
+If you have any issues or you want to contribute, you are welcome! But please read the [CONTRIBUTING.md](https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/blob/master/CONTRIBUTING.md) file.
+
+## Main differences from the original repository:
+
+- Improved logging: emojis, colors, files and much more βοΈ
+- Final report with all the data βοΈ
+- Rewritten codebase now uses classes instead of modules with global variables βοΈ
+- Automatic downloading of the list of followers and using it as an input βοΈ
+- Better 'Watch Streak' strategy in the priority system [#11](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/11) βοΈ
+- Auto claiming [game drops](https://help.twitch.tv/s/article/mission-based-drops) from the Twitch inventory [#21](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/21) βοΈ
+- Placing a bet / making a prediction with your channel points [#41](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/41) ([@lay295](https://github.com/lay295)) βοΈ
+- Switchable analytics chart that shows the progress of your points with various annotations [#96](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/96) βοΈ
+- Joining the IRC Chat to increase the watch time and get StreamElements points [#47](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/47) βοΈ
+- [Moments](https://help.twitch.tv/s/article/moments) claiming [#182](https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/issues/182) βοΈ
+- Notifying on `@nickname` mention in the Twitch chat [#227](https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/issues/227) βοΈ
+
+## Logs feature
+### Full logs
+```
+%d/%m/%y %H:%M:%S - INFO - [run]: π£ Start session: '9eb934b0-1684-4a62-b3e2-ba097bd67d35'
+%d/%m/%y %H:%M:%S - INFO - [run]: π€ Loading data for x streamers. Please wait ...
+%d/%m/%y %H:%M:%S - INFO - [set_offline]: π΄ Streamer(username=streamer-username1, channel_id=0000000, channel_points=67247) is Offline!
+%d/%m/%y %H:%M:%S - INFO - [set_offline]: π΄ Streamer(username=streamer-username2, channel_id=0000000, channel_points=4240) is Offline!
+%d/%m/%y %H:%M:%S - INFO - [set_offline]: π΄ Streamer(username=streamer-username3, channel_id=0000000, channel_points=61365) is Offline!
+%d/%m/%y %H:%M:%S - INFO - [set_offline]: π΄ Streamer(username=streamer-username4, channel_id=0000000, channel_points=3760) is Offline!
+%d/%m/%y %H:%M:%S - INFO - [set_online]: π₯³ Streamer(username=streamer-username, channel_id=0000000, channel_points=61365) is Online!
+%d/%m/%y %H:%M:%S - INFO - [start_bet]: π§ Start betting for EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title=Please star this repo) owned by Streamer(username=streamer-username, channel_id=0000000, channel_points=61365)
+%d/%m/%y %H:%M:%S - INFO - [__open_coins_menu]: π§ Open coins menu for EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title=Please star this repo)
+%d/%m/%y %H:%M:%S - INFO - [__click_on_bet]: π§ Click on the bet for EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title=Please star this repo)
+%d/%m/%y %H:%M:%S - INFO - [__enable_custom_bet_value]: π§ Enable input of custom value for EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title=Please star this repo)
+%d/%m/%y %H:%M:%S - INFO - [on_message]: β° Place the bet after: 89.99s for: EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx-15c61914ef69, title=Please star this repo)
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +12 β Streamer(username=streamer-username, channel_id=0000000, channel_points=61377) - Reason: WATCH.
+%d/%m/%y %H:%M:%S - INFO - [make_predictions]: π Going to complete bet for EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx-15c61914ef69, title=Please star this repo) owned by Streamer(username=streamer-username, channel_id=0000000, channel_points=61377)
+%d/%m/%y %H:%M:%S - INFO - [make_predictions]: π Place 5k channel points on: SI (BLUE), Points: 848k, Users: 190 (70.63%), Odds: 1.24 (80.65%)
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +6675 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64206) - Reason: PREDICTION.
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title=Please star this repo) - Result: WIN, Points won: 6675
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +12 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64218) - Reason: WATCH.
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +12 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64230) - Reason: WATCH.
+%d/%m/%y %H:%M:%S - INFO - [claim_bonus]: π Claiming the bonus for Streamer(username=streamer-username, channel_id=0000000, channel_points=64230)!
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +60 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64290) - Reason: CLAIM.
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +12 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64326) - Reason: WATCH.
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +400 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64326) - Reason: WATCH_STREAK.
+%d/%m/%y %H:%M:%S - INFO - [claim_bonus]: π Claiming the bonus for Streamer(username=streamer-username, channel_id=0000000, channel_points=64326)!
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +60 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64386) - Reason: CLAIM.
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +12 β Streamer(username=streamer-username, channel_id=0000000, channel_points=64398) - Reason: WATCH.
+%d/%m/%y %H:%M:%S - INFO - [update_raid]: π Joining raid from Streamer(username=streamer-username, channel_id=0000000, channel_points=64398) to another-username!
+%d/%m/%y %H:%M:%S - INFO - [on_message]: π +250 β Streamer(username=streamer-username, channel_id=0000000, channel_points=6845) - Reason: RAID.
+```
+### Less logs
+```
+%d/%m %H:%M:%S - π£ Start session: '9eb934b0-1684-4a62-b3e2-ba097bd67d35'
+%d/%m %H:%M:%S - π€ Loading data for 13 streamers. Please wait ...
+%d/%m %H:%M:%S - π΄ streamer-username1 (xxx points) is Offline!
+%d/%m %H:%M:%S - π΄ streamer-username2 (xxx points) is Offline!
+%d/%m %H:%M:%S - π΄ streamer-username3 (xxx points) is Offline!
+%d/%m %H:%M:%S - π΄ streamer-username4 (xxx points) is Offline!
+%d/%m %H:%M:%S - π₯³ streamer-username (xxx points) is Online!
+%d/%m %H:%M:%S - π§ Start betting for EventPrediction: Please star this repo owned by streamer-username (xxx points)
+%d/%m %H:%M:%S - π§ Open coins menu for EventPrediction: Please star this repo
+%d/%m %H:%M:%S - π§ Click on the bet for EventPrediction: Please star this repo
+%d/%m %H:%M:%S - π§ Enable input of custom value for EventPrediction: Please star this repo
+%d/%m %H:%M:%S - β° Place the bet after: 89.99s EventPrediction: Please star this repo
+%d/%m %H:%M:%S - π +12 β streamer-username (xxx points) - Reason: WATCH.
+%d/%m %H:%M:%S - π Going to complete bet for EventPrediction: Please star this repo owned by streamer-username (xxx points)
+%d/%m %H:%M:%S - π Place 5k channel points on: SI (BLUE), Points: 848k, Users: 190 (70.63%), Odds: 1.24 (80.65%)
+%d/%m %H:%M:%S - π +6675 β streamer-username (xxx points) - Reason: PREDICTION.
+%d/%m %H:%M:%S - π EventPrediction: Please star this repo - Result: WIN, Points won: 6675
+%d/%m %H:%M:%S - π +12 β streamer-username (xxx points) - Reason: WATCH.
+%d/%m %H:%M:%S - π +12 β streamer-username (xxx points) - Reason: WATCH.
+%d/%m %H:%M:%S - π +60 β streamer-username (xxx points) - Reason: CLAIM.
+%d/%m %H:%M:%S - π +12 β streamer-username (xxx points) - Reason: WATCH.
+%d/%m %H:%M:%S - π +400 β streamer-username (xxx points) - Reason: WATCH_STREAK.
+%d/%m %H:%M:%S - π +60 β streamer-username (xxx points) - Reason: CLAIM.
+%d/%m %H:%M:%S - π +12 β streamer-username (xxx points) - Reason: WATCH.
+%d/%m %H:%M:%S - π Joining raid from streamer-username (xxx points) to another-username!
+%d/%m %H:%M:%S - π +250 β streamer-username (xxx points) - Reason: RAID.
+```
+### Final report:
+```
+%d/%m/%y %H:%M:%S - π End session 'f738d438-cdbc-4cd5-90c4-1517576f1299'
+%d/%m/%y %H:%M:%S - π Logs file: /.../path/Twitch-Channel-Points-Miner-v2/logs/username.timestamp.log
+%d/%m/%y %H:%M:%S - β Duration 10:29:19.547371
+
+%d/%m/%y %H:%M:%S - π BetSettings(Strategy=Strategy.SMART, Percentage=7, PercentageGap=20, MaxPoints=7500)
+%d/%m/%y %H:%M:%S - π EventPrediction(event_id=xxxx-xxxx-xxxx-xxxx, title="Event Title1")
+ Streamer(username=streamer-username, channel_id=0000000, channel_points=67247)
+ Bet(TotalUsers=1k, TotalPoints=11M), Decision={'choice': 'B', 'amount': 5289, 'id': 'xxxx-yyyy-zzzz'})
+        Outcome0(YES (BLUE) Points: 7M, Users: 641 (58.49%), Odds: 1.6 (62.5%))
+        Outcome1(NO (PINK) Points: 4M, Users: 455 (41.51%), Odds: 2.65 (37.74%))
+ Result: {'type': 'LOSE', 'won': 0}
+%d/%m/%y %H:%M:%S - π EventPrediction(event_id=yyyy-yyyy-yyyy-yyyy, title="Event Title2")
+ Streamer(username=streamer-username, channel_id=0000000, channel_points=3453464)
+ Bet(TotalUsers=921, TotalPoints=11M), Decision={'choice': 'A', 'amount': 4926, 'id': 'xxxx-yyyy-zzzz'})
+ Outcome0(YES (BLUE) Points: 9M, Users: 562 (61.02%), Odds: 1.31 (76.34%))
+ Outcome1(YES (PINK) Points: 3M, Users: 359 (38.98%), Odds: 4.21 (23.75%))
+ Result: {'type': 'WIN', 'won': 6531}
+%d/%m/%y %H:%M:%S - π EventPrediction(event_id=ad152117-251b-4666-b683-18e5390e56c3, title="Event Title3")
+ Streamer(username=streamer-username, channel_id=0000000, channel_points=45645645)
+ Bet(TotalUsers=260, TotalPoints=3M), Decision={'choice': 'A', 'amount': 5054, 'id': 'xxxx-yyyy-zzzz'})
+ Outcome0(YES (BLUE) Points: 689k, Users: 114 (43.85%), Odds: 4.24 (23.58%))
+ Outcome1(NO (PINK) Points: 2M, Users: 146 (56.15%), Odds: 1.31 (76.34%))
+ Result: {'type': 'LOSE', 'won': 0}
+
+%d/%m/%y %H:%M:%S - π€ Streamer(username=streamer-username, channel_id=0000000, channel_points=67247), Total points gained (after farming - before farming): -7838
+%d/%m/%y %H:%M:%S - π° CLAIM(11 times, 550 gained), PREDICTION(1 times, 6531 gained), WATCH(35 times, 350 gained)
+%d/%m/%y %H:%M:%S - π€ Streamer(username=streamer-username2, channel_id=0000000, channel_points=61365), Total points gained (after farming - before farming): 977
+%d/%m/%y %H:%M:%S - π° CLAIM(4 times, 240 gained), REFUND(1 times, 605 gained), WATCH(11 times, 132 gained)
+%d/%m/%y %H:%M:%S - π€ Streamer(username=streamer-username5, channel_id=0000000, channel_points=25960), Total points gained (after farming - before farming): 1680
+%d/%m/%y %H:%M:%S - π° CLAIM(17 times, 850 gained), WATCH(53 times, 530 gained)
+%d/%m/%y %H:%M:%S - π€ Streamer(username=streamer-username6, channel_id=0000000, channel_points=9430), Total points gained (after farming - before farming): 1120
+%d/%m/%y %H:%M:%S - π° CLAIM(14 times, 700 gained), WATCH(42 times, 420 gained), WATCH_STREAK(1 times, 450 gained)
+```
+
+## How to use:
+First of all please create a run.py file. You can just copy [example.py](https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/blob/master/example.py) and modify it according to your needs.
+```python
+# -*- coding: utf-8 -*-
+
+import logging
+from colorama import Fore
+from TwitchChannelPointsMiner import TwitchChannelPointsMiner
+from TwitchChannelPointsMiner.logger import LoggerSettings, ColorPalette
+from TwitchChannelPointsMiner.classes.Chat import ChatPresence
+from TwitchChannelPointsMiner.classes.Discord import Discord
+from TwitchChannelPointsMiner.classes.Webhook import Webhook
+from TwitchChannelPointsMiner.classes.Telegram import Telegram
+from TwitchChannelPointsMiner.classes.Settings import Priority, Events, FollowersOrder
+from TwitchChannelPointsMiner.classes.entities.Bet import Strategy, BetSettings, Condition, OutcomeKeys, FilterCondition, DelayMode
+from TwitchChannelPointsMiner.classes.entities.Streamer import Streamer, StreamerSettings
+
+twitch_miner = TwitchChannelPointsMiner(
+ username="your-twitch-username",
+ password="write-your-secure-psw", # If no password will be provided, the script will ask interactively
+ claim_drops_startup=False, # If you want to auto claim all drops from Twitch inventory on the startup
+ priority=[ # Custom priority in this case for example:
+ Priority.STREAK, # - We want first of all to catch all watch streak from all streamers
+        Priority.DROPS,              # - When we don't have any more watch streaks to catch, wait until all drops are collected over the streamers
+ Priority.ORDER # - When we have all of the drops claimed and no watch-streak available, use the order priority (POINTS_ASCENDING, POINTS_DESCEDING)
+ ],
+ enable_analytics=False, # Disables Analytics if False. Disabling it significantly reduces memory consumption
+ disable_ssl_cert_verification=False, # Set to True at your own risk and only to fix SSL: CERTIFICATE_VERIFY_FAILED error
+ disable_at_in_nickname=False, # Set to True if you want to check for your nickname mentions in the chat even without @ sign
+ logger_settings=LoggerSettings(
+ save=True, # If you want to save logs in a file (suggested)
+ console_level=logging.INFO, # Level of logs - use logging.DEBUG for more info
+ console_username=False, # Adds a username to every console log line if True. Also adds it to Telegram, Discord, etc. Useful when you have several accounts
+ auto_clear=True, # Create a file rotation handler with interval = 1D and backupCount = 7 if True (default)
+ time_zone="", # Set a specific time zone for console and file loggers. Use tz database names. Example: "America/Denver"
+ file_level=logging.DEBUG, # Level of logs - If you think the log file it's too big, use logging.INFO
+ emoji=True, # On Windows, we have a problem printing emoji. Set to false if you have a problem
+ less=False, # If you think that the logs are too verbose, set this to True
+ colored=True, # If you want to print colored text
+ color_palette=ColorPalette( # You can also create a custom palette color (for the common message).
+ STREAMER_online="GREEN", # Don't worry about lower/upper case. The script will parse all the values.
+ streamer_offline="red", # Read more in README.md
+ BET_wiN=Fore.MAGENTA # Color allowed are: [BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET].
+ ),
+ telegram=Telegram( # You can omit or set to None if you don't want to receive updates on Telegram
+ chat_id=123456789, # Chat ID to send messages @getmyid_bot
+ token="123456789:shfuihreuifheuifhiu34578347", # Telegram API token @BotFather
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION], # Only these events will be sent to the chat
+ disable_notification=True, # Revoke the notification (sound/vibration)
+ ),
+ discord=Discord(
+ webhook_api="https://discord.com/api/webhooks/0123456789/0a1B2c3D4e5F6g7H8i9J", # Discord Webhook URL
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION], # Only these events will be sent to the chat
+ ),
+ webhook=Webhook(
+ endpoint="https://example.com/webhook", # Webhook URL
+ method="GET", # GET or POST
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION], # Only these events will be sent to the endpoint
+ ),
+ matrix=Matrix(
+ username="twitch_miner", # Matrix username (without homeserver)
+ password="...", # Matrix password
+ homeserver="matrix.org", # Matrix homeserver
+ room_id="...", # Room ID
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE, Events.BET_LOSE], # Only these events will be sent
+ ),
+ pushover=Pushover(
+ userkey="YOUR-ACCOUNT-TOKEN", # Login to https://pushover.net/, the user token is on the main page
+            token="YOUR-APPLICATION-TOKEN",  # Create an application on the website, and use the token shown in your application
+ priority=0, # Read more about priority here: https://pushover.net/api#priority
+ sound="pushover", # A list of sounds can be found here: https://pushover.net/api#sounds
+ events=[Events.CHAT_MENTION, Events.DROP_CLAIM], # Only these events will be sent
+ )
+ ),
+ streamer_settings=StreamerSettings(
+ make_predictions=True, # If you want to Bet / Make prediction
+ follow_raid=True, # Follow raid to obtain more points
+        claim_drops=True,  # We can't filter rewards based on the stream. Set to False to skip the viewing counter increase; you will never obtain a drop reward from this script. Issue #21
+ claim_moments=True, # If set to True, https://help.twitch.tv/s/article/moments will be claimed when available
+        watch_streak=True,  # If a streamer goes online, change the priority of the streamers array and catch the watch streak. Issue #11
+ chat=ChatPresence.ONLINE, # Join irc chat to increase watch-time [ALWAYS, NEVER, ONLINE, OFFLINE]
+ bet=BetSettings(
+            strategy=Strategy.SMART,  # Choose your strategy!
+ percentage=5, # Place the x% of your channel points
+ percentage_gap=20, # Gap difference between outcomesA and outcomesB (for SMART strategy)
+ max_points=50000, # If the x percentage of your channel points is gt bet_max_points set this value
+ stealth_mode=True, # If the calculated amount of channel points is GT the highest bet, place the highest value minus 1-2 points Issue #33
+ delay_mode=DelayMode.FROM_END, # When placing a bet, we will wait until `delay` seconds before the end of the timer
+ delay=6,
+ minimum_points=20000, # Place the bet only if we have at least 20k points. Issue #113
+ filter_condition=FilterCondition(
+                by=OutcomeKeys.TOTAL_USERS,  # Where to apply the filter. Allowed [PERCENTAGE_USERS, ODDS_PERCENTAGE, ODDS, TOP_POINTS, TOTAL_USERS, TOTAL_POINTS]
+ where=Condition.LTE, # 'by' must be [GT, LT, GTE, LTE] than value
+ value=800
+ )
+ )
+ )
+)
+
+# You can customize the settings for each streamer. If no settings are provided, the script will use the streamer_settings from TwitchChannelPointsMiner.
+# If no streamer_settings are provided in TwitchChannelPointsMiner the script will use default settings.
+# The streamers array can be a String -> username or Streamer instance.
+
+# The settings priority is: settings in the mine function, settings in the TwitchChannelPointsMiner instance, default settings.
+# For example, if in the mine function you don't provide any value for 'make_prediction' but you have set it on TwitchChannelPointsMiner instance, the script will take the value from here.
+# If you haven't set any value even in the instance the default one will be used
+
+#twitch_miner.analytics(host="127.0.0.1", port=5000, refresh=5, days_ago=7) # Start the Analytics web-server (replit: host="0.0.0.0")
+
+twitch_miner.mine(
+ [
+ Streamer("streamer-username01", settings=StreamerSettings(make_predictions=True , follow_raid=False , claim_drops=True , watch_streak=True , bet=BetSettings(strategy=Strategy.SMART , percentage=5 , stealth_mode=True, percentage_gap=20 , max_points=234 , filter_condition=FilterCondition(by=OutcomeKeys.TOTAL_USERS, where=Condition.LTE, value=800 ) ) )),
+ Streamer("streamer-username02", settings=StreamerSettings(make_predictions=False , follow_raid=True , claim_drops=False , bet=BetSettings(strategy=Strategy.PERCENTAGE , percentage=5 , stealth_mode=False, percentage_gap=20 , max_points=1234 , filter_condition=FilterCondition(by=OutcomeKeys.TOTAL_POINTS, where=Condition.GTE, value=250 ) ) )),
+ Streamer("streamer-username03", settings=StreamerSettings(make_predictions=True , follow_raid=False , watch_streak=True , bet=BetSettings(strategy=Strategy.SMART , percentage=5 , stealth_mode=False, percentage_gap=30 , max_points=50000 , filter_condition=FilterCondition(by=OutcomeKeys.ODDS, where=Condition.LT, value=300 ) ) )),
+ Streamer("streamer-username04", settings=StreamerSettings(make_predictions=False , follow_raid=True , watch_streak=True )),
+ Streamer("streamer-username05", settings=StreamerSettings(make_predictions=True , follow_raid=True , claim_drops=True , watch_streak=True , bet=BetSettings(strategy=Strategy.HIGH_ODDS , percentage=7 , stealth_mode=True, percentage_gap=20 , max_points=90 , filter_condition=FilterCondition(by=OutcomeKeys.PERCENTAGE_USERS, where=Condition.GTE, value=300 ) ) )),
+ Streamer("streamer-username06"),
+ Streamer("streamer-username07"),
+ Streamer("streamer-username08"),
+ "streamer-username09",
+ "streamer-username10",
+ "streamer-username11"
+ ], # Array of streamers (order = priority)
+ followers=False, # Automatic download the list of your followers
+ followers_order=FollowersOrder.ASC # Sort the followers list by follow date. ASC or DESC
+)
+```
+You can also use all the default values except for your username, obviously. Short version:
+```python
+from TwitchChannelPointsMiner import TwitchChannelPointsMiner
+from TwitchChannelPointsMiner.classes.Settings import FollowersOrder
+twitch_miner = TwitchChannelPointsMiner("your-twitch-username")
+twitch_miner.mine(["streamer1", "streamer2"]) # Array of streamers OR
+twitch_miner.mine(followers=True, followers_order=FollowersOrder.ASC) # Automatic use the followers list OR
+twitch_miner.mine(["streamer1", "streamer2"], followers=True, followers_order=FollowersOrder.DESC) # Mixed
+```
+If you follow so many streamers on Twitch, but you don't want to mine points for all of them, you can blacklist the users with the `blacklist` keyword. [#94](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/94)
+```python
+from TwitchChannelPointsMiner import TwitchChannelPointsMiner
+twitch_miner = TwitchChannelPointsMiner("your-twitch-username")
+twitch_miner.mine(followers=True, blacklist=["user1", "user2"]) # Blacklist example
+```
+
+### By cloning the repository
+1. Clone this repository `git clone https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2`
+2. Install all the requirements `pip install -r requirements.txt`. If you have problems with requirements, make sure to have at least Python 3.6. You could also try to create a _virtualenv_ and then install all the requirements
+```sh
+pip install virtualenv
+virtualenv -p python3 venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
+
+Start mining! `python run.py` π₯³
+
+### Docker
+
+#### Docker Hub
+Official Docker images are on https://hub.docker.com/r/rdavidoff/twitch-channel-points-miner-v2 for `linux/amd64`, `linux/arm64` and `linux/arm/v7`.
+
+The following file is mounted:
+
+- run.py : this is your starter script with your configuration
+
+These folders are mounted:
+
+- analytics : to save the analytics data
+- cookies : to provide login information
+- logs : to keep logs outside of container
+
+**Example using docker-compose:**
+
+```yml
+version: "3.9"
+
+services:
+ miner:
+ image: rdavidoff/twitch-channel-points-miner-v2
+ stdin_open: true
+ tty: true
+ environment:
+ - TERM=xterm-256color
+ volumes:
+ - ./analytics:/usr/src/app/analytics
+ - ./cookies:/usr/src/app/cookies
+ - ./logs:/usr/src/app/logs
+ - ./run.py:/usr/src/app/run.py:ro
+ ports:
+ - "5000:5000"
+```
+
+**Example with docker run:**
+```sh
+docker run \
+ -v $(pwd)/analytics:/usr/src/app/analytics \
+ -v $(pwd)/cookies:/usr/src/app/cookies \
+ -v $(pwd)/logs:/usr/src/app/logs \
+ -v $(pwd)/run.py:/usr/src/app/run.py:ro \
+ -p 5000:5000 \
+ rdavidoff/twitch-channel-points-miner-v2
+```
+
+`$(pwd)` might not work on Windows (cmd); please use the absolute path instead, like: `/path/of/your/cookies:/usr/src/app/cookies`.
+
+The correct solution for Windows lies in the correct command line: `docker run -v C:\Absolute\Path\To\Twitch-Channel-Points-Miner-v2\run.py:/usr/src/app/run.py:ro rdavidoff/twitch-channel-points-miner-v2`.
+
+`run.py` MUST be mounted as a volume (`-v`).
+
+If you don't mount the volume for the analytics (or cookies or logs) folder, the folder will be automatically created on the Docker container, and you will lose all the data when it is stopped.
+
+If you don't have a cookie or it's your first time running the script, you will need to log in to Twitch and start the container with `-it` args. If you need to run multiple containers, you can bind different ports (only if you also need the analytics) and mount different run.py files, like
+
+```sh
+docker run --name user1 -v $(pwd)/user1.py:/usr/src/app/run.py:ro -p 5001:5000 rdavidoff/twitch-channel-points-miner-v2
+```
+
+```sh
+docker run --name user2 -v $(pwd)/user2.py:/usr/src/app/run.py:ro -p 5002:5000 rdavidoff/twitch-channel-points-miner-v2
+```
+
+#### Portainer
+
+[Link](https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/wiki/Deploy-Docker-container-in-Portainer) to the illustrated guide on how to deploy a Docker container in Portainer.
+
+### Replit
+
+Official Repl: https://replit.com/@rdavydov/Twitch-Channel-Points-Miner-v2
+
+Provided "as is" with no support. Testing purposes only. Updates may be delayed.
+
+### Limits
+_**Twitch has a limit - you can't watch more than two channels at one time. We take the first two streamers from the list as they have the highest priority.**_
+
+Make sure to write the streamers array in order of priority from left to right. If you use `followers=True` you can choose to download the followers sorted by follow date (ASC or DESC).
+
+## Settings
+Most of the settings are self-explanatory and are commented in the example.
+You can watch only two streamers at a time. With the `priority` settings, you can select which streamers to watch by priority. You can use an array of priorities or a single item. I suggest using at least one priority from `ORDER`, `POINTS_ASCENDING`, `POINTS_DESCEDING` because, for example, if you set only `STREAK`, then after catching all watch streaks the script will stop watching streamers.
+Available values are the following:
+ - `STREAK` - Catch the watch streak from all streamers
+ - `DROPS` - Claim all drops from streamers with drops tags enabled
+ - `SUBSCRIBED` - Prioritize streamers you're subscribed to (higher subscription tiers are mined first)
+ - `ORDER` - Following the order of the list
+ - `POINTS_ASCENDING` - On top the streamers with the lowest points
+ - `POINTS_DESCEDING` - On top the streamers with the highest points
+
+You can combine all priorities, but keep in mind that using `ORDER` and `POINTS_ASCENDING` in the same settings doesn't make sense.
+
+### LoggerSettings
+| Key | Type | Default | Description |
+|----------------- |----------------- |-------------------------------------------------------------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `save` | bool | True | If you want to save logs in file (suggested) |
+| `less` | bool | False | Reduce the logging format and message verbosity [#10](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/10) |
+| `console_level` | level | logging.INFO | Level of logs in terminal - Use logging.DEBUG for more helpful messages. |
+| `console_username`| bool | False | Adds a username to every log line in the console if True. [#602](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/602)|
+| `time_zone`| str | None | Set a specific time zone for console and file loggers. Use tz database names. Example: "America/Denver" https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2/issues/205|
+| `file_level` | level | logging.DEBUG | Level of logs in file save - If you think the log file it's too big, use logging.INFO |
+| `emoji` | bool | For Windows is False else True | On Windows, we have a problem printing emoji. Set to false if you have a problem |
+| `colored` | bool | True | If you want to print colored text [#45](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/45) [#82](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/82) |
+| `auto_clear` | bool | True | Create a file rotation handler with interval = 1D and backupCount = 7 [#215](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/215) |
+| `color_palette` | ColorPalette | All messages are Fore.RESET except WIN and LOSE bet (GREEN and RED) | Create your custom color palette. Read more above. |
+| `telegram` | Telegram | None | (Optional) Receive Telegram updates for multiple events list [#233](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/233) |
+| `discord` | Discord | None | (Optional) Receive Discord updates for multiple events list [#320](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/320) |
+
+#### Color Palette
+Now you can customize the color of the terminal messages. We have created a default ColorPalette that renders all messages with the `DEFAULT (RESET)` color, except the `BET_WIN` and `BET_LOSE` messages, which are `GREEN` and `RED` respectively. You can change the colors of all `Events` enum class members. The allowed colors are all the Fore colors from Colorama: `BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.`
+The script was developed to tolerate common human errors (mixed lower/upper case and more), but we suggest using the following code style
+```python
+from colorama import Fore
+ColorPalette(
+ STREAMER_ONLINE = Fore.GREEN,
+ STREAMER_OFFLINE = Fore.RED,
+ GAIN_FOR_RAID = Fore.YELLOW,
+ GAIN_FOR_CLAIM = Fore.YELLOW,
+ GAIN_FOR_WATCH = Fore.YELLOW,
+ GAIN_FOR_WATCH_STREAK = Fore.YELLOW,
+ BET_WIN = Fore.GREEN,
+ BET_LOSE = Fore.RED,
+ BET_REFUND = Fore.RESET,
+ BET_FILTERS = Fore.MAGENTA,
+ BET_GENERAL = Fore.BLUE,
+ BET_FAILED = Fore.RED,
+)
+```
+
+#### Telegram
+If you want to receive logs update on Telegram, initiate a new Telegram class, else omit this parameter or set as None.
+1. Create a bot with [@BotFather](https://t.me/botfather)
+2. Get your `chat_id` with [@getmyid_bot](https://t.me/getmyid_bot)
+
+| Key | Type | Default | Description |
+|----------------------- |----------------- |--------- |------------------------------------------------------------------- |
+| `chat_id` | int | | Chat ID to send messages @getmyid_bot |
+| `token` | string | | Telegram API token @BotFather |
+| `events` | list | | Only these events will be sent to the chat. Array of Event. or str |
+| `disable_notification` | bool | false | Revoke the notification (sound/vibration) |
+
+
+```python
+Telegram(
+ chat_id=123456789,
+ token="123456789:shfuihreuifheuifhiu34578347",
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION],
+ disable_notification=True,
+)
+```
+
+#### Discord
+If you want to receive log updates on Discord, initialize a new Discord class; otherwise omit this parameter or set it to None [YT Video](https://www.youtube.com/watch?v=fKksxz2Gdnc)
+1. Go to the Server you want to receive updates
+2. Click "Edit Channel"
+3. Click "Integrations"
+4. Click "Webhooks"
+5. Click "New Webhook"
+6. Name it if you want
+7. Click on "Copy Webhook URL"
+
+
+| Key | Type | Default | Description |
+|----------------------- |--------------------- |-------------- |------------------------------------------------------------------- |
+| `webhook_api` | string | | Discord webhook URL |
+| `events` | list | | Only these events will be sent to the chat. Array of Event. or str |
+
+```python
+Discord(
+ webhook_api="https://discord.com/api/webhooks/0123456789/0a1B2c3D4e5F6g7H8i9J",
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION],
+)
+```
+
+#### Generic Webhook
+You can use a generic webhook.
+
+| Key | Type | Default | Description |
+|----------------------- |------------------|-----------|------------------------------------------------------------------- |
+| `endpoint` | string | | webhook url |
+| `method` | string | | `POST` or `GET` |
+| `events` | list | | Only these events will be sent to the endpoint. Array of Event. or str |
+
+```python
+Webhook(
+ endpoint="https://example.com/webhook",
+ method="GET",
+ events=[Events.STREAMER_ONLINE, Events.STREAMER_OFFLINE,
+ Events.BET_LOSE, Events.CHAT_MENTION],
+)
+```
+
+
+#### Events
+ - `STREAMER_ONLINE`
+ - `STREAMER_OFFLINE`
+ - `GAIN_FOR_RAID`
+ - `GAIN_FOR_CLAIM`
+ - `GAIN_FOR_WATCH`
+ - `BET_WIN`
+ - `BET_LOSE`
+ - `BET_REFUND`
+ - `BET_FILTERS`
+ - `BET_GENERAL`
+ - `BET_FAILED`
+ - `BET_START`
+ - `BONUS_CLAIM`
+ - `MOMENT_CLAIM`
+ - `JOIN_RAID`
+ - `DROP_CLAIM`
+ - `DROP_STATUS`
+ - `CHAT_MENTION`
+
+### StreamerSettings
+| Key | Type | Default | Description |
+|-------------------- |------------- |-------------------------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `make_predictions` | bool | True | Choose if you want to make predictions / bet or not |
+| `follow_raid` | bool | True | Choose if you want to follow raid +250 points |
+| `claim_drops` | bool | True | If this value is True, the script will increase the watch-time for the current game. With this, you can claim the drops from Twitch Inventory [#21](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/21) |
+| `claim_moments` | bool | True | If set to True, [moments](https://help.twitch.tv/s/article/moments) will be claimed when available |
+| `watch_streak` | bool | True | Choose if you want to change a priority for these streamers and try to catch the Watch Streak event [#11](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/11) |
+| `bet` | BetSettings | | Rules to follow for the bet |
+| `chat` | ChatPresence | ONLINE | Join IRC-Chat to appear online in chat and attempt to get StreamElements channel points and increase view-time [#47](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/47) |
+
+Allowed values for `chat` are:
+- `ALWAYS` Join the IRC chat and never leave
+- `NEVER` Never join the IRC chat
+- `ONLINE` Participate in the IRC chat if the streamer is online (leave if offline)
+- `OFFLINE` Participate in the IRC chat if the streamer is offline (leave if online)
+
+### BetSettings
+| Key | Type | Default | Description |
+|-------------------- |----------------- |--------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `strategy` | Strategy | SMART | Choose your strategy! See below for more info |
+| `percentage` | int | 5 | Place the x% of your channel points |
+| `percentage_gap`    | int              | 20        | Gap difference between outcomesA and outcomesB (for SMART strategy) |
+| `max_points` | int | 50000 | If the x percentage of your channel points is GT bet_max_points set this value |
+| `stealth_mode` | bool | False | If the calculated amount of channel points is GT the highest bet, place the highest value minus 1-2 points [#33](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/33) |
+| `delay_mode`        | DelayMode        | FROM_END  | Define how the waiting time before placing a bet is calculated |
+| `delay` | float | 6 | Value to be used to calculate bet delay depending on `delay_mode` value |
+
+#### Bet strategy
+
+- **MOST_VOTED**: Select the option most voted based on users count
+- **HIGH_ODDS**: Select the option with the highest odds
+- **PERCENTAGE**: Select the option with the highest percentage based on odds (It's the same that show Twitch) - Should be the same as select LOWEST_ODDS
+- **SMART_MONEY**: Select the option with the highest points placed. [#331](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/331)
+- **SMART**: If the majority in percent chose an option, then follow the other users, otherwise select the option with the highest odds
+
+
+
+Here a concrete example:
+
+- **MOST_VOTED**: 21 users have selected **'over 7.5'**, versus 9 for 'under 7.5'
+- **HIGH_ODDS**: The highest odd is 2.27 on **'over 7.5'** vs 1.79 on 'under 7.5'
+- **PERCENTAGE**: The highest percentage is 56% for **'under 7.5'**
+- **SMART**: Calculate the percentage based on the users. The percentages are: 'over 7.5': 70% and 'under 7.5': 30%. If the difference between the two percentages is higher than `percentage_gap` select the highest percentage, else the highest odds.
+
+In this case if percentage_gap = 20 ; 70-30 = 40 > percentage_gap, so the bot will select 'over 7.5'
+### FilterCondition
+| Key | Type | Default | Description |
+|------------- |------------- |--------- |---------------------------------------------------------------------------------- |
+| `by` | OutcomeKeys | None | Key to apply the filter |
+| `where` | Condition | None | Condition that should match for place bet |
+| `value` | number | None | Value to compare |
+
+Allowed values for `by` are:
+- `PERCENTAGE_USERS` (no sum) [Would never want a sum as it'd always be 100%]
+- `ODDS_PERCENTAGE` (no sum) [Doesn't make sense to sum odds]
+- `ODDS` (no sum) [Doesn't make sense to sum odds]
+- `DECISION_USERS` (no sum)
+- `DECISION_POINTS` (no sum)
+- `TOP_POINTS` (no sum) [Doesn't make sense to the top points of both sides]
+- `TOTAL_USERS` (sum)
+- `TOTAL_POINTS` (sum)
+
+Allowed values for `where` are: `GT, LT, GTE, LTE`
+
+#### Example
+- If you want to place the bet ONLY if the total of users participants in the bet is greater than 200
+`FilterCondition(by=OutcomeKeys.TOTAL_USERS, where=Condition.GT, value=200)`
+- If you want to place the bet ONLY if the winning odd of your decision is greater than or equal to 1.3
+`FilterCondition(by=OutcomeKeys.ODDS, where=Condition.GTE, value=1.3)`
+- If you want to place the bet ONLY if the highest bet is lower than 2000
+`FilterCondition(by=OutcomeKeys.TOP_POINTS, where=Condition.LT, value=2000)`
+
+### DelayMode
+
+- **FROM_START**: Will wait `delay` seconds from when the bet was opened
+- **FROM_END**: Will wait until there are `delay` seconds left to place the bet
+- **PERCENTAGE**: Will place the bet when `delay` percent of the set timer is elapsed
+
+Here's a concrete example. Let's suppose we have a bet that is opened with a timer of 10 minutes:
+
+- **FROM_START** with `delay=20`: The bet will be placed 20s after the bet is opened
+- **FROM_END** with `delay=20`: The bet will be placed 20s before the end of the bet (so 9mins 40s after the bet is opened)
+- **PERCENTAGE** with `delay=0.2`: The bet will be placed when the timer went down by 20% (so 2mins after the bet is opened)
+
+## Analytics
+We have recently introduced a little frontend where a chart shows your points trend. The script will spawn a Flask web-server on your machine, where you can select the binding address and port.
+The chart provides annotations for the prediction and watch streak events. Annotations are usually used to highlight big increases / decreases of points. You can disable annotations if you want.
+Each (x, y) point has a tooltip that shows the points, the date-time and the reason for the points gained / lost. This web page was just a fun idea, and it is not intended for professional usage.
+If you want you can toggle the dark theme with the dedicated checkbox.
+
+| Light theme | Dark theme |
+| ----------- | ---------- |
+|  |  |
+
+To use this feature just call the `analytics()` method before starting the miner. Read more at: [#96](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/96)
+The chart will be auto-refreshed every `refresh` minutes. If you want to reach the web panel from a second machine, bind to `0.0.0.0` instead of `127.0.0.1`. With the `days_ago` arg you can select how many days you want to show by default in your analytics graph.
+```python
+from TwitchChannelPointsMiner import TwitchChannelPointsMiner
+twitch_miner = TwitchChannelPointsMiner("your-twitch-username")
+twitch_miner.analytics(host="127.0.0.1", port=5000, refresh=5, days_ago=7) # Analytics web-server
+twitch_miner.mine(followers=True, blacklist=["user1", "user2"])
+```
+
+### `enable_analytics` option of the `TwitchChannelPointsMiner` constructor toggles the Analytics needed for the `analytics()` method
+
+Disabling Analytics significantly reduces memory consumption and saves some disk space by not creating and writing `/analytics/*.json`.
+
+Set this option to `True` if you need Analytics. Otherwise set this option to `False` (default value).
+
+## Migrating from an old repository (the original one):
+If you already have a `twitch-cookies.pkl` and you don't want to log in again, please create a `cookies/` folder in the current directory and then copy the .pkl file with a new name `your-twitch-username.pkl`
+```
+.
++-- run.py
++-- cookies
+| +-- your-twitch-username.pkl
+```
+
+## Windows
+Other users have found multiple problems on Windows. Suggestions are:
+ - Stop using Windows :stuck_out_tongue_closed_eyes:
+ - Suppress the emoji in logs with `logger_settings=LoggerSettings(emoji=False)`
+
+Other useful info can be found here:
+- https://github.com/gottagofaster236/Twitch-Channel-Points-Miner/issues/31
+- https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/55
+
+You can also follow this [video tutorial](https://www.youtube.com/watch?v=0VkM7NOZkuA).
+
+## Termux
+**1. Upgrade packages**
+```
+pkg upgrade
+```
+
+**2. Install packages to Termux**
+```
+pkg install python git rust libjpeg-turbo libcrypt ndk-sysroot clang zlib binutils tur-repo
+LDFLAGS="-L${PREFIX}/lib/" CFLAGS="-I${PREFIX}/include/" pip install --upgrade wheel pillow
+```
+Note: `pkg install tur-repo` will basically enable the [user repository](https://github.com/termux-user-repository/tur) _(Very similar to Arch AUR)_ and `python-pandas` pre-compiled package comes exactly from this repository.
+
+**3. Install pandas**
+```
+pkg install python-pandas
+```
+
+**4. Clone this repository**
+
+`git clone https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2`
+
+**5. Go to the miner's directory**
+
+`cd Twitch-Channel-Points-Miner-v2`
+
+**6. Configure your miner on your preferences by typing**
+
+`nano example.py`
+
+**7. Rename file name (optional)**
+
+`mv example.py run.py`
+
+**8. Install packages**
+```
+pip install -r requirements.txt
+pip install Twitch-Channel-Points-Miner-v2
+```
+
+**9. Run the miner!**
+
+`python run.py`
+
+Read more at [#92](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/92) [#76](https://github.com/Tkd-Alex/Twitch-Channel-Points-Miner-v2/issues/76)
+
+**Note**
+
+If you can't install `cryptography`, please try:
+
+`export RUSTFLAGS=" -C lto=no" && export CARGO_BUILD_TARGET="$(rustc -vV | sed -n 's|host: ||p')" && pip install cryptography`
+
+β οΈ Installation of `pandas`, `maturin` and `cryptography` takes a long time.
+
+## Disclaimer
+This project comes with no guarantee or warranty. You are responsible for whatever happens from using this project. It is possible to get soft or hard banned by using this project if you are not careful. This is a personal project and is in no way affiliated with Twitch.
diff --git a/TRAFFIC.md b/TRAFFIC.md
new file mode 100644
index 0000000..576f91e
--- /dev/null
+++ b/TRAFFIC.md
@@ -0,0 +1,11 @@
+
+
+ **Markdown**
+ ```markdown
+[](https://github.com/MShawon/github-clone-count-badge)
+
+ ```
+ **HTML**
+ ```html
+
+```
diff --git a/TwitchChannelPointsMiner/TwitchChannelPointsMiner.py b/TwitchChannelPointsMiner/TwitchChannelPointsMiner.py
new file mode 100644
index 0000000..f937351
--- /dev/null
+++ b/TwitchChannelPointsMiner/TwitchChannelPointsMiner.py
@@ -0,0 +1,496 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import os
+import random
+import signal
+import sys
+import threading
+import time
+import uuid
+from datetime import datetime
+from pathlib import Path
+
+from TwitchChannelPointsMiner.classes.Chat import ChatPresence, ThreadChat
+from TwitchChannelPointsMiner.classes.entities.PubsubTopic import PubsubTopic
+from TwitchChannelPointsMiner.classes.entities.Streamer import (
+ Streamer,
+ StreamerSettings,
+)
+from TwitchChannelPointsMiner.classes.Exceptions import StreamerDoesNotExistException
+from TwitchChannelPointsMiner.classes.Settings import FollowersOrder, Priority, Settings
+from TwitchChannelPointsMiner.classes.Twitch import Twitch
+from TwitchChannelPointsMiner.classes.WebSocketsPool import WebSocketsPool
+from TwitchChannelPointsMiner.logger import LoggerSettings, configure_loggers
+from TwitchChannelPointsMiner.utils import (
+ _millify,
+ at_least_one_value_in_settings_is,
+ check_versions,
+ get_user_agent,
+ internet_connection_available,
+ set_default_settings,
+)
+
+# Silence noisy third-party loggers so the miner's own output stays readable:
+# - chardet.charsetprober - [feed] / [get_confidence]
+# - requests - [Starting new HTTPS connection (1)]
+# - Flask (werkzeug) request logs
+# - irc.client - [process_data] / [_dispatcher] / [_handle_message]
+# - seleniumwire / websocket internals
+logging.getLogger("chardet.charsetprober").setLevel(logging.ERROR)
+logging.getLogger("requests").setLevel(logging.ERROR)
+logging.getLogger("werkzeug").setLevel(logging.ERROR)
+logging.getLogger("irc.client").setLevel(logging.ERROR)
+logging.getLogger("seleniumwire").setLevel(logging.ERROR)
+logging.getLogger("websocket").setLevel(logging.ERROR)
+
+# Module-level logger for this file (PEP 282 convention).
+logger = logging.getLogger(__name__)
+
+
+class TwitchChannelPointsMiner:
+    """Top-level orchestrator of the miner.
+
+    One instance owns the Twitch API client, the PubSub WebSocket pool, the
+    background watcher/campaign threads and (optionally) the analytics web
+    server.  Construct it, then call ``mine()``/``run()`` to start a blocking
+    mining session; ``end()`` is installed as the signal handler for shutdown.
+    """
+
+    # __slots__ avoids a per-instance __dict__; also pins the exact set of
+    # attributes this long-lived object may carry.
+    __slots__ = [
+        "username",
+        "twitch",
+        "claim_drops_startup",
+        "enable_analytics",
+        "disable_ssl_cert_verification",
+        "disable_at_in_nickname",
+        "priority",
+        "streamers",
+        "events_predictions",
+        "minute_watcher_thread",
+        "sync_campaigns_thread",
+        "ws_pool",
+        "session_id",
+        "running",
+        "start_datetime",
+        "original_streamers",
+        "logs_file",
+        "queue_listener",
+    ]
+
+    def __init__(
+        self,
+        username: str,
+        password: str = None,
+        claim_drops_startup: bool = False,
+        enable_analytics: bool = False,
+        disable_ssl_cert_verification: bool = False,
+        disable_at_in_nickname: bool = False,
+        # NOTE(review): mutable default list is shared across instances; safe
+        # only as long as it is never mutated — confirm before changing.
+        priority: list = [Priority.STREAK, Priority.DROPS, Priority.ORDER],
+        # These settings are shared globally through the Settings class.
+        logger_settings: LoggerSettings = LoggerSettings(),
+        # Default values applied to every streamer without explicit settings.
+        streamer_settings: StreamerSettings = StreamerSettings(),
+    ):
+        """Build the miner: validate the username, block until Twitch.tv is
+        reachable, publish global ``Settings``, configure loggers, create the
+        Twitch client and install ``end()`` as the shutdown signal handler.
+
+        Exits the process (``sys.exit(0)``) if no real username is given.
+        """
+        # Fixes TypeError: 'NoneType' object is not subscriptable
+        if not username or username == "your-twitch-username":
+            logger.error(
+                "Please edit your runner file (usually run.py) and try again.")
+            logger.error("No username, exiting...")
+            sys.exit(0)
+
+        # This disables certificate verification and allows the connection to proceed, but also makes it vulnerable to man-in-the-middle (MITM) attacks.
+        Settings.disable_ssl_cert_verification = disable_ssl_cert_verification
+
+        Settings.disable_at_in_nickname = disable_at_in_nickname
+
+        # Imported locally: only needed for the connectivity probe below.
+        import socket
+
+        def is_connected():
+            # DNS resolution of twitch.tv doubles as a cheap reachability test.
+            try:
+                # resolve the IP address of the Twitch.tv domain name
+                socket.gethostbyname("twitch.tv")
+                return True
+            except OSError:
+                pass
+            return False
+
+        # Block startup until Twitch.tv is reachable, polling every 5 seconds;
+        # log the wait message only once.
+        error_printed = False
+        while not is_connected():
+            if not error_printed:
+                logger.error("Waiting for Twitch.tv connectivity...")
+                error_printed = True
+            time.sleep(5)
+
+        # Analytics switch
+        Settings.enable_analytics = enable_analytics
+
+        # Analytics JSON files live under ./analytics/<username>/ (created here).
+        if enable_analytics is True:
+            Settings.analytics_path = os.path.join(
+                Path().absolute(), "analytics", username
+            )
+            Path(Settings.analytics_path).mkdir(parents=True, exist_ok=True)
+
+        self.username = username
+
+        # Set as global config
+        Settings.logger = logger_settings
+
+        # Fill in any missing streamer/bet settings with library defaults.
+        streamer_settings.default()
+        streamer_settings.bet.default()
+        Settings.streamer_settings = streamer_settings
+
+        # user_agent = get_user_agent("FIREFOX")
+        user_agent = get_user_agent("CHROME")
+        self.twitch = Twitch(self.username, user_agent, password)
+
+        self.claim_drops_startup = claim_drops_startup
+        # Accept a single Priority as well as a list of them.
+        self.priority = priority if isinstance(priority, list) else [priority]
+
+        self.streamers = []
+        self.events_predictions = {}
+        self.minute_watcher_thread = None
+        self.sync_campaigns_thread = None
+        self.ws_pool = None
+
+        self.session_id = str(uuid.uuid4())
+        self.running = False
+        self.start_datetime = None
+        self.original_streamers = []
+
+        self.logs_file, self.queue_listener = configure_loggers(
+            self.username, logger_settings
+        )
+
+        # Check for the latest version of the script
+        current_version, github_version = check_versions()
+
+        logger.info(
+            f"Twitch Channel Points Miner v2-{current_version} (fork by rdavydov)"
+        )
+        logger.info(
+            "https://github.com/rdavydov/Twitch-Channel-Points-Miner-v2")
+
+        # "0.0.0" is the sentinel check_versions() returns when GitHub is unreachable.
+        if github_version == "0.0.0":
+            logger.error(
+                "Unable to detect if you have the latest version of this script"
+            )
+        elif current_version != github_version:
+            logger.info(
+                f"You are running version {current_version} of this script")
+            logger.info(f"The latest version on GitHub is {github_version}")
+
+        # Route termination signals to end() for a clean shutdown.
+        # NOTE(review): trapping SIGSEGV with a Python handler is unusual — verify intent.
+        for sign in [signal.SIGINT, signal.SIGSEGV, signal.SIGTERM]:
+            signal.signal(sign, self.end)
+
+ def analytics(
+ self,
+ host: str = "127.0.0.1",
+ port: int = 5000,
+ refresh: int = 5,
+ days_ago: int = 7,
+ ):
+ # Analytics switch
+ if Settings.enable_analytics is True:
+ from TwitchChannelPointsMiner.classes.AnalyticsServer import AnalyticsServer
+
+ http_server = AnalyticsServer(
+ host=host, port=port, refresh=refresh, days_ago=days_ago, username=self.username
+ )
+ http_server.daemon = True
+ http_server.name = "Analytics Thread"
+ http_server.start()
+ else:
+ logger.error(
+ "Can't start analytics(), please set enable_analytics=True")
+
+ def mine(
+ self,
+ streamers: list = [],
+ blacklist: list = [],
+ followers: bool = False,
+ followers_order: FollowersOrder = FollowersOrder.ASC,
+ ):
+ self.run(streamers=streamers, blacklist=blacklist, followers=followers)
+
+    def run(
+        self,
+        streamers: list = [],
+        blacklist: list = [],
+        followers: bool = False,
+        followers_order: FollowersOrder = FollowersOrder.ASC,
+    ):
+        """Start the mining session (blocking; loops until end() flips ``running``).
+
+        Steps: log in, resolve the streamer list (explicit entries plus,
+        optionally, the account's follows, minus the blacklist), load the
+        channel-points context, spawn the campaign-sync and minute-watcher
+        threads, subscribe the PubSub topics, then supervise the WebSocket
+        connections forever.
+
+        NOTE(review): the mutable default lists are shared across calls; safe
+        here only because they are read, never mutated.
+        """
+        if self.running:
+            logger.error("You can't start multiple sessions of this instance!")
+        else:
+            logger.info(
+                f"Start session: '{self.session_id}'", extra={"emoji": ":bomb:"}
+            )
+            self.running = True
+            self.start_datetime = datetime.now()
+
+            self.twitch.login()
+
+            if self.claim_drops_startup is True:
+                self.twitch.claim_all_drops_from_inventory()
+
+            # streamers_name preserves order; streamers_dict maps each username
+            # to either the caller-supplied Streamer object or the raw name.
+            streamers_name: list = []
+            streamers_dict: dict = {}
+
+            # Accept both Streamer objects and plain username strings.
+            for streamer in streamers:
+                username = (
+                    streamer.username
+                    if isinstance(streamer, Streamer)
+                    else streamer.lower().strip()
+                )
+                if username not in blacklist:
+                    streamers_name.append(username)
+                    streamers_dict[username] = streamer
+
+            # Optionally append the account's follows (explicit entries win).
+            if followers is True:
+                followers_array = self.twitch.get_followers(
+                    order=followers_order)
+                logger.info(
+                    f"Load {len(followers_array)} followers from your profile!",
+                    extra={"emoji": ":clipboard:"},
+                )
+                for username in followers_array:
+                    if username not in streamers_dict and username not in blacklist:
+                        streamers_name.append(username)
+                        streamers_dict[username] = username.lower().strip()
+
+            logger.info(
+                f"Loading data for {len(streamers_name)} streamers. Please wait...",
+                extra={"emoji": ":nerd_face:"},
+            )
+            for username in streamers_name:
+                # NOTE(review): this check is always True (username comes from
+                # streamers_name itself) — likely a leftover; harmless.
+                if username in streamers_name:
+                    # Small random delay to avoid hammering the Twitch API.
+                    time.sleep(random.uniform(0.3, 0.7))
+                    try:
+                        streamer = (
+                            streamers_dict[username]
+                            if isinstance(streamers_dict[username], Streamer) is True
+                            else Streamer(username)
+                        )
+                        streamer.channel_id = self.twitch.get_channel_id(
+                            username)
+                        # Backfill per-streamer settings from the global defaults.
+                        streamer.settings = set_default_settings(
+                            streamer.settings, Settings.streamer_settings
+                        )
+                        streamer.settings.bet = set_default_settings(
+                            streamer.settings.bet, Settings.streamer_settings.bet
+                        )
+                        if streamer.settings.chat != ChatPresence.NEVER:
+                            streamer.irc_chat = ThreadChat(
+                                self.username,
+                                self.twitch.twitch_login.get_auth_token(),
+                                streamer.username,
+                            )
+                        self.streamers.append(streamer)
+                    except StreamerDoesNotExistException:
+                        logger.info(
+                            f"Streamer {username} does not exist",
+                            extra={"emoji": ":cry:"},
+                        )
+
+            # Populate the streamers with default values.
+            # 1. Load channel points and auto-claim bonus
+            # 2. Check if streamers are online
+            # 3. DEACTIVATED: Check if the user is a moderator. (was used before the 5th of April 2021 to deactivate predictions)
+            for streamer in self.streamers:
+                time.sleep(random.uniform(0.3, 0.7))
+                self.twitch.load_channel_points_context(streamer)
+                self.twitch.check_streamer_online(streamer)
+                # self.twitch.viewer_is_mod(streamer)
+
+            # Snapshot the starting balances; __print_report diffs against these.
+            self.original_streamers = [
+                streamer.channel_points for streamer in self.streamers
+            ]
+
+            # If we have at least one streamer with settings = make_predictions True
+            make_predictions = at_least_one_value_in_settings_is(
+                self.streamers, "make_predictions", True
+            )
+
+            # If we have at least one streamer with settings = claim_drops True
+            # Spawn a thread for sync inventory and dashboard
+            if (
+                at_least_one_value_in_settings_is(
+                    self.streamers, "claim_drops", True)
+                is True
+            ):
+                self.sync_campaigns_thread = threading.Thread(
+                    target=self.twitch.sync_campaigns,
+                    args=(self.streamers,),
+                )
+                self.sync_campaigns_thread.name = "Sync campaigns/inventory"
+                self.sync_campaigns_thread.start()
+                # Give the campaign sync a head start before mining begins.
+                time.sleep(30)
+
+            self.minute_watcher_thread = threading.Thread(
+                target=self.twitch.send_minute_watched_events,
+                args=(self.streamers, self.priority),
+            )
+            self.minute_watcher_thread.name = "Minute watcher"
+            self.minute_watcher_thread.start()
+
+            self.ws_pool = WebSocketsPool(
+                twitch=self.twitch,
+                streamers=self.streamers,
+                events_predictions=self.events_predictions,
+            )
+
+            # Subscribe to community-points-user. Get update for points spent or gains
+            user_id = self.twitch.twitch_login.get_user_id()
+            # print(f"!!!!!!!!!!!!!! USER_ID: {user_id}")
+
+            # Fixes 'ERR_BADAUTH'
+            if not user_id:
+                logger.error("No user_id, exiting...")
+                self.end(0, 0)
+
+            self.ws_pool.submit(
+                PubsubTopic(
+                    "community-points-user-v1",
+                    user_id=user_id,
+                )
+            )
+
+            # Going to subscribe to predictions-user-v1. Get update when we place a new prediction (confirm)
+            if make_predictions is True:
+                self.ws_pool.submit(
+                    PubsubTopic(
+                        "predictions-user-v1",
+                        user_id=user_id,
+                    )
+                )
+
+            # Per-streamer topics: playback is always watched; raid / prediction /
+            # moment topics only when the matching setting is enabled.
+            for streamer in self.streamers:
+                self.ws_pool.submit(
+                    PubsubTopic("video-playback-by-id", streamer=streamer)
+                )
+
+                if streamer.settings.follow_raid is True:
+                    self.ws_pool.submit(PubsubTopic("raid", streamer=streamer))
+
+                if streamer.settings.make_predictions is True:
+                    self.ws_pool.submit(
+                        PubsubTopic("predictions-channel-v1",
+                                    streamer=streamer)
+                    )
+
+                if streamer.settings.claim_moments is True:
+                    self.ws_pool.submit(
+                        PubsubTopic("community-moments-channel-v1",
+                                    streamer=streamer)
+                    )
+
+            # Supervision loop: every 20-60s, reconnect stale WebSockets and,
+            # roughly every 30 minutes, refresh online streamers' points context.
+            refresh_context = time.time()
+            while self.running:
+                time.sleep(random.uniform(20, 60))
+                # Do an external control for WebSocket. Check if the thread is running
+                # Check if is not None because maybe we have already created a new connection on array+1 and now index is None
+                for index in range(0, len(self.ws_pool.ws)):
+                    # elapsed_last_ping() is in minutes (see the log message below);
+                    # only reconnect when the network itself looks healthy.
+                    if (
+                        self.ws_pool.ws[index].is_reconnecting is False
+                        and self.ws_pool.ws[index].elapsed_last_ping() > 10
+                        and internet_connection_available() is True
+                    ):
+                        logger.info(
+                            f"#{index} - The last PING was sent more than 10 minutes ago. Reconnecting to the WebSocket..."
+                        )
+                        WebSocketsPool.handle_reconnection(
+                            self.ws_pool.ws[index])
+
+                if ((time.time() - refresh_context) // 60) >= 30:
+                    refresh_context = time.time()
+                    for index in range(0, len(self.streamers)):
+                        if self.streamers[index].is_online:
+                            self.twitch.load_channel_points_context(
+                                self.streamers[index]
+                            )
+
+    def end(self, signum, frame):
+        """Shutdown handler: leave chats, stop threads, print the recap, exit.
+
+        Installed in __init__ for SIGINT/SIGSEGV/SIGTERM (hence the handler
+        signature) and also called directly from run() with (0, 0) on auth
+        failure. Always terminates the process via sys.exit(0).
+        """
+        logger.info("CTRL+C Detected! Please wait just a moment!")
+
+        # Leave every joined IRC chat and wait for its thread to finish.
+        for streamer in self.streamers:
+            if (
+                streamer.irc_chat is not None
+                and streamer.settings.chat != ChatPresence.NEVER
+            ):
+                streamer.leave_chat()
+                if streamer.irc_chat.is_alive() is True:
+                    streamer.irc_chat.join()
+
+        # Flip the shared run flags first so worker loops exit on their own.
+        self.running = self.twitch.running = False
+        if self.ws_pool is not None:
+            self.ws_pool.end()
+
+        if self.minute_watcher_thread is not None:
+            self.minute_watcher_thread.join()
+
+        if self.sync_campaigns_thread is not None:
+            self.sync_campaigns_thread.join()
+
+        # Wait until every per-streamer mutex is released before exiting, to
+        # avoid tearing a half-written analytics .json file.
+        for streamer in self.streamers:
+            if streamer.mutex.locked():
+                streamer.mutex.acquire()
+                streamer.mutex.release()
+
+        self.__print_report()
+
+        # Stop the queue listener to make sure all messages have been logged
+        self.queue_listener.stop()
+
+        sys.exit(0)
+
+ def __print_report(self):
+ print("\n")
+ logger.info(
+ f"Ending session: '{self.session_id}'", extra={"emoji": ":stop_sign:"}
+ )
+ if self.logs_file is not None:
+ logger.info(
+ f"Logs file: {self.logs_file}", extra={"emoji": ":page_facing_up:"}
+ )
+ logger.info(
+ f"Duration {datetime.now() - self.start_datetime}",
+ extra={"emoji": ":hourglass:"},
+ )
+
+ if self.events_predictions != {}:
+ print("")
+ for event_id in self.events_predictions:
+ event = self.events_predictions[event_id]
+ if (
+ event.bet_confirmed is True
+ and event.streamer.settings.make_predictions is True
+ ):
+ logger.info(
+ f"{event.streamer.settings.bet}",
+ extra={"emoji": ":wrench:"},
+ )
+ if event.streamer.settings.bet.filter_condition is not None:
+ logger.info(
+ f"{event.streamer.settings.bet.filter_condition}",
+ extra={"emoji": ":pushpin:"},
+ )
+ logger.info(
+ f"{event.print_recap()}",
+ extra={"emoji": ":bar_chart:"},
+ )
+
+ print("")
+ for streamer_index in range(0, len(self.streamers)):
+ if self.streamers[streamer_index].history != {}:
+ gained = (
+ self.streamers[streamer_index].channel_points
+ - self.original_streamers[streamer_index]
+ )
+ logger.info(
+ f"{repr(self.streamers[streamer_index])}, Total Points Gained (after farming - before farming): {_millify(gained)}",
+ extra={"emoji": ":robot:"},
+ )
+ if self.streamers[streamer_index].history != {}:
+ logger.info(
+ f"{self.streamers[streamer_index].print_history()}",
+ extra={"emoji": ":moneybag:"},
+ )
diff --git a/TwitchChannelPointsMiner/__init__.py b/TwitchChannelPointsMiner/__init__.py
new file mode 100644
index 0000000..fbd8841
--- /dev/null
+++ b/TwitchChannelPointsMiner/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+__version__ = "1.9.3"
+from .TwitchChannelPointsMiner import TwitchChannelPointsMiner
+
+__all__ = [
+ "TwitchChannelPointsMiner",
+]
diff --git a/TwitchChannelPointsMiner/classes/AnalyticsServer.py b/TwitchChannelPointsMiner/classes/AnalyticsServer.py
new file mode 100644
index 0000000..0aabbe1
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/AnalyticsServer.py
@@ -0,0 +1,295 @@
+import json
+import logging
+import os
+from datetime import datetime
+from pathlib import Path
+from threading import Thread
+
+import pandas as pd
+from flask import Flask, Response, cli, render_template, request
+
+from TwitchChannelPointsMiner.classes.Settings import Settings
+from TwitchChannelPointsMiner.utils import download_file
+
+cli.show_server_banner = lambda *_: None
+logger = logging.getLogger(__name__)
+
+
def streamers_available():
    """Return the analytics JSON filenames found in the analytics folder."""
    path = Settings.analytics_path
    return [
        name
        for name in os.listdir(path)
        if name.endswith(".json") and os.path.isfile(os.path.join(path, name))
    ]
+
+
def aggregate(df, freq="30Min"):
    """Downsample the event dataframe: one row (the max) per `freq` bucket per event type.

    'Watch'/'Claim' events and all other events are grouped separately and
    concatenated, matching the dashboard's series layout.
    """
    is_base_event = df.z.isin(["Watch", "Claim"])

    base = (
        df[is_base_event]
        .groupby([pd.Grouper(freq=freq, key="datetime"), "z"])
        .max()
        .reset_index()
    )
    other = (
        df[~is_base_event]
        .groupby([pd.Grouper(freq=freq, key="datetime"), "z"])
        .max()
        .reset_index()
    )

    return pd.concat([base, other])
+
+
def filter_datas(start_date, end_date, datas):
    """Filter a streamer's analytics payload to the [start_date, end_date] window.

    Args:
        start_date: "YYYY-MM-DD" string or None (open lower bound).
        end_date: "YYYY-MM-DD" string or None (defaults to today, end of day).
        datas: dict with optional "series" / "annotations" lists of
            {"x": epoch_ms, "y": points, "z": label} records.

    Mutates and returns `datas`. If no series point survives the filter, a
    flat two-point 'No Stream' series at the last known balance is
    synthesized so the dashboard still draws a line.
    """
    # Note: JS expects epoch *milliseconds*, hence the * 1000:
    # https://stackoverflow.com/questions/4676195/why-do-i-need-to-multiply-unix-timestamps-by-1000-in-javascript
    start_date = (
        datetime.strptime(start_date, "%Y-%m-%d").timestamp() * 1000
        if start_date is not None
        else 0
    )
    end_date = (
        datetime.strptime(end_date, "%Y-%m-%d")
        if end_date is not None
        else datetime.now()
    ).replace(hour=23, minute=59, second=59).timestamp() * 1000

    # Guard with .get(): partial/older files may lack these keys entirely
    # (the previous unguarded datas["series"] access raised KeyError).
    original_series = datas.get("series") or []

    if original_series:
        df = pd.DataFrame(original_series)
        df["datetime"] = pd.to_datetime(df.x // 1000, unit="s")

        df = df[(df.x >= start_date) & (df.x <= end_date)]

        datas["series"] = (
            df.drop(columns="datetime")
            .sort_values(by=["x", "y"], ascending=True)
            .to_dict("records")
        )
    else:
        datas["series"] = []

    # If no data is found within the timeframe, that usually means the streamer
    # hasn't streamed within it. Synthesize a straight 'No Stream' line at the
    # last balance known *before* the window (0 when there is no prior data —
    # previously this path crashed with IndexError / on an empty frame).
    if not datas["series"]:
        last_balance = 0
        if original_series:
            df = pd.DataFrame(original_series)
            df["datetime"] = pd.to_datetime(df.x // 1000, unit="s")
            before_window = (
                df[(df.x >= 0) & (df.x <= start_date)]
                .drop(columns="datetime")
                .sort_values(by=["x", "y"], ascending=True)
                .to_dict("records")
            )
            if before_window:
                last_balance = before_window[-1]["y"]

        datas["series"] = [
            {"x": start_date, "y": last_balance, "z": "No Stream"},
            {"x": end_date, "y": last_balance, "z": "No Stream"},
        ]

    # An *empty* annotations list previously still went through pd.DataFrame,
    # whose frame has no 'x' column and raised; treat empty and missing alike.
    annotations = datas.get("annotations") or []
    if annotations:
        df = pd.DataFrame(annotations)
        df["datetime"] = pd.to_datetime(df.x // 1000, unit="s")

        df = df[(df.x >= start_date) & (df.x <= end_date)]

        datas["annotations"] = (
            df.drop(columns="datetime")
            .sort_values(by="x", ascending=True)
            .to_dict("records")
        )
    else:
        datas["annotations"] = []

    return datas
+
+
def read_json(streamer, return_response=True):
    """Load a streamer's analytics JSON filtered by the request's date range.

    Returns a Flask JSON Response when `return_response` is True, otherwise
    the raw dict. Missing files and malformed JSON become error payloads
    instead of raising.
    """
    start_date = request.args.get("startDate", type=str)
    end_date = request.args.get("endDate", type=str)

    if not streamer.endswith(".json"):
        streamer = f"{streamer}.json"
    file_path = os.path.join(Settings.analytics_path, streamer)

    def _reply(payload, status):
        # Single place that decides between Response and plain dict.
        if return_response:
            return Response(json.dumps(payload), status=status, mimetype="application/json")
        return payload

    # Check if the file exists before attempting to read it
    if not os.path.exists(file_path):
        error_message = f"File '{streamer}' not found."
        logger.error(error_message)
        return _reply({"error": error_message}, 404)

    try:
        with open(file_path, 'r') as file:
            data = json.load(file)
    except json.JSONDecodeError as e:
        error_message = f"Error decoding JSON in file '{streamer}': {str(e)}"
        logger.error(error_message)
        return _reply({"error": error_message}, 500)

    # Handle filtering data, if applicable
    return _reply(filter_datas(start_date, end_date, data), 200)
+
+
def get_challenge_points(streamer):
    """Return the most recent channel-points balance for `streamer` (0 if unknown)."""
    series = read_json(streamer, return_response=False).get("series")
    return series[-1]["y"] if series else 0
+
+
def get_last_activity(streamer):
    """Return the timestamp (epoch ms) of the streamer's latest data point (0 if unknown)."""
    series = read_json(streamer, return_response=False).get("series")
    return series[-1]["x"] if series else 0
+
+
def json_all():
    """Return every streamer's (date-filtered) analytics data as one JSON array."""
    payload = []
    for filename in streamers_available():
        # str.strip(".json") strips any of the characters '.','j','s','o','n'
        # from BOTH ends (corrupting names like 'nojs' or 'json'); slice the
        # extension off instead.
        name = filename[:-len(".json")] if filename.endswith(".json") else filename
        payload.append(
            {
                "name": name,
                "data": read_json(filename, return_response=False),
            }
        )
    return Response(json.dumps(payload), status=200, mimetype="application/json")
+
+
def index(refresh=5, days_ago=7):
    """Render the dashboard page; `refresh` (minutes) is passed to the JS in ms."""
    refresh_ms = refresh * 60 * 1000
    return render_template("charts.html", refresh=refresh_ms, daysAgo=days_ago)
+
+
def streamers():
    """Return the streamer list (name, current points, last activity) as JSON."""
    entries = []
    for name in sorted(streamers_available()):
        entries.append(
            {
                "name": name,
                "points": get_challenge_points(name),
                "last_activity": get_last_activity(name),
            }
        )
    return Response(json.dumps(entries), status=200, mimetype="application/json")
+
+
def download_assets(assets_folder, required_files):
    """Download any of `required_files` missing from `assets_folder` (created if needed)."""
    Path(assets_folder).mkdir(parents=True, exist_ok=True)
    logger.info(f"Downloading assets to {assets_folder}")

    for filename in required_files:
        destination = os.path.join(assets_folder, filename)
        if os.path.isfile(destination):
            continue
        if download_file(os.path.join("assets", filename), destination) is True:
            logger.info(f"Downloaded {filename}")
+
+
def check_assets():
    """Ensure the web dashboard's static assets exist locally, downloading them if not."""
    required_files = [
        "banner.png",
        "charts.html",
        "script.js",
        "style.css",
        "dark-theme.css",
    ]
    assets_folder = os.path.join(Path().absolute(), "assets")

    if not os.path.isdir(assets_folder):
        logger.info(f"Assets folder not found at {assets_folder}")
        download_assets(assets_folder, required_files)
        return

    # One missing file triggers a (re-)download pass for the whole set.
    missing = next(
        (
            name
            for name in required_files
            if not os.path.isfile(os.path.join(assets_folder, name))
        ),
        None,
    )
    if missing is not None:
        logger.info(f"Missing file {missing} in {assets_folder}")
        download_assets(assets_folder, required_files)
+
# Index into the log file's content up to which lines were already streamed to
# the dashboard's /log endpoint. Shared (module-global) across requests and
# server instances.
last_sent_log_index = 0

class AnalyticsServer(Thread):
    """Background thread that serves the analytics web dashboard with Flask.

    Routes registered in __init__: '/' (charts page), '/streamers',
    '/json/', '/json_all' and '/log'. The blocking Flask dev server is
    started by run(), i.e. when the thread is started.
    """

    def __init__(
        self,
        host: str = "127.0.0.1",
        port: int = 5000,
        refresh: int = 5,
        days_ago: int = 7,
        username: str = None
    ):
        """Build the Flask app and register all routes; serving starts in run().

        Args:
            host/port: bind address of the dashboard.
            refresh: dashboard auto-refresh interval in minutes.
            days_ago: default chart time window in days.
            username: used to locate logs/<username>.log for the /log endpoint.
        """
        super(AnalyticsServer, self).__init__()

        # Make sure the dashboard's template/static files exist before serving.
        check_assets()

        self.host = host
        self.port = port
        self.refresh = refresh
        self.days_ago = days_ago
        self.username = username

        def generate_log():
            """Return log-file content added since the client's last index (closure over `username`)."""
            global last_sent_log_index  # Use the global variable

            # Get the last received log index from the client request parameters
            last_received_index = int(request.args.get("lastIndex", last_sent_log_index))

            logs_path = os.path.join(Path().absolute(), "logs")
            log_file_path = os.path.join(logs_path, f"{username}.log")
            try:
                with open(log_file_path, "r") as log_file:
                    log_content = log_file.read()

                # Extract new log entries since the last received index
                new_log_entries = log_content[last_received_index:]
                last_sent_log_index = len(log_content)  # Update the last sent index

                return Response(new_log_entries, status=200, mimetype="text/plain")

            except FileNotFoundError:
                return Response("Log file not found.", status=404, mimetype="text/plain")

        # Templates and static files both live in ./assets (see check_assets()).
        self.app = Flask(
            __name__,
            template_folder=os.path.join(Path().absolute(), "assets"),
            static_folder=os.path.join(Path().absolute(), "assets"),
        )
        self.app.add_url_rule(
            "/",
            "index",
            index,
            defaults={"refresh": refresh, "days_ago": days_ago},
            methods=["GET"],
        )
        self.app.add_url_rule("/streamers", "streamers",
                              streamers, methods=["GET"])
        # NOTE(review): read_json() takes a `streamer` argument, but this rule
        # declares no URL parameter — looks like it should be "/json/<streamer>";
        # verify against the dashboard JS before changing.
        self.app.add_url_rule(
            "/json/", "json", read_json, methods=["GET"]
        )
        self.app.add_url_rule("/json_all", "json_all",
                              json_all, methods=["GET"])
        self.app.add_url_rule(
            "/log", "log", generate_log, methods=["GET"])

    def run(self):
        """Thread entry point: start the blocking Flask development server."""
        logger.info(
            f"Analytics running on http://{self.host}:{self.port}/",
            extra={"emoji": ":globe_with_meridians:"},
        )
        self.app.run(host=self.host, port=self.port,
                     threaded=True, debug=False)
diff --git a/TwitchChannelPointsMiner/classes/Chat.py b/TwitchChannelPointsMiner/classes/Chat.py
new file mode 100644
index 0000000..ef58d8e
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Chat.py
@@ -0,0 +1,105 @@
+import logging
+import time
+from enum import Enum, auto
+from threading import Thread
+
+from irc.bot import SingleServerIRCBot
+
+from TwitchChannelPointsMiner.constants import IRC, IRC_PORT
+from TwitchChannelPointsMiner.classes.Settings import Events, Settings
+
+logger = logging.getLogger(__name__)
+
+
class ChatPresence(Enum):
    """Policy for when to keep an IRC chat connection open for a streamer.

    Presumably ALWAYS/NEVER are unconditional while ONLINE/OFFLINE tie the
    connection to the streamer's live state — verify against the caller that
    starts/stops ThreadChat.
    """

    ALWAYS = auto()
    NEVER = auto()
    ONLINE = auto()
    OFFLINE = auto()

    def __str__(self) -> str:
        return self.name
+
+
class ClientIRC(SingleServerIRCBot):
    """Minimal IRC bot that joins one Twitch chat channel and logs mentions."""

    def __init__(self, username, token, channel):
        self.token = token
        self.channel = "#" + channel
        self.__active = False

        super(ClientIRC, self).__init__(
            [(IRC, IRC_PORT, f"oauth:{token}")], username, username
        )

    def on_welcome(self, client, event):
        # Join the target channel once the server accepts the connection.
        client.join(self.channel)

    def start(self):
        """Connect and process IRC events until die() is called."""
        self.__active = True
        self._connect()
        while self.__active:
            try:
                self.reactor.process_once(timeout=0.2)
                time.sleep(0.01)
            except Exception as e:
                logger.error(
                    f"Exception raised: {e}. Thread is active: {self.__active}"
                )

    def die(self, msg="Bye, cruel world!"):
        """Disconnect from the server and stop the event loop."""
        self.connection.disconnect(msg)
        self.__active = False

    """
    def on_join(self, connection, event):
        logger.info(f"Event: {event}", extra={"emoji": ":speech_balloon:"})
    """

    def on_pubmsg(self, connection, event):
        """Log any chat message that mentions our nickname."""
        msg = event.arguments[0]

        # `mention` was previously initialized to None and re-checked with
        # `!= None`, but both branches always assign it — simplified.
        nickname = self._nickname.lower()
        if Settings.disable_at_in_nickname is True:
            mention = nickname
        else:
            mention = f"@{nickname}"

        # also self._realname
        if mention in msg.lower():
            # event.source is 'nickname!username@nickname.tmi.twitch.tv'
            nick = event.source.split("!", 1)[0]

            logger.info(f"{nick} at {self.channel} wrote: {msg}", extra={
                "emoji": ":speech_balloon:", "event": Events.CHAT_MENTION})
+
+
class ThreadChat(Thread):
    """Background thread owning a ClientIRC connection for one channel."""

    def __deepcopy__(self, memo):
        # Threads are not copyable; owners that get deep-copied receive None.
        return None

    def __init__(self, username, token, channel):
        super(ThreadChat, self).__init__()

        self.username = username
        self.token = token
        self.channel = channel

        # Created lazily in run() so the connection lives on this thread.
        self.chat_irc = None

    def run(self):
        """Thread entry point: connect to IRC chat and process events until stopped."""
        self.chat_irc = ClientIRC(self.username, self.token, self.channel)
        logger.info(
            f"Join IRC Chat: {self.channel}", extra={"emoji": ":speech_balloon:"}
        )
        self.chat_irc.start()

    def stop(self):
        """Disconnect the IRC client, if it was ever started."""
        if self.chat_irc is None:
            return
        logger.info(
            f"Leave IRC Chat: {self.channel}", extra={"emoji": ":speech_balloon:"}
        )
        self.chat_irc.die()
diff --git a/TwitchChannelPointsMiner/classes/Discord.py b/TwitchChannelPointsMiner/classes/Discord.py
new file mode 100644
index 0000000..00b9670
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Discord.py
@@ -0,0 +1,24 @@
+from textwrap import dedent
+
+import requests
+
+from TwitchChannelPointsMiner.classes.Settings import Events
+
+
class Discord(object):
    """Sends event notifications to a Discord channel through a webhook."""

    __slots__ = ["webhook_api", "events"]

    def __init__(self, webhook_api: str, events: list):
        self.webhook_api = webhook_api
        # Events are compared by their string names in send().
        self.events = [str(event) for event in events]

    def send(self, message: str, event: Events) -> None:
        """POST `message` to the webhook if `event` is subscribed."""
        if str(event) not in self.events:
            return
        payload = {
            "content": dedent(message),
            "username": "Twitch Channel Points Miner",
            "avatar_url": "https://i.imgur.com/X9fEkhT.png",
        }
        requests.post(url=self.webhook_api, data=payload)
diff --git a/TwitchChannelPointsMiner/classes/Exceptions.py b/TwitchChannelPointsMiner/classes/Exceptions.py
new file mode 100644
index 0000000..9f88b60
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Exceptions.py
@@ -0,0 +1,14 @@
class TwitchMinerException(Exception):
    """Base class for all miner-specific errors (callers may catch this to
    handle any of the specific exceptions below)."""


class StreamerDoesNotExistException(TwitchMinerException):
    """Raised when a Twitch username cannot be resolved to a channel."""


class StreamerIsOfflineException(TwitchMinerException):
    """Raised when an operation requires the streamer to be live."""


class WrongCookiesException(TwitchMinerException):
    """Raised when stored session cookies are invalid or expired."""


class BadCredentialsException(TwitchMinerException):
    """Raised when the provided username/password are rejected by Twitch."""
diff --git a/TwitchChannelPointsMiner/classes/Matrix.py b/TwitchChannelPointsMiner/classes/Matrix.py
new file mode 100644
index 0000000..6cfa9ae
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Matrix.py
@@ -0,0 +1,40 @@
+from textwrap import dedent
+
+import logging
+import requests
+from urllib.parse import quote
+
+from TwitchChannelPointsMiner.classes.Settings import Events
+
+
class Matrix(object):
    """Sends event notifications to a Matrix room via the client-server API."""

    __slots__ = ["access_token", "homeserver", "room_id", "events"]

    def __init__(self, username: str, password: str, homeserver: str, room_id: str, events: list):
        self.homeserver = homeserver
        self.room_id = quote(room_id)
        self.events = [str(event) for event in events]
        self.access_token = None

        # Log in eagerly; a network failure here previously propagated out of
        # the constructor and took the whole miner down.
        try:
            body = requests.post(
                url=f"https://{self.homeserver}/_matrix/client/r0/login",
                json={
                    "user": username,
                    "password": password,
                    "type": "m.login.password"
                }
            ).json()
            self.access_token = body.get("access_token")
        except requests.exceptions.RequestException as e:
            logging.getLogger(__name__).error(f"Matrix login request failed: {e}")

        if not self.access_token:
            logging.getLogger(__name__).info("Invalid Matrix password provided. Notifications will not be sent.")

    def send(self, message: str, event: Events) -> None:
        """POST `message` to the configured room if `event` is subscribed."""
        # Without a token the login failed; skip rather than send an
        # unauthenticated request Matrix would reject anyway.
        if self.access_token is None or str(event) not in self.events:
            return
        requests.post(
            url=f"https://{self.homeserver}/_matrix/client/r0/rooms/{self.room_id}/send/m.room.message?access_token={self.access_token}",
            json={
                "body": dedent(message),
                "msgtype": "m.text"
            }
        )
diff --git a/TwitchChannelPointsMiner/classes/Pushover.py b/TwitchChannelPointsMiner/classes/Pushover.py
new file mode 100644
index 0000000..3d5a3c8
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Pushover.py
@@ -0,0 +1,30 @@
+from textwrap import dedent
+
+import requests
+
+from TwitchChannelPointsMiner.classes.Settings import Events
+
+
class Pushover(object):
    """Sends event notifications through the Pushover API."""

    __slots__ = ["userkey", "token", "priority", "sound", "events"]

    def __init__(self, userkey: str, token: str, priority, sound, events: list):
        self.userkey = userkey
        self.token = token
        self.priority = priority
        self.sound = sound
        # Events are compared by their string names in send().
        self.events = [str(event) for event in events]

    def send(self, message: str, event: Events) -> None:
        """POST `message` via Pushover if `event` is subscribed."""
        if str(event) not in self.events:
            return
        requests.post(
            url="https://api.pushover.net/1/messages.json",
            data={
                "user": self.userkey,
                "token": self.token,
                "message": dedent(message),
                "title": "Twitch Channel Points Miner",
                "priority": self.priority,
                "sound": self.sound,
            },
        )
diff --git a/TwitchChannelPointsMiner/classes/Settings.py b/TwitchChannelPointsMiner/classes/Settings.py
new file mode 100644
index 0000000..3db6b62
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Settings.py
@@ -0,0 +1,53 @@
+from enum import Enum, auto
+
+
class Priority(Enum):
    """Strategies for choosing which online streamers to watch first."""

    ORDER = auto()
    STREAK = auto()
    DROPS = auto()
    SUBSCRIBED = auto()
    POINTS_ASCENDING = auto()
    # NOTE(review): "DESCEDING" is a typo for "DESCENDING", but the name is
    # referenced elsewhere in the codebase (e.g. Twitch minute-watched
    # priority handling), so it is kept for compatibility.
    POINTS_DESCEDING = auto()
+
+
class FollowersOrder(Enum):
    """Sort direction used when fetching the followed-channels list."""

    ASC = auto()
    DESC = auto()

    def __str__(self) -> str:
        # Sent verbatim as the GQL "order" variable.
        return self.name
+
+
# Process-wide settings holder shared between classes: attributes are set on
# the class object itself at runtime (e.g. Settings.logger,
# Settings.analytics_path are read elsewhere), so it is never instantiated.
# __slots__ only constrains instances and here mainly documents the expected
# attributes.
class Settings(object):
    __slots__ = ["logger", "streamer_settings",
                 "enable_analytics", "disable_ssl_cert_verification", "disable_at_in_nickname"]
+
+
class Events(Enum):
    """Notification event types that loggers and integrations subscribe to."""

    STREAMER_ONLINE = auto()
    STREAMER_OFFLINE = auto()
    GAIN_FOR_RAID = auto()
    GAIN_FOR_CLAIM = auto()
    GAIN_FOR_WATCH = auto()
    GAIN_FOR_WATCH_STREAK = auto()
    BET_WIN = auto()
    BET_LOSE = auto()
    BET_REFUND = auto()
    BET_FILTERS = auto()
    BET_GENERAL = auto()
    BET_FAILED = auto()
    BET_START = auto()
    BONUS_CLAIM = auto()
    MOMENT_CLAIM = auto()
    JOIN_RAID = auto()
    DROP_CLAIM = auto()
    DROP_STATUS = auto()
    CHAT_MENTION = auto()

    def __str__(self):
        return self.name

    @classmethod
    def get(cls, key):
        """Return the member named str(key), or None if there is no such member.

        Uses __members__ instead of dir(cls): the old dir()-based lookup also
        resolved non-member attributes, so e.g. Events.get("get") returned
        this classmethod itself instead of None.
        """
        return cls.__members__.get(str(key))
diff --git a/TwitchChannelPointsMiner/classes/Telegram.py b/TwitchChannelPointsMiner/classes/Telegram.py
new file mode 100644
index 0000000..c6d9055
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Telegram.py
@@ -0,0 +1,29 @@
+from textwrap import dedent
+
+import requests
+
+from TwitchChannelPointsMiner.classes.Settings import Events
+
+
class Telegram(object):
    """Sends event notifications to a Telegram chat via the Bot API."""

    __slots__ = ["chat_id", "telegram_api", "events", "disable_notification"]

    def __init__(
        self, chat_id: int, token: str, events: list, disable_notification: bool = False
    ):
        self.chat_id = chat_id
        self.telegram_api = f"https://api.telegram.org/bot{token}/sendMessage"
        # Events are compared by their string names in send().
        self.events = [str(event) for event in events]
        self.disable_notification = disable_notification

    def send(self, message: str, event: Events) -> None:
        """POST `message` to the chat if `event` is subscribed."""
        if str(event) not in self.events:
            return
        requests.post(
            url=self.telegram_api,
            data={
                "chat_id": self.chat_id,
                "text": dedent(message),
                "disable_web_page_preview": True,  # include link to twitch streamer?
                "disable_notification": self.disable_notification,  # no sound, notif just in tray
            },
        )
diff --git a/TwitchChannelPointsMiner/classes/Twitch.py b/TwitchChannelPointsMiner/classes/Twitch.py
new file mode 100644
index 0000000..7a9dd47
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Twitch.py
@@ -0,0 +1,859 @@
+# For documentation on Twitch GraphQL API see:
+# https://www.apollographql.com/docs/
+# https://github.com/mauricew/twitch-graphql-api
+# Full list of available methods: https://azr.ivr.fi/schema/query.doc.html (a bit outdated)
+
+
+import copy
+import logging
+import os
+import random
+import re
+import string
+import time
+# from datetime import datetime
+from pathlib import Path
+from secrets import choice, token_hex
+
+# import json
+# from base64 import urlsafe_b64decode
+
+import requests
+
+from TwitchChannelPointsMiner.classes.entities.Campaign import Campaign
+from TwitchChannelPointsMiner.classes.entities.Drop import Drop
+from TwitchChannelPointsMiner.classes.Exceptions import (
+ StreamerDoesNotExistException,
+ StreamerIsOfflineException,
+)
+from TwitchChannelPointsMiner.classes.Settings import (
+ Events,
+ FollowersOrder,
+ Priority,
+ Settings,
+)
+from TwitchChannelPointsMiner.classes.TwitchLogin import TwitchLogin
+from TwitchChannelPointsMiner.constants import (
+ CLIENT_ID,
+ CLIENT_VERSION,
+ URL,
+ GQLOperations,
+)
+from TwitchChannelPointsMiner.utils import (
+ _millify,
+ create_chunks,
+ internet_connection_available,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Twitch(object):
+ __slots__ = [
+ "cookies_file",
+ "user_agent",
+ "twitch_login",
+ "running",
+ "device_id",
+ # "integrity",
+ # "integrity_expire",
+ "client_session",
+ "client_version",
+ "twilight_build_id_pattern",
+ ]
+
    def __init__(self, username, user_agent, password=None):
        """Set up per-session identifiers and the login helper.

        Args:
            username: Twitch login name; also names the cookie file.
            user_agent: User-Agent header sent with every request.
            password: optional password forwarded to the login flow.
        """
        # Session cookies are persisted per-username under ./cookies/<username>.pkl
        cookies_path = os.path.join(Path().absolute(), "cookies")
        Path(cookies_path).mkdir(parents=True, exist_ok=True)
        self.cookies_file = os.path.join(cookies_path, f"{username}.pkl")
        self.user_agent = user_agent
        # Random 32-char alphanumeric device id, sent as the X-Device-Id header.
        self.device_id = "".join(
            choice(string.ascii_letters + string.digits) for _ in range(32)
        )
        self.twitch_login = TwitchLogin(
            CLIENT_ID, self.device_id, username, self.user_agent, password=password
        )
        self.running = True
        # self.integrity = None
        # self.integrity_expire = 0
        self.client_session = token_hex(16)
        self.client_version = CLIENT_VERSION
        # Matches window.__twilightBuildID="<uuid4>" inside the Twitch homepage JS;
        # used by update_client_version() to refresh the Client-Version header.
        self.twilight_build_id_pattern = re.compile(
            r"window\.__twilightBuildID=\"([0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-4[0-9A-Fa-f]{3}-[89ABab][0-9A-Fa-f]{3}-[0-9A-Fa-f]{12})\";"
        )
+
+ def login(self):
+ if not os.path.isfile(self.cookies_file):
+ if self.twitch_login.login_flow():
+ self.twitch_login.save_cookies(self.cookies_file)
+ else:
+ self.twitch_login.load_cookies(self.cookies_file)
+ self.twitch_login.set_token(self.twitch_login.get_auth_token())
+
    # === STREAMER / STREAM / INFO === #
    def update_stream(self, streamer):
        """Refresh a streamer's stream metadata and rebuild the minute-watched payload.

        No-op unless streamer.stream says an update is due. May raise
        StreamerIsOfflineException via get_stream_info(). When drops are
        enabled and a game is set, game info and the campaign ids are added
        so drop progress is tracked against the right campaign.
        """
        if streamer.stream.update_required() is True:
            stream_info = self.get_stream_info(streamer)
            if stream_info is not None:
                streamer.stream.update(
                    broadcast_id=stream_info["stream"]["id"],
                    title=stream_info["broadcastSettings"]["title"],
                    game=stream_info["broadcastSettings"]["game"],
                    tags=stream_info["stream"]["tags"],
                    viewers_count=stream_info["stream"]["viewersCount"],
                )

                # Base telemetry payload sent by send_minute_watched_events().
                event_properties = {
                    "channel_id": streamer.channel_id,
                    "broadcast_id": streamer.stream.broadcast_id,
                    "player": "site",
                    "user_id": self.twitch_login.get_user_id(),
                    "live": True,
                    "channel": streamer.username
                }

                if (
                    streamer.stream.game_name() is not None
                    and streamer.stream.game_id() is not None
                    and streamer.settings.claim_drops is True
                ):
                    event_properties["game"] = streamer.stream.game_name()
                    event_properties["game_id"] = streamer.stream.game_id()
                    # Update also the campaigns_ids so we are sure to tracking the correct campaign
                    streamer.stream.campaigns_ids = (
                        self.__get_campaign_ids_from_streamer(streamer)
                    )

                streamer.stream.payload = [
                    {"event": "minute-watched", "properties": event_properties}
                ]
+
+ def get_spade_url(self, streamer):
+ try:
+ # fixes AttributeError: 'NoneType' object has no attribute 'group'
+ # headers = {"User-Agent": self.user_agent}
+ from TwitchChannelPointsMiner.constants import USER_AGENTS
+ headers = {"User-Agent": USER_AGENTS["Linux"]["FIREFOX"]}
+
+ main_page_request = requests.get(
+ streamer.streamer_url, headers=headers)
+ response = main_page_request.text
+ # logger.info(response)
+ regex_settings = "(https://static.twitchcdn.net/config/settings.*?js)"
+ settings_url = re.search(regex_settings, response).group(1)
+
+ settings_request = requests.get(settings_url, headers=headers)
+ response = settings_request.text
+ regex_spade = '"spade_url":"(.*?)"'
+ streamer.stream.spade_url = re.search(
+ regex_spade, response).group(1)
+ except requests.exceptions.RequestException as e:
+ logger.error(
+ f"Something went wrong during extraction of 'spade_url': {e}")
+
+ def get_broadcast_id(self, streamer):
+ json_data = copy.deepcopy(GQLOperations.WithIsStreamLiveQuery)
+ json_data["variables"] = {"id": streamer.channel_id}
+ response = self.post_gql_request(json_data)
+ if response != {}:
+ stream = response["data"]["user"]["stream"]
+ if stream is not None:
+ return stream["id"]
+ else:
+ raise StreamerIsOfflineException
+
+ def get_stream_info(self, streamer):
+ json_data = copy.deepcopy(
+ GQLOperations.VideoPlayerStreamInfoOverlayChannel)
+ json_data["variables"] = {"channel": streamer.username}
+ response = self.post_gql_request(json_data)
+ if response != {}:
+ if response["data"]["user"]["stream"] is None:
+ raise StreamerIsOfflineException
+ else:
+ return response["data"]["user"]
+
+ def check_streamer_online(self, streamer):
+ if time.time() < streamer.offline_at + 60:
+ return
+
+ if streamer.is_online is False:
+ try:
+ self.get_spade_url(streamer)
+ self.update_stream(streamer)
+ except StreamerIsOfflineException:
+ streamer.set_offline()
+ else:
+ streamer.set_online()
+ else:
+ try:
+ self.update_stream(streamer)
+ except StreamerIsOfflineException:
+ streamer.set_offline()
+
+ def get_channel_id(self, streamer_username):
+ json_data = copy.deepcopy(GQLOperations.ReportMenuItem)
+ json_data["variables"] = {"channelLogin": streamer_username}
+ json_response = self.post_gql_request(json_data)
+ if (
+ "data" not in json_response
+ or "user" not in json_response["data"]
+ or json_response["data"]["user"] is None
+ ):
+ raise StreamerDoesNotExistException
+ else:
+ return json_response["data"]["user"]["id"]
+
+ def get_followers(
+ self, limit: int = 100, order: FollowersOrder = FollowersOrder.ASC
+ ):
+ json_data = copy.deepcopy(GQLOperations.ChannelFollows)
+ json_data["variables"] = {"limit": limit, "order": str(order)}
+ has_next = True
+ last_cursor = ""
+ follows = []
+ while has_next is True:
+ json_data["variables"]["cursor"] = last_cursor
+ json_response = self.post_gql_request(json_data)
+ try:
+ follows_response = json_response["data"]["user"]["follows"]
+ last_cursor = None
+ for f in follows_response["edges"]:
+ follows.append(f["node"]["login"].lower())
+ last_cursor = f["cursor"]
+
+ has_next = follows_response["pageInfo"]["hasNextPage"]
+ except KeyError:
+ return []
+ return follows
+
+ def update_raid(self, streamer, raid):
+ if streamer.raid != raid:
+ streamer.raid = raid
+ json_data = copy.deepcopy(GQLOperations.JoinRaid)
+ json_data["variables"] = {"input": {"raidID": raid.raid_id}}
+ self.post_gql_request(json_data)
+
+ logger.info(
+ f"Joining raid from {streamer} to {raid.target_login}!",
+ extra={"emoji": ":performing_arts:",
+ "event": Events.JOIN_RAID},
+ )
+
+ def viewer_is_mod(self, streamer):
+ json_data = copy.deepcopy(GQLOperations.ModViewChannelQuery)
+ json_data["variables"] = {"channelLogin": streamer.username}
+ response = self.post_gql_request(json_data)
+ try:
+ streamer.viewer_is_mod = response["data"]["user"]["self"]["isModerator"]
+ except (ValueError, KeyError):
+ streamer.viewer_is_mod = False
+
+ # === 'GLOBALS' METHODS === #
+ # Create chunk of sleep of speed-up the break loop after CTRL+C
+ def __chuncked_sleep(self, seconds, chunk_size=3):
+ sleep_time = max(seconds, 0) / chunk_size
+ for i in range(0, chunk_size):
+ time.sleep(sleep_time)
+ if self.running is False:
+ break
+
+ def __check_connection_handler(self, chunk_size):
+ # The success rate It's very hight usually. Why we have failed?
+ # Check internet connection ...
+ while internet_connection_available() is False:
+ random_sleep = random.randint(1, 3)
+ logger.warning(
+ f"No internet connection available! Retry after {random_sleep}m"
+ )
+ self.__chuncked_sleep(random_sleep * 60, chunk_size=chunk_size)
+
    def post_gql_request(self, json_data):
        """POST one GraphQL operation to Twitch's GQL endpoint.

        Returns the decoded JSON body, or {} when the request fails
        (callers treat {} as "no data available").
        """
        try:
            response = requests.post(
                GQLOperations.url,
                json=json_data,
                headers={
                    # Auth token plus client metadata; headers are rebuilt on
                    # every call (update_client_version may refresh the version).
                    "Authorization": f"OAuth {self.twitch_login.get_auth_token()}",
                    "Client-Id": CLIENT_ID,
                    # "Client-Integrity": self.post_integrity(),
                    "Client-Session-Id": self.client_session,
                    "Client-Version": self.update_client_version(),
                    "User-Agent": self.user_agent,
                    "X-Device-Id": self.device_id,
                },
            )
            logger.debug(
                f"Data: {json_data}, Status code: {response.status_code}, Content: {response.text}"
            )
            # NOTE(review): response.json() can raise on a non-JSON body and
            # only RequestException is caught here — verify whether that path
            # needs handling.
            return response.json()
        except requests.exceptions.RequestException as e:
            logger.error(
                f"Error with GQLOperations ({json_data['operationName']}): {e}"
            )
            return {}
+
+ # Request for Integrity Token
+ # Twitch needs Authorization, Client-Id, X-Device-Id to generate JWT which is used for authorize gql requests
+ # Regenerate Integrity Token 5 minutes before expire
+ """def post_integrity(self):
+ if (
+ self.integrity_expire - datetime.now().timestamp() * 1000 > 5 * 60 * 1000
+ and self.integrity is not None
+ ):
+ return self.integrity
+ try:
+ response = requests.post(
+ GQLOperations.integrity_url,
+ json={},
+ headers={
+ "Authorization": f"OAuth {self.twitch_login.get_auth_token()}",
+ "Client-Id": CLIENT_ID,
+ "Client-Session-Id": self.client_session,
+ "Client-Version": self.update_client_version(),
+ "User-Agent": self.user_agent,
+ "X-Device-Id": self.device_id,
+ },
+ )
+ logger.debug(
+ f"Data: [], Status code: {response.status_code}, Content: {response.text}"
+ )
+ self.integrity = response.json().get("token", None)
+ # logger.info(f"integrity: {self.integrity}")
+
+ if self.isBadBot(self.integrity) is True:
+ logger.info(
+ "Uh-oh, Twitch has detected this miner as a \"Bad Bot\". Don't worry.")
+
+ self.integrity_expire = response.json().get("expiration", 0)
+ # logger.info(f"integrity_expire: {self.integrity_expire}")
+ return self.integrity
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error with post_integrity: {e}")
+ return self.integrity
+
+ # verify the integrity token's contents for the "is_bad_bot" flag
+ def isBadBot(self, integrity):
+ stripped_token: str = self.integrity.split('.')[2] + "=="
+ messy_json: str = urlsafe_b64decode(
+ stripped_token.encode()).decode(errors="ignore")
+ match = re.search(r'(.+)(?<="}).+$', messy_json)
+ if match is None:
+ # raise MinerException("Unable to parse the integrity token")
+ logger.info("Unable to parse the integrity token. Don't worry.")
+ return
+ decoded_header = json.loads(match.group(1))
+ # logger.info(f"decoded_header: {decoded_header}")
+ if decoded_header.get("is_bad_bot", "false") != "false":
+ return True
+ else:
+ return False"""
+
+ def update_client_version(self):
+ try:
+ response = requests.get(URL)
+ if response.status_code != 200:
+ logger.debug(
+ f"Error with update_client_version: {response.status_code}"
+ )
+ return self.client_version
+ matcher = re.search(self.twilight_build_id_pattern, response.text)
+ if not matcher:
+ logger.debug("Error with update_client_version: no match")
+ return self.client_version
+ self.client_version = matcher.group(1)
+ logger.debug(f"Client version: {self.client_version}")
+ return self.client_version
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error with update_client_version: {e}")
+ return self.client_version
+
+ def send_minute_watched_events(self, streamers, priority, chunk_size=3):
+ while self.running:
+ try:
+ streamers_index = [
+ i
+ for i in range(0, len(streamers))
+ if streamers[i].is_online is True
+ and (
+ streamers[i].online_at == 0
+ or (time.time() - streamers[i].online_at) > 30
+ )
+ ]
+
+ for index in streamers_index:
+ if (streamers[index].stream.update_elapsed() / 60) > 10:
+ # Why this user It's currently online but the last updated was more than 10minutes ago?
+ # Please perform a manually update and check if the user it's online
+ self.check_streamer_online(streamers[index])
+
+ streamers_watching = []
+ for prior in priority:
+ if prior == Priority.ORDER and len(streamers_watching) < 2:
+ # Get the first 2 items, they are already in order
+ streamers_watching += streamers_index[:2]
+
+ elif (
+ prior in [Priority.POINTS_ASCENDING,
+ Priority.POINTS_DESCEDING]
+ and len(streamers_watching) < 2
+ ):
+ items = [
+ {"points": streamers[index].channel_points,
+ "index": index}
+ for index in streamers_index
+ ]
+ items = sorted(
+ items,
+ key=lambda x: x["points"],
+ reverse=(
+ True if prior == Priority.POINTS_DESCEDING else False
+ ),
+ )
+ streamers_watching += [item["index"]
+ for item in items][:2]
+
+ elif prior == Priority.STREAK and len(streamers_watching) < 2:
+ """
+ Check if we need need to change priority based on watch streak
+ Viewers receive points for returning for x consecutive streams.
+ Each stream must be at least 10 minutes long and it must have been at least 30 minutes since the last stream ended.
+ Watch at least 6m for get the +10
+ """
+ for index in streamers_index:
+ if (
+ streamers[index].settings.watch_streak is True
+ and streamers[index].stream.watch_streak_missing is True
+ and (
+ streamers[index].offline_at == 0
+ or (
+ (time.time() -
+ streamers[index].offline_at)
+ // 60
+ )
+ > 30
+ )
+ and streamers[index].stream.minute_watched < 7 # fix #425
+ ):
+ streamers_watching.append(index)
+ if len(streamers_watching) == 2:
+ break
+
+ elif prior == Priority.DROPS and len(streamers_watching) < 2:
+ for index in streamers_index:
+ if streamers[index].drops_condition() is True:
+ streamers_watching.append(index)
+ if len(streamers_watching) == 2:
+ break
+
+ elif prior == Priority.SUBSCRIBED and len(streamers_watching) < 2:
+ streamers_with_multiplier = [
+ index
+ for index in streamers_index
+ if streamers[index].viewer_has_points_multiplier()
+ ]
+ streamers_with_multiplier = sorted(
+ streamers_with_multiplier,
+ key=lambda x: streamers[x].total_points_multiplier(
+ ),
+ reverse=True,
+ )
+ streamers_watching += streamers_with_multiplier[:2]
+
+ """
+ Twitch has a limit - you can't watch more than 2 channels at one time.
+ We take the first two streamers from the list as they have the highest priority (based on order or WatchStreak).
+ """
+ streamers_watching = streamers_watching[:2]
+
+ for index in streamers_watching:
+ next_iteration = time.time() + 60 / len(streamers_watching)
+
+ try:
+ response = requests.post(
+ streamers[index].stream.spade_url,
+ data=streamers[index].stream.encode_payload(),
+ headers={"User-Agent": self.user_agent},
+ timeout=60,
+ )
+ logger.debug(
+ f"Send minute watched request for {streamers[index]} - Status code: {response.status_code}"
+ )
+ if response.status_code == 204:
+ streamers[index].stream.update_minute_watched()
+
+ """
+ Remember, you can only earn progress towards a time-based Drop on one participating channel at a time. [ ! ! ! ]
+ You can also check your progress towards Drops within a campaign anytime by viewing the Drops Inventory.
+ For time-based Drops, if you are unable to claim the Drop in time, you will be able to claim it from the inventory page until the Drops campaign ends.
+ """
+
+ for campaign in streamers[index].stream.campaigns:
+ for drop in campaign.drops:
+ # We could add .has_preconditions_met condition inside is_printable
+ if (
+ drop.has_preconditions_met is not False
+ and drop.is_printable is True
+ ):
+ drop_messages = [
+ f"{streamers[index]} is streaming {streamers[index].stream}",
+ f"Campaign: {campaign}",
+ f"Drop: {drop}",
+ f"{drop.progress_bar()}",
+ ]
+ for single_line in drop_messages:
+ logger.info(
+ single_line,
+ extra={
+ "event": Events.DROP_STATUS,
+ "skip_telegram": True,
+ "skip_discord": True,
+ "skip_webhook": True,
+ "skip_matrix": True,
+ },
+ )
+
+ if Settings.logger.telegram is not None:
+ Settings.logger.telegram.send(
+ "\n".join(drop_messages),
+ Events.DROP_STATUS,
+ )
+
+ if Settings.logger.discord is not None:
+ Settings.logger.discord.send(
+ "\n".join(drop_messages),
+ Events.DROP_STATUS,
+ )
+ if Settings.logger.webhook is not None:
+ Settings.logger.webhook.send(
+ "\n".join(drop_messages),
+ Events.DROP_STATUS,
+ )
+
+ except requests.exceptions.ConnectionError as e:
+ logger.error(
+ f"Error while trying to send minute watched: {e}")
+ self.__check_connection_handler(chunk_size)
+ except requests.exceptions.Timeout as e:
+ logger.error(
+ f"Error while trying to send minute watched: {e}")
+
+ self.__chuncked_sleep(
+ next_iteration - time.time(), chunk_size=chunk_size
+ )
+
+ if streamers_watching == []:
+ self.__chuncked_sleep(60, chunk_size=chunk_size)
+ except Exception:
+ logger.error(
+ "Exception raised in send minute watched", exc_info=True)
+
+ # === CHANNEL POINTS / PREDICTION === #
+ # Load the amount of current points for a channel, check if a bonus is available
+ def load_channel_points_context(self, streamer):
+ json_data = copy.deepcopy(GQLOperations.ChannelPointsContext)
+ json_data["variables"] = {"channelLogin": streamer.username}
+
+ response = self.post_gql_request(json_data)
+ if response != {}:
+ if response["data"]["community"] is None:
+ raise StreamerDoesNotExistException
+ channel = response["data"]["community"]["channel"]
+ community_points = channel["self"]["communityPoints"]
+ streamer.channel_points = community_points["balance"]
+ streamer.activeMultipliers = community_points["activeMultipliers"]
+
+ if community_points["availableClaim"] is not None:
+ self.claim_bonus(
+ streamer, community_points["availableClaim"]["id"])
+
    def make_predictions(self, event):
        """Place (or skip) a channel-points bet on a prediction event.

        The outcome and amount come from event.bet.calculate(). The bet is
        skipped when the user's filter settings say so, when the event is no
        longer ACTIVE, or when the amount is below Twitch's 10-point minimum.
        """
        decision = event.bet.calculate(event.streamer.channel_points)
        # selector_index = 0 if decision["choice"] == "A" else 1

        logger.info(
            f"Going to complete bet for {event}",
            extra={
                "emoji": ":four_leaf_clover:",
                "event": Events.BET_GENERAL,
            },
        )
        if event.status == "ACTIVE":
            # The user's filter settings may veto this bet entirely.
            skip, compared_value = event.bet.skip()
            if skip is True:
                logger.info(
                    f"Skip betting for the event {event}",
                    extra={
                        "emoji": ":pushpin:",
                        "event": Events.BET_FILTERS,
                    },
                )
                logger.info(
                    f"Skip settings {event.bet.settings.filter_condition}, current value is: {compared_value}",
                    extra={
                        "emoji": ":pushpin:",
                        "event": Events.BET_FILTERS,
                    },
                )
            else:
                # Twitch rejects predictions below 10 points.
                if decision["amount"] >= 10:
                    logger.info(
                        # f"Place {_millify(decision['amount'])} channel points on: {event.bet.get_outcome(selector_index)}",
                        f"Place {_millify(decision['amount'])} channel points on: {event.bet.get_outcome(decision['choice'])}",
                        extra={
                            "emoji": ":four_leaf_clover:",
                            "event": Events.BET_GENERAL,
                        },
                    )

                    json_data = copy.deepcopy(GQLOperations.MakePrediction)
                    json_data["variables"] = {
                        "input": {
                            "eventID": event.event_id,
                            "outcomeID": decision["id"],
                            "points": decision["amount"],
                            # Random idempotency key for this prediction.
                            "transactionID": token_hex(16),
                        }
                    }
                    response = self.post_gql_request(json_data)
                    # Surface any server-side rejection (e.g. NOT_ENOUGH_POINTS).
                    if (
                        "data" in response
                        and "makePrediction" in response["data"]
                        and "error" in response["data"]["makePrediction"]
                        and response["data"]["makePrediction"]["error"] is not None
                    ):
                        error_code = response["data"]["makePrediction"]["error"]["code"]
                        logger.error(
                            f"Failed to place bet, error: {error_code}",
                            extra={
                                "emoji": ":four_leaf_clover:",
                                "event": Events.BET_FAILED,
                            },
                        )
                else:
                    logger.info(
                        f"Bet won't be placed as the amount {_millify(decision['amount'])} is less than the minimum required 10",
                        extra={
                            "emoji": ":four_leaf_clover:",
                            "event": Events.BET_GENERAL,
                        },
                    )
        else:
            logger.info(
                f"Oh no! The event is not active anymore! Current status: {event.status}",
                extra={
                    "emoji": ":disappointed_relieved:",
                    "event": Events.BET_FAILED,
                },
            )
+
+ def claim_bonus(self, streamer, claim_id):
+ if Settings.logger.less is False:
+ logger.info(
+ f"Claiming the bonus for {streamer}!",
+ extra={"emoji": ":gift:", "event": Events.BONUS_CLAIM},
+ )
+
+ json_data = copy.deepcopy(GQLOperations.ClaimCommunityPoints)
+ json_data["variables"] = {
+ "input": {"channelID": streamer.channel_id, "claimID": claim_id}
+ }
+ self.post_gql_request(json_data)
+
+ # === MOMENTS === #
+ def claim_moment(self, streamer, moment_id):
+ if Settings.logger.less is False:
+ logger.info(
+ f"Claiming the moment for {streamer}!",
+ extra={"emoji": ":video_camera:",
+ "event": Events.MOMENT_CLAIM},
+ )
+
+ json_data = copy.deepcopy(GQLOperations.CommunityMomentCallout_Claim)
+ json_data["variables"] = {
+ "input": {"momentID": moment_id}
+ }
+ self.post_gql_request(json_data)
+
+ # === CAMPAIGNS / DROPS / INVENTORY === #
+ def __get_campaign_ids_from_streamer(self, streamer):
+ json_data = copy.deepcopy(
+ GQLOperations.DropsHighlightService_AvailableDrops)
+ json_data["variables"] = {"channelID": streamer.channel_id}
+ response = self.post_gql_request(json_data)
+ try:
+ return (
+ []
+ if response["data"]["channel"]["viewerDropCampaigns"] is None
+ else [
+ item["id"]
+ for item in response["data"]["channel"]["viewerDropCampaigns"]
+ ]
+ )
+ except (ValueError, KeyError):
+ return []
+
+ def __get_inventory(self):
+ response = self.post_gql_request(GQLOperations.Inventory)
+ try:
+ return (
+ response["data"]["currentUser"]["inventory"] if response != {} else {}
+ )
+ except (ValueError, KeyError, TypeError):
+ return {}
+
+ def __get_drops_dashboard(self, status=None):
+ response = self.post_gql_request(GQLOperations.ViewerDropsDashboard)
+ campaigns = response["data"]["currentUser"]["dropCampaigns"] or []
+
+ if status is not None:
+ campaigns = list(
+ filter(lambda x: x["status"] == status.upper(), campaigns)) or []
+
+ return campaigns
+
+ def __get_campaigns_details(self, campaigns):
+ result = []
+ chunks = create_chunks(campaigns, 20)
+ for chunk in chunks:
+ json_data = []
+ for campaign in chunk:
+ json_data.append(copy.deepcopy(
+ GQLOperations.DropCampaignDetails))
+ json_data[-1]["variables"] = {
+ "dropID": campaign["id"],
+ "channelLogin": f"{self.twitch_login.get_user_id()}",
+ }
+
+ response = self.post_gql_request(json_data)
+ for r in response:
+ if r["data"]["user"] is not None:
+ result.append(r["data"]["user"]["dropCampaign"])
+ return result
+
    def __sync_campaigns(self, campaigns):
        """Sync each dashboard campaign with the real progress stored in the
        user's inventory.

        Campaigns found in the inventory get in_inventory=True and their
        drops synced (claiming via self.claim_drop where possible); claimed
        drops are removed. Returns the updated campaigns list.
        """
        # We need the inventory only for get the real updated value/progress
        # Get data from inventory and sync current status with streamers.campaigns
        inventory = self.__get_inventory()
        if inventory not in [None, {}] and inventory["dropCampaignsInProgress"] not in [
            None,
            {},
        ]:
            # Iterate all campaigns from dashboard (only active, with working drops)
            # In this array we have also the campaigns never started from us (not in inventory)
            for i in range(len(campaigns)):
                campaigns[i].clear_drops()  # Remove all the claimed drops
                # Iterate all campaigns currently in progress from our inventory
                for progress in inventory["dropCampaignsInProgress"]:
                    if progress["id"] == campaigns[i].id:
                        campaigns[i].in_inventory = True
                        campaigns[i].sync_drops(
                            progress["timeBasedDrops"], self.claim_drop
                        )
                        # Remove all the claimed drops
                        campaigns[i].clear_drops()
                        break
        return campaigns
+
+ def claim_drop(self, drop):
+ logger.info(
+ f"Claim {drop}", extra={"emoji": ":package:", "event": Events.DROP_CLAIM}
+ )
+
+ json_data = copy.deepcopy(GQLOperations.DropsPage_ClaimDropRewards)
+ json_data["variables"] = {
+ "input": {"dropInstanceID": drop.drop_instance_id}}
+ response = self.post_gql_request(json_data)
+ try:
+ # response["data"]["claimDropRewards"] can be null and respose["data"]["errors"] != []
+ # or response["data"]["claimDropRewards"]["status"] === DROP_INSTANCE_ALREADY_CLAIMED
+ if ("claimDropRewards" in response["data"]) and (
+ response["data"]["claimDropRewards"] is None
+ ):
+ return False
+ elif ("errors" in response["data"]) and (response["data"]["errors"] != []):
+ return False
+ elif ("claimDropRewards" in response["data"]) and (
+ response["data"]["claimDropRewards"]["status"]
+ in ["ELIGIBLE_FOR_ALL", "DROP_INSTANCE_ALREADY_CLAIMED"]
+ ):
+ return True
+ else:
+ return False
+ except (ValueError, KeyError):
+ return False
+
+ def claim_all_drops_from_inventory(self):
+ inventory = self.__get_inventory()
+ if inventory not in [None, {}]:
+ if inventory["dropCampaignsInProgress"] not in [None, {}]:
+ for campaign in inventory["dropCampaignsInProgress"]:
+ for drop_dict in campaign["timeBasedDrops"]:
+ drop = Drop(drop_dict)
+ drop.update(drop_dict["self"])
+ if drop.is_claimable is True:
+ drop.is_claimed = self.claim_drop(drop)
+ time.sleep(random.uniform(5, 10))
+
+ def sync_campaigns(self, streamers, chunk_size=3):
+ campaigns_update = 0
+ while self.running:
+ try:
+ # Get update from dashboard each 60minutes
+ if (
+ campaigns_update == 0
+ # or ((time.time() - campaigns_update) / 60) > 60
+
+ # TEMPORARY AUTO DROP CLAIMING FIX
+ # 30 minutes instead of 60 minutes
+ or ((time.time() - campaigns_update) / 30) > 30
+ #####################################
+ ):
+ campaigns_update = time.time()
+
+ # TEMPORARY AUTO DROP CLAIMING FIX
+ self.claim_all_drops_from_inventory()
+ #####################################
+
+ # Get full details from current ACTIVE campaigns
+ # Use dashboard so we can explore new drops not currently active in our Inventory
+ campaigns_details = self.__get_campaigns_details(
+ self.__get_drops_dashboard(status="ACTIVE")
+ )
+ campaigns = []
+
+ # Going to clear array and structure. Remove all the timeBasedDrops expired or not started yet
+ for index in range(0, len(campaigns_details)):
+ if campaigns_details[index] is not None:
+ campaign = Campaign(campaigns_details[index])
+ if campaign.dt_match is True:
+ # Remove all the drops already claimed or with dt not matching
+ campaign.clear_drops()
+ if campaign.drops != []:
+ campaigns.append(campaign)
+ else:
+ continue
+
+ # Divide et impera :)
+ campaigns = self.__sync_campaigns(campaigns)
+
+ # Check if user It's currently streaming the same game present in campaigns_details
+ for i in range(0, len(streamers)):
+ if streamers[i].drops_condition() is True:
+ # yes! The streamer[i] have the drops_tags enabled and we It's currently stream a game with campaign active!
+ # With 'campaigns_ids' we are also sure that this streamer have the campaign active.
+ # yes! The streamer[index] have the drops_tags enabled and we It's currently stream a game with campaign active!
+ streamers[i].stream.campaigns = list(
+ filter(
+ lambda x: x.drops != []
+ and x.game == streamers[i].stream.game
+ and x.id in streamers[i].stream.campaigns_ids,
+ campaigns,
+ )
+ )
+
+ except (ValueError, KeyError, requests.exceptions.ConnectionError) as e:
+ logger.error(f"Error while syncing inventory: {e}")
+ self.__check_connection_handler(chunk_size)
+
+ self.__chuncked_sleep(60, chunk_size=chunk_size)
diff --git a/TwitchChannelPointsMiner/classes/TwitchLogin.py b/TwitchChannelPointsMiner/classes/TwitchLogin.py
new file mode 100644
index 0000000..d8784ad
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/TwitchLogin.py
@@ -0,0 +1,360 @@
+# Based on https://github.com/derrod/twl.py
+# Original Copyright (c) 2020 Rodney
+# The MIT License (MIT)
+
+import copy
+# import getpass
+import logging
+import os
+import pickle
+
+# import webbrowser
+# import browser_cookie3
+
+import requests
+
+from TwitchChannelPointsMiner.classes.Exceptions import (
+ BadCredentialsException,
+ WrongCookiesException,
+)
+from TwitchChannelPointsMiner.constants import CLIENT_ID, GQLOperations, USER_AGENTS
+
+from datetime import datetime, timedelta, timezone
+from time import sleep
+
+logger = logging.getLogger(__name__)
+
+"""def interceptor(request) -> str:
+ if (
+ request.method == 'POST'
+ and request.url == 'https://passport.twitch.tv/protected_login'
+ ):
+ import json
+ body = request.body.decode('utf-8')
+ data = json.loads(body)
+ data['client_id'] = CLIENT_ID
+ request.body = json.dumps(data).encode('utf-8')
+ del request.headers['Content-Length']
+ request.headers['Content-Length'] = str(len(request.body))"""
+
+
class TwitchLogin(object):
    """Twitch authentication handler.

    The primary flow is the OAuth "device code" (smart-TV) grant: the user
    opens https://www.twitch.tv/activate and types a short code. A backup
    flow imports the auth cookies from a local Chrome/Firefox profile.
    Tokens and cookies are persisted with pickle (save_cookies/load_cookies).
    """

    # BUGFIX: "session" was listed twice; the duplicate wasted a slot.
    __slots__ = [
        "client_id",
        "device_id",
        "token",
        "login_check_result",
        "session",
        "username",
        "password",
        "user_id",
        "email",
        "cookies",
        "shared_cookies",
    ]

    def __init__(self, client_id, device_id, username, user_agent, password=None):
        self.client_id = client_id
        self.device_id = device_id
        self.token = None
        self.login_check_result = False
        self.session = requests.session()
        self.session.headers.update(
            {"Client-ID": self.client_id,
             "X-Device-Id": self.device_id, "User-Agent": user_agent}
        )
        self.username = username
        self.password = password
        self.user_id = None
        self.email = None

        # List of {"name": ..., "value": ...} dicts (see save/load_cookies).
        self.cookies = []
        self.shared_cookies = []

    def login_flow(self):
        """Run the TV device-code login until it succeeds or fails.

        Returns check_login() on success, False otherwise. Raises
        NotImplementedError on an unrecognized TwitchAPI error payload.
        """
        logger.info("You'll have to login to Twitch!")

        post_data = {
            "client_id": self.client_id,
            "scopes": (
                "channel_read chat:read user_blocks_edit "
                "user_blocks_read user_follows_edit user_read"
            )
        }
        # login-fix
        use_backup_flow = False
        # use_backup_flow = True
        while True:
            logger.info("Trying the TV login method..")

            login_response = self.send_oauth_request(
                "https://id.twitch.tv/oauth2/device", post_data)

            # {
            #     "device_code": "40 chars [A-Za-z0-9]",
            #     "expires_in": 1800,
            #     "interval": 5,
            #     "user_code": "8 chars [A-Z]",
            #     "verification_uri": "https://www.twitch.tv/activate"
            # }

            if login_response.status_code != 200:
                logger.error("TV login response is not 200. Try again")
                break

            login_response_json = login_response.json()

            if "user_code" in login_response_json:
                user_code: str = login_response_json["user_code"]
                device_code: str = login_response_json["device_code"]
                interval: int = login_response_json["interval"]
                expires_at = datetime.now(timezone.utc) + \
                    timedelta(seconds=login_response_json["expires_in"])
                logger.info(
                    "Open https://www.twitch.tv/activate"
                )
                logger.info(
                    f"and enter this code: {user_code}"
                )
                logger.info(
                    f"Hurry up! It will expire in {int(login_response_json['expires_in'] / 60)} minutes!"
                )

                post_data = {
                    "client_id": CLIENT_ID,
                    "device_code": device_code,
                    "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                }

                while True:
                    # sleep first, not like the user is gonna enter the code *that* fast
                    sleep(interval)
                    login_response = self.send_oauth_request(
                        "https://id.twitch.tv/oauth2/token", post_data)
                    # BUGFIX: this used to compare a stale timestamp captured
                    # before the loop with `==`, so the code never expired here.
                    if datetime.now(timezone.utc) >= expires_at:
                        logger.error("Code expired. Try again")
                        break
                    # 200 means success, 400 means the user hasn't entered the code yet
                    if login_response.status_code != 200:
                        continue
                    # {
                    #     "access_token": "40 chars [A-Za-z0-9]",
                    #     "refresh_token": "40 chars [A-Za-z0-9]",
                    #     "scope": [...],
                    #     "token_type": "bearer"
                    # }
                    login_response_json = login_response.json()
                    if "access_token" in login_response_json:
                        self.set_token(login_response_json["access_token"])
                        return self.check_login()
            else:
                # BUGFIX: the error code lives in the decoded JSON body, not on
                # the Response object, and err_code could be unbound on raise.
                err_code = login_response_json.get("error_code")
                logger.error(f"Unknown error: {login_response_json}")
                raise NotImplementedError(
                    f"Unknown TwitchAPI error code: {err_code}"
                )

            if use_backup_flow:
                break

        if use_backup_flow:
            # self.set_token(self.login_flow_backup(password))
            self.set_token(self.login_flow_backup())
            return self.check_login()

        return False

    def set_token(self, new_token):
        """Store the OAuth token and attach it to every session request."""
        self.token = new_token
        self.session.headers.update({"Authorization": f"Bearer {self.token}"})

    def send_oauth_request(self, url, json_data):
        """POST *json_data* (form-encoded) to an id.twitch.tv OAuth endpoint,
        impersonating the Android TV client. Returns the raw Response."""
        response = self.session.post(url, data=json_data, headers={
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip',
            'Accept-Language': 'en-US',
            "Cache-Control": "no-cache",
            "Client-Id": CLIENT_ID,
            "Host": "id.twitch.tv",
            "Origin": "https://android.tv.twitch.tv",
            "Pragma": "no-cache",
            "Referer": "https://android.tv.twitch.tv/",
            "User-Agent": USER_AGENTS["Android"]["TV"],
            "X-Device-Id": self.device_id
        },)
        return response

    def login_flow_backup(self, password=None):
        """Backup OAuth login flow in case manual captcha solving is required.

        Reads the Twitch auth cookies from a local Chrome/Firefox profile
        after the user has logged in with a normal browser session.
        """
        # NOTE(review): browser_cookie3 is referenced below but its import is
        # commented out at the top of this module, so this path raises
        # NameError unless that import is restored - TODO confirm intent.
        browser = input(
            "What browser do you use? Chrome (1), Firefox (2), Other (3): "
        ).strip()
        if browser not in ("1", "2"):
            logger.info("Your browser is unsupported, sorry.")
            return None

        input(
            "Please login inside your browser of choice (NOT incognito mode) and press Enter..."
        )
        logger.info("Loading cookies saved on your computer...")
        twitch_domain = ".twitch.tv"
        if browser == "1":  # chrome
            cookie_jar = browser_cookie3.chrome(domain_name=twitch_domain)
        else:
            cookie_jar = browser_cookie3.firefox(domain_name=twitch_domain)
        cookies_dict = requests.utils.dict_from_cookiejar(cookie_jar)
        self.username = cookies_dict.get("login")
        self.shared_cookies = cookies_dict
        return cookies_dict.get("auth-token")

    def check_login(self):
        """Return True once the login has been verified (user id resolved);
        caches the positive result."""
        if self.login_check_result:
            return self.login_check_result
        if self.token is None:
            return False

        self.login_check_result = self.__set_user_id()
        return self.login_check_result

    def save_cookies(self, cookies_file):
        """Persist the session cookies plus auth-token/persistent to disk."""
        logger.info("Saving cookies to your computer..")
        cookies_dict = self.session.cookies.get_dict()
        cookies_dict["auth-token"] = self.token
        if "persistent" not in cookies_dict:  # saving user id cookies
            cookies_dict["persistent"] = self.user_id

        self.cookies = []
        for cookie_name, value in cookies_dict.items():
            self.cookies.append({"name": cookie_name, "value": value})
        # Context manager so the file handle is closed even on pickling errors
        with open(cookies_file, "wb") as f:
            pickle.dump(self.cookies, f)

    def get_cookie_value(self, key):
        """Return the value of the first cookie named *key*, or None."""
        for cookie in self.cookies:
            if cookie["name"] == key:
                if cookie["value"] is not None:
                    return cookie["value"]
        return None

    def load_cookies(self, cookies_file):
        """Load previously saved cookies from *cookies_file*.

        Raises WrongCookiesException when the file does not exist.
        """
        if os.path.isfile(cookies_file):
            with open(cookies_file, "rb") as f:
                self.cookies = pickle.load(f)
        else:
            raise WrongCookiesException("There must be a cookies file!")

    def get_user_id(self):
        """Return the numeric user id from the 'persistent' cookie, falling
        back to the cached value or a GQL lookup when it is missing."""
        persistent = self.get_cookie_value("persistent")
        user_id = (
            int(persistent.split("%")[
                0]) if persistent is not None else self.user_id
        )
        if user_id is None:
            if self.__set_user_id() is True:
                return self.user_id
        return user_id

    def __set_user_id(self):
        """Resolve self.user_id for self.username via GQL; True on success."""
        json_data = copy.deepcopy(GQLOperations.ReportMenuItem)
        json_data["variables"] = {"channelLogin": self.username}
        response = self.session.post(GQLOperations.url, json=json_data)

        if response.status_code == 200:
            json_response = response.json()
            if (
                "data" in json_response
                and "user" in json_response["data"]
                and json_response["data"]["user"]["id"] is not None
            ):
                self.user_id = json_response["data"]["user"]["id"]
                return True
        return False

    def get_auth_token(self):
        """Return the saved 'auth-token' cookie value (the OAuth token)."""
        return self.get_cookie_value("auth-token")
diff --git a/TwitchChannelPointsMiner/classes/TwitchWebSocket.py b/TwitchChannelPointsMiner/classes/TwitchWebSocket.py
new file mode 100644
index 0000000..f81a398
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/TwitchWebSocket.py
@@ -0,0 +1,65 @@
+import json
+import logging
+import time
+
+from websocket import WebSocketApp, WebSocketConnectionClosedException
+
+from TwitchChannelPointsMiner.utils import create_nonce
+
+logger = logging.getLogger(__name__)
+
+
class TwitchWebSocket(WebSocketApp):
    """A single PubSub connection to Twitch, tracking its own topics and
    ping/pong bookkeeping on top of websocket.WebSocketApp."""

    def __init__(self, index, parent_pool, *args, **kw):
        super().__init__(*args, **kw)
        # Position of this socket inside the parent pool's ws list.
        self.index = index

        self.parent_pool = parent_pool
        # Connection-state flags driven by the pool's callbacks.
        self.is_closed = False
        self.is_opened = False

        self.is_reconnecting = False
        self.forced_close = False

        # Custom attribute
        self.topics = []
        self.pending_topics = []

        # Shortcuts into the parent pool's shared state.
        self.twitch = parent_pool.twitch
        self.streamers = parent_pool.streamers
        self.events_predictions = parent_pool.events_predictions

        # Used to de-duplicate messages received on multiple sockets.
        self.last_message_timestamp = None
        self.last_message_type_channel = None

        self.last_pong = time.time()
        self.last_ping = time.time()

    # def close(self):
    #     self.forced_close = True
    #     super().close()

    def listen(self, topic, auth_token=None):
        """Send a LISTEN request for *topic*, attaching the auth token for
        user-scoped topics."""
        data = {"topics": [str(topic)]}
        if topic.is_user_topic() and auth_token is not None:
            data["auth_token"] = auth_token
        nonce = create_nonce()
        self.send({"type": "LISTEN", "nonce": nonce, "data": data})

    def ping(self):
        # Keep-alive: PubSub drops connections without periodic PINGs.
        self.send({"type": "PING"})
        self.last_ping = time.time()

    def send(self, request):
        """JSON-encode *request* and send it; mark the socket closed if the
        underlying connection is already gone."""
        try:
            request_str = json.dumps(request, separators=(",", ":"))
            logger.debug(f"#{self.index} - Send: {request_str}")
            super().send(request_str)
        except WebSocketConnectionClosedException:
            self.is_closed = True

    def elapsed_last_pong(self):
        # Whole minutes since the last PONG was received.
        return (time.time() - self.last_pong) // 60

    def elapsed_last_ping(self):
        # Whole minutes since the last PING was sent.
        return (time.time() - self.last_ping) // 60
diff --git a/TwitchChannelPointsMiner/classes/WebSocketsPool.py b/TwitchChannelPointsMiner/classes/WebSocketsPool.py
new file mode 100644
index 0000000..7af4c48
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/WebSocketsPool.py
@@ -0,0 +1,434 @@
+import json
+import logging
+import random
+import time
+# import os
+from threading import Thread, Timer
+# from pathlib import Path
+
+from dateutil import parser
+
+from TwitchChannelPointsMiner.classes.entities.EventPrediction import EventPrediction
+from TwitchChannelPointsMiner.classes.entities.Message import Message
+from TwitchChannelPointsMiner.classes.entities.Raid import Raid
+from TwitchChannelPointsMiner.classes.Settings import Events, Settings
+from TwitchChannelPointsMiner.classes.TwitchWebSocket import TwitchWebSocket
+from TwitchChannelPointsMiner.constants import WEBSOCKET
+from TwitchChannelPointsMiner.utils import (
+ get_streamer_index,
+ internet_connection_available,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class WebSocketsPool:
+ __slots__ = ["ws", "twitch", "streamers", "events_predictions"]
+
    def __init__(self, twitch, streamers, events_predictions):
        # Active TwitchWebSocket instances; each carries up to 50 topics.
        self.ws = []
        self.twitch = twitch
        self.streamers = streamers
        self.events_predictions = events_predictions
+
+ """
+ API Limits
+ - Clients can listen to up to 50 topics per connection. Trying to listen to more topics will result in an error message.
+ - We recommend that a single client IP address establishes no more than 10 simultaneous connections.
+ The two limits above are likely to be relaxed for approved third-party applications, as we start to better understand third-party requirements.
+ """
+
+ def submit(self, topic):
+ # Check if we need to create a new WebSocket instance
+ if self.ws == [] or len(self.ws[-1].topics) >= 50:
+ self.ws.append(self.__new(len(self.ws)))
+ self.__start(-1)
+
+ self.__submit(-1, topic)
+
+ def __submit(self, index, topic):
+ # Topic in topics should never happen. Anyway prevent any types of duplicates
+ if topic not in self.ws[index].topics:
+ self.ws[index].topics.append(topic)
+
+ if self.ws[index].is_opened is False:
+ self.ws[index].pending_topics.append(topic)
+ else:
+ self.ws[index].listen(topic, self.twitch.twitch_login.get_auth_token())
+
    def __new(self, index):
        """Build (but do not start) a TwitchWebSocket wired to this pool's
        static callbacks."""
        return TwitchWebSocket(
            index=index,
            parent_pool=self,
            url=WEBSOCKET,
            on_message=WebSocketsPool.on_message,
            on_open=WebSocketsPool.on_open,
            on_error=WebSocketsPool.on_error,
            on_close=WebSocketsPool.on_close
            # on_close=WebSocketsPool.handle_reconnection, # Do nothing.
        )
+
+ def __start(self, index):
+ if Settings.disable_ssl_cert_verification is True:
+ import ssl
+
+ thread_ws = Thread(
+ target=lambda: self.ws[index].run_forever(
+ sslopt={"cert_reqs": ssl.CERT_NONE}
+ )
+ )
+ logger.warn("SSL certificate verification is disabled! Be aware!")
+ else:
+ thread_ws = Thread(target=lambda: self.ws[index].run_forever())
+ thread_ws.daemon = True
+ thread_ws.name = f"WebSocket #{self.ws[index].index}"
+ thread_ws.start()
+
+ def end(self):
+ for index in range(0, len(self.ws)):
+ self.ws[index].forced_close = True
+ self.ws[index].close()
+
    @staticmethod
    def on_open(ws):
        """WebSocketApp callback: flush pending topics, then keep the
        connection alive with periodic PINGs on a background thread."""
        def run():
            ws.is_opened = True
            ws.ping()

            # Topics submitted before the socket opened are listened now.
            for topic in ws.pending_topics:
                ws.listen(topic, ws.twitch.twitch_login.get_auth_token())

            while ws.is_closed is False:
                # Else: the ws is currently in reconnecting phase, you can't do ping or other operation.
                # Probably this ws will be closed very soon with ws.is_closed = True
                if ws.is_reconnecting is False:
                    ws.ping()  # We need ping for keep the connection alive
                    time.sleep(random.uniform(25, 30))

                    if ws.elapsed_last_pong() > 5:
                        logger.info(
                            f"#{ws.index} - The last PONG was received more than 5 minutes ago"
                        )
                        WebSocketsPool.handle_reconnection(ws)

        thread_ws = Thread(target=run)
        thread_ws.daemon = True
        thread_ws.start()
+
    @staticmethod
    def on_error(ws, error):
        """WebSocketApp callback: log the error; reconnection itself is
        handled by on_close."""
        # Connection lost | [WinError 10054] An existing connection was forcibly closed by the remote host
        # Connection already closed | Connection is already closed (raise WebSocketConnectionClosedException)
        logger.error(f"#{ws.index} - WebSocket error: {error}")
+
    @staticmethod
    def on_close(ws, close_status_code, close_reason):
        """WebSocketApp callback: always attempt to re-establish the
        connection (handle_reconnection no-ops on forced_close)."""
        logger.info(f"#{ws.index} - WebSocket closed")
        # On close please reconnect automatically
        WebSocketsPool.handle_reconnection(ws)
+
    @staticmethod
    def handle_reconnection(ws):
        """Replace a dead socket with a fresh one at the same pool index and
        re-submit all of its topics.

        No-op when the socket is already reconnecting; no new socket is
        created when it was closed on purpose (ws.forced_close is True).
        """
        # Reconnect only if ws.is_reconnecting is False to prevent more than 1 ws from being created
        if ws.is_reconnecting is False:
            # Close the current WebSocket.
            ws.is_closed = True
            ws.keep_running = False
            # Reconnect only if ws.forced_close is False (replace the keep_running)

            # Set the current socket as reconnecting status
            # So the external ping check will be locked
            ws.is_reconnecting = True

            if ws.forced_close is False:
                logger.info(
                    f"#{ws.index} - Reconnecting to Twitch PubSub server in ~60 seconds"
                )
                time.sleep(30)

                # Wait until the network is back before opening a new socket.
                while internet_connection_available() is False:
                    random_sleep = random.randint(1, 3)
                    logger.warning(
                        f"#{ws.index} - No internet connection available! Retry after {random_sleep}m"
                    )
                    time.sleep(random_sleep * 60)

                # Why not create a new ws on the same array index? Let's try.
                self = ws.parent_pool
                # Create a new connection.
                self.ws[ws.index] = self.__new(ws.index)

                self.__start(ws.index)  # Start a new thread.
                time.sleep(30)

                # Re-register every topic the dead socket was listening to.
                for topic in ws.topics:
                    self.__submit(ws.index, topic)
+
@staticmethod
def on_message(ws, message):
    """Dispatch a raw PubSub frame received on `ws`.

    Frames of type "MESSAGE" are parsed into a Message and routed by topic
    (channel points, playback state, raids, moments, predictions); "RESPONSE"
    frames carry listen errors; "RECONNECT" triggers a socket rebuild; "PONG"
    updates the keep-alive timestamp. All per-topic handling is wrapped in a
    broad try/except so one malformed event cannot kill the socket thread.
    """
    logger.debug(f"#{ws.index} - Received: {message.strip()}")
    response = json.loads(message)

    if response["type"] == "MESSAGE":
        # We should create a Message class ...
        message = Message(response["data"])

        # If we have more than one PubSub connection, messages may be duplicated
        # Check the concatenation between message_type.top.channel_id
        if (
            ws.last_message_type_channel is not None
            and ws.last_message_timestamp is not None
            and ws.last_message_timestamp == message.timestamp
            and ws.last_message_type_channel == message.identifier
        ):
            return

        ws.last_message_timestamp = message.timestamp
        ws.last_message_type_channel = message.identifier

        streamer_index = get_streamer_index(ws.streamers, message.channel_id)
        if streamer_index != -1:
            try:
                if message.topic == "community-points-user-v1":
                    # Balance update: mirror the server-side points total.
                    if message.type in ["points-earned", "points-spent"]:
                        balance = message.data["balance"]["balance"]
                        ws.streamers[streamer_index].channel_points = balance
                        # Analytics switch
                        if Settings.enable_analytics is True:
                            ws.streamers[streamer_index].persistent_series(
                                event_type=message.data["point_gain"]["reason_code"]
                                if message.type == "points-earned"
                                else "Spent"
                            )

                    if message.type == "points-earned":
                        earned = message.data["point_gain"]["total_points"]
                        reason_code = message.data["point_gain"]["reason_code"]

                        logger.info(
                            f"+{earned} β {ws.streamers[streamer_index]} - Reason: {reason_code}.",
                            extra={
                                "emoji": ":rocket:",
                                "event": Events.get(f"GAIN_FOR_{reason_code}"),
                            },
                        )
                        ws.streamers[streamer_index].update_history(
                            reason_code, earned
                        )
                        # Analytics switch
                        if Settings.enable_analytics is True:
                            ws.streamers[streamer_index].persistent_annotations(
                                reason_code, f"+{earned} - {reason_code}"
                            )
                    elif message.type == "claim-available":
                        # A channel-points bonus chest is ready to be claimed.
                        ws.twitch.claim_bonus(
                            ws.streamers[streamer_index],
                            message.data["claim"]["id"],
                        )

                elif message.topic == "video-playback-by-id":
                    # There is stream-up message type, but it's sent earlier than the API updates
                    if message.type == "stream-up":
                        ws.streamers[streamer_index].stream_up = time.time()
                    elif message.type == "stream-down":
                        if ws.streamers[streamer_index].is_online is True:
                            ws.streamers[streamer_index].set_offline()
                    elif message.type == "viewcount":
                        # Re-check online status only after the stream-up grace
                        # period has elapsed (the API lags behind PubSub).
                        if ws.streamers[streamer_index].stream_up_elapsed():
                            ws.twitch.check_streamer_online(
                                ws.streamers[streamer_index]
                            )

                elif message.topic == "raid":
                    if message.type == "raid_update_v2":
                        raid = Raid(
                            message.message["raid"]["id"],
                            message.message["raid"]["target_login"],
                        )
                        ws.twitch.update_raid(ws.streamers[streamer_index], raid)

                elif message.topic == "community-moments-channel-v1":
                    if message.type == "active":
                        ws.twitch.claim_moment(
                            ws.streamers[streamer_index], message.data["moment_id"]
                        )

                elif message.topic == "predictions-channel-v1":

                    event_dict = message.data["event"]
                    event_id = event_dict["id"]
                    event_status = event_dict["status"]

                    current_tmsp = parser.parse(message.timestamp)

                    if (
                        message.type == "event-created"
                        and event_id not in ws.events_predictions
                    ):
                        if event_status == "ACTIVE":
                            prediction_window_seconds = float(
                                event_dict["prediction_window_seconds"]
                            )
                            # Reduce prediction window by 3/6s - Collect more accurate data for decision
                            prediction_window_seconds = ws.streamers[
                                streamer_index
                            ].get_prediction_window(prediction_window_seconds)
                            event = EventPrediction(
                                ws.streamers[streamer_index],
                                event_id,
                                event_dict["title"],
                                parser.parse(event_dict["created_at"]),
                                prediction_window_seconds,
                                event_status,
                                event_dict["outcomes"],
                            )
                            # Only schedule a bet if the streamer is live and
                            # the (reduced) prediction window has not closed.
                            if (
                                ws.streamers[streamer_index].is_online
                                and event.closing_bet_after(current_tmsp) > 0
                            ):
                                streamer = ws.streamers[streamer_index]
                                bet_settings = streamer.settings.bet
                                if (
                                    bet_settings.minimum_points is None
                                    or streamer.channel_points
                                    > bet_settings.minimum_points
                                ):
                                    ws.events_predictions[event_id] = event
                                    start_after = event.closing_bet_after(
                                        current_tmsp
                                    )

                                    # Delay the actual bet placement so the
                                    # outcome data is as fresh as possible.
                                    place_bet_thread = Timer(
                                        start_after,
                                        ws.twitch.make_predictions,
                                        (ws.events_predictions[event_id],),
                                    )
                                    place_bet_thread.daemon = True
                                    place_bet_thread.start()

                                    logger.info(
                                        f"Place the bet after: {start_after}s for: {ws.events_predictions[event_id]}",
                                        extra={
                                            "emoji": ":alarm_clock:",
                                            "event": Events.BET_START,
                                        },
                                    )
                                else:
                                    logger.info(
                                        f"{streamer} have only {streamer.channel_points} channel points and the minimum for bet is: {bet_settings.minimum_points}",
                                        extra={
                                            "emoji": ":pushpin:",
                                            "event": Events.BET_FILTERS,
                                        },
                                    )

                    elif (
                        message.type == "event-updated"
                        and event_id in ws.events_predictions
                    ):
                        ws.events_predictions[event_id].status = event_status
                        # Game over we can't update anymore the values... The bet was placed!
                        if (
                            ws.events_predictions[event_id].bet_placed is False
                            and ws.events_predictions[event_id].bet.decision == {}
                        ):
                            ws.events_predictions[event_id].bet.update_outcomes(
                                event_dict["outcomes"]
                            )

                elif message.topic == "predictions-user-v1":
                    event_id = message.data["prediction"]["event_id"]
                    if event_id in ws.events_predictions:
                        event_prediction = ws.events_predictions[event_id]
                        if (
                            message.type == "prediction-result"
                            and event_prediction.bet_confirmed
                        ):
                            points = event_prediction.parse_result(
                                message.data["prediction"]["result"]
                            )

                            decision = event_prediction.bet.get_decision()
                            choice = event_prediction.bet.decision["choice"]

                            logger.info(
                                (
                                    f"{event_prediction} - Decision: {choice}: {decision['title']} "
                                    f"({decision['color']}) - Result: {event_prediction.result['string']}"
                                ),
                                extra={
                                    "emoji": ":bar_chart:",
                                    "event": Events.get(
                                        f"BET_{event_prediction.result['type']}"
                                    ),
                                },
                            )

                            ws.streamers[streamer_index].update_history(
                                "PREDICTION", points["gained"]
                            )

                            # Remove duplicate history records from previous message sent in community-points-user-v1
                            if event_prediction.result["type"] == "REFUND":
                                ws.streamers[streamer_index].update_history(
                                    "REFUND",
                                    -points["placed"],
                                    counter=-1,
                                )
                            elif event_prediction.result["type"] == "WIN":
                                ws.streamers[streamer_index].update_history(
                                    "PREDICTION",
                                    -points["won"],
                                    counter=-1,
                                )

                            if event_prediction.result["type"]:
                                # Analytics switch
                                if Settings.enable_analytics is True:
                                    ws.streamers[
                                        streamer_index
                                    ].persistent_annotations(
                                        event_prediction.result["type"],
                                        f"{ws.events_predictions[event_id].title}",
                                    )
                        elif message.type == "prediction-made":
                            event_prediction.bet_confirmed = True
                            # Analytics switch
                            if Settings.enable_analytics is True:
                                ws.streamers[streamer_index].persistent_annotations(
                                    "PREDICTION_MADE",
                                    f"Decision: {event_prediction.bet.decision['choice']} - {event_prediction.title}",
                                )
            except Exception:
                logger.error(
                    f"Exception raised for topic: {message.topic} and message: {message}",
                    exc_info=True,
                )

    elif response["type"] == "RESPONSE" and len(response.get("error", "")) > 0:
        # raise RuntimeError(f"Error while trying to listen for a topic: {response}")
        error_message = response.get("error", "")
        logger.error(f"Error while trying to listen for a topic: {error_message}")

        # Check if the error message indicates an authentication issue (ERR_BADAUTH)
        if "ERR_BADAUTH" in error_message:
            # Inform the user about the potential outdated cookie file
            username = ws.twitch.twitch_login.username
            logger.error(f"Received the ERR_BADAUTH error, most likely you have an outdated cookie file \"cookies\\{username}.pkl\". Delete this file and try again.")
            # Attempt to delete the outdated cookie file
            # try:
            #     cookie_file_path = os.path.join("cookies", f"{username}.pkl")
            #     if os.path.exists(cookie_file_path):
            #         os.remove(cookie_file_path)
            #         logger.info(f"Deleted outdated cookie file for user: {username}")
            #     else:
            #         logger.warning(f"Cookie file not found for user: {username}")
            # except Exception as e:
            #     logger.error(f"Error occurred while deleting cookie file: {str(e)}")

    elif response["type"] == "RECONNECT":
        logger.info(f"#{ws.index} - Reconnection required")
        WebSocketsPool.handle_reconnection(ws)

    elif response["type"] == "PONG":
        # Record when the last PONG arrived so the external ping-check can
        # detect a silent/dead connection.
        ws.last_pong = time.time()
diff --git a/TwitchChannelPointsMiner/classes/Webhook.py b/TwitchChannelPointsMiner/classes/Webhook.py
new file mode 100644
index 0000000..d37fda6
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/Webhook.py
@@ -0,0 +1,26 @@
+from textwrap import dedent
+
+import requests
+
+from TwitchChannelPointsMiner.classes.Settings import Events
+
+
class Webhook(object):
    """Forward selected miner events to a user-supplied HTTP endpoint.

    The endpoint receives two query-string parameters: `event_name` and
    `message`.
    """

    __slots__ = ["endpoint", "method", "events"]

    def __init__(self, endpoint: str, method: str, events: list):
        self.endpoint = endpoint
        self.method = method
        # Events may be passed as Events enum members or strings; normalize.
        self.events = [str(e) for e in events]

    def send(self, message: str, event: Events) -> None:
        """Fire the webhook for `event` if it is in the subscribed list.

        Raises ValueError when the configured HTTP method is not GET/POST.
        """
        if str(event) not in self.events:
            return

        method = self.method.lower()
        if method not in ("get", "post"):
            raise ValueError("Invalid method, use POST or GET")

        # Bug fix: the original concatenated message/event into the URL with
        # an f-string, producing an invalid URL whenever the message contained
        # spaces, '&', '#' or non-ASCII text. Passing `params` lets requests
        # URL-encode the query string correctly.
        params = {"event_name": str(event), "message": message}
        if method == "get":
            requests.get(url=self.endpoint, params=params)
        else:
            requests.post(url=self.endpoint, params=params)
diff --git a/TwitchChannelPointsMiner/classes/__init__.py b/TwitchChannelPointsMiner/classes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/TwitchChannelPointsMiner/classes/entities/Bet.py b/TwitchChannelPointsMiner/classes/entities/Bet.py
new file mode 100644
index 0000000..b6234d8
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Bet.py
@@ -0,0 +1,315 @@
+import copy
+from enum import Enum, auto
+from random import uniform
+
+from millify import millify
+
+#from TwitchChannelPointsMiner.utils import char_decision_as_index, float_round
+from TwitchChannelPointsMiner.utils import float_round
+
+
class Strategy(Enum):
    """Betting strategies available for picking a prediction outcome."""

    MOST_VOTED = auto()
    HIGH_ODDS = auto()
    PERCENTAGE = auto()
    SMART_MONEY = auto()
    SMART = auto()

    def __str__(self) -> str:
        # Log the bare member name (e.g. "SMART") instead of "Strategy.SMART".
        return f"{self.name}"
+
+
class Condition(Enum):
    """Comparison operators usable in a FilterCondition."""

    GT = auto()
    LT = auto()
    GTE = auto()
    LTE = auto()

    def __str__(self) -> str:
        # Render as the bare operator name for logging/repr purposes.
        return f"{self.name}"
+
+
class OutcomeKeys(object):
    """String constants used as dictionary keys on a Bet outcome dict."""

    # Real key on Bet dict ['']
    PERCENTAGE_USERS = "percentage_users"
    ODDS_PERCENTAGE = "odds_percentage"
    ODDS = "odds"
    TOP_POINTS = "top_points"
    # Real key on Bet dict [''] - Sum()
    TOTAL_USERS = "total_users"
    TOTAL_POINTS = "total_points"
    # This key does not exist on the raw outcome dict; it is resolved to the
    # corresponding "total_*" key of the chosen outcome in Bet.skip().
    DECISION_USERS = "decision_users"
    DECISION_POINTS = "decision_points"
+
+
class DelayMode(Enum):
    """How the bet-placement delay is interpreted relative to the event window."""

    FROM_START = auto()
    FROM_END = auto()
    PERCENTAGE = auto()

    def __str__(self) -> str:
        # Render as the bare member name for logging/repr purposes.
        return f"{self.name}"
+
+
class FilterCondition(object):
    """A single skip-rule for bets: compare outcome key `by` against `value`
    using the Condition operator `where`.
    """

    __slots__ = [
        "by",
        "where",
        "value",
    ]

    def __init__(self, by=None, where=None, value=None, decision=None):
        # `decision` is accepted for backward compatibility but unused.
        self.by = by
        self.where = where
        self.value = value

    def __repr__(self):
        # Bug fix: the original called self.by.upper() unconditionally, which
        # raised AttributeError whenever `by` was left at its default None.
        by = self.by.upper() if self.by is not None else None
        return f"FilterCondition(by={by}, where={self.where}, value={self.value})"
+
+
class BetSettings(object):
    """Configuration for bet placement: strategy, stake sizing, filters and
    scheduling delay. Unset fields stay None until default() fills them in.
    """

    __slots__ = [
        "strategy",
        "percentage",
        "percentage_gap",
        "max_points",
        "minimum_points",
        "stealth_mode",
        "filter_condition",
        "delay",
        "delay_mode",
    ]

    def __init__(
        self,
        strategy: Strategy = None,
        percentage: int = None,
        percentage_gap: int = None,
        max_points: int = None,
        minimum_points: int = None,
        stealth_mode: bool = None,
        filter_condition: FilterCondition = None,
        delay: float = None,
        delay_mode: DelayMode = None,
    ):
        self.strategy = strategy
        self.percentage = percentage
        self.percentage_gap = percentage_gap
        self.max_points = max_points
        self.minimum_points = minimum_points
        self.stealth_mode = stealth_mode
        self.filter_condition = filter_condition
        self.delay = delay
        self.delay_mode = delay_mode

    def default(self):
        """Fill every still-None field with its default value.

        `filter_condition` intentionally has no default: None means
        "no filter" and is a valid final state.
        """
        fallbacks = {
            "strategy": Strategy.SMART,
            "percentage": 5,
            "percentage_gap": 20,
            "max_points": 50000,
            "minimum_points": 0,
            "stealth_mode": False,
            "delay": 6,
            "delay_mode": DelayMode.FROM_END,
        }
        for name, value in fallbacks.items():
            if getattr(self, name) is None:
                setattr(self, name, value)

    def __repr__(self):
        return f"BetSettings(strategy={self.strategy}, percentage={self.percentage}, percentage_gap={self.percentage_gap}, max_points={self.max_points}, minimum_points={self.minimum_points}, stealth_mode={self.stealth_mode})"
+
+
class Bet(object):
    """Holds the outcome data for one prediction event and computes the bet
    decision (chosen outcome index, amount, outcome id) from the configured
    strategy in BetSettings.
    """

    __slots__ = ["outcomes", "decision", "total_users", "total_points", "settings"]

    def __init__(self, outcomes: list, settings: BetSettings):
        self.outcomes = outcomes
        self.__clear_outcomes()
        self.decision: dict = {}
        self.total_users = 0
        self.total_points = 0
        self.settings = settings

    def update_outcomes(self, outcomes):
        """Refresh per-outcome totals, top-predictor points, odds and user
        percentages from a fresh `outcomes` payload received over PubSub.
        Assumes `outcomes` is index-aligned with self.outcomes.
        """
        for index in range(0, len(self.outcomes)):
            self.outcomes[index][OutcomeKeys.TOTAL_USERS] = int(
                outcomes[index][OutcomeKeys.TOTAL_USERS]
            )
            self.outcomes[index][OutcomeKeys.TOTAL_POINTS] = int(
                outcomes[index][OutcomeKeys.TOTAL_POINTS]
            )
            if outcomes[index]["top_predictors"] != []:
                # Sort by points placed by other users
                outcomes[index]["top_predictors"] = sorted(
                    outcomes[index]["top_predictors"],
                    key=lambda x: x["points"],
                    reverse=True,
                )
                # Get the first elements (most placed)
                top_points = outcomes[index]["top_predictors"][0]["points"]
                self.outcomes[index][OutcomeKeys.TOP_POINTS] = top_points

        # Inefficient, but otherwise outcomekeys are represented wrong
        self.total_points = 0
        self.total_users = 0
        for index in range(0, len(self.outcomes)):
            self.total_users += self.outcomes[index][OutcomeKeys.TOTAL_USERS]
            self.total_points += self.outcomes[index][OutcomeKeys.TOTAL_POINTS]

        if (
            self.total_users > 0
            and self.total_points > 0
        ):
            for index in range(0, len(self.outcomes)):
                self.outcomes[index][OutcomeKeys.PERCENTAGE_USERS] = float_round(
                    (100 * self.outcomes[index][OutcomeKeys.TOTAL_USERS]) / self.total_users
                )
                # Odds of 0 mean "no points on this outcome yet" and also
                # avoid a ZeroDivisionError below.
                self.outcomes[index][OutcomeKeys.ODDS] = float_round(
                    0
                    if self.outcomes[index][OutcomeKeys.TOTAL_POINTS] == 0
                    else self.total_points / self.outcomes[index][OutcomeKeys.TOTAL_POINTS]
                )
                self.outcomes[index][OutcomeKeys.ODDS_PERCENTAGE] = float_round(
                    0
                    if self.outcomes[index][OutcomeKeys.ODDS] == 0
                    else 100 / self.outcomes[index][OutcomeKeys.ODDS]
                )

        self.__clear_outcomes()

    def __repr__(self):
        return f"Bet(total_users={millify(self.total_users)}, total_points={millify(self.total_points)}), decision={self.decision})\n\t\tOutcome A({self.get_outcome(0)})\n\t\tOutcome B({self.get_outcome(1)})"

    def get_decision(self, parsed=False):
        """Return the outcome dict chosen by calculate(), or its one-line
        string form when parsed is True. decision["choice"] is an int index.
        """
        decision = self.outcomes[self.decision["choice"]]
        return decision if parsed is False else Bet.__parse_outcome(decision)

    @staticmethod
    def __parse_outcome(outcome):
        # One-line human-readable summary of a single outcome.
        return f"{outcome['title']} ({outcome['color']}), Points: {millify(outcome[OutcomeKeys.TOTAL_POINTS])}, Users: {millify(outcome[OutcomeKeys.TOTAL_USERS])} ({outcome[OutcomeKeys.PERCENTAGE_USERS]}%), Odds: {outcome[OutcomeKeys.ODDS]} ({outcome[OutcomeKeys.ODDS_PERCENTAGE]}%)"

    def get_outcome(self, index):
        return Bet.__parse_outcome(self.outcomes[index])

    def __clear_outcomes(self):
        """Drop every unrecognized key from each outcome dict and make sure
        all numeric keys exist (defaulting to 0)."""
        for index in range(0, len(self.outcomes)):
            keys = copy.deepcopy(list(self.outcomes[index].keys()))
            for key in keys:
                if key not in [
                    OutcomeKeys.TOTAL_USERS,
                    OutcomeKeys.TOTAL_POINTS,
                    OutcomeKeys.TOP_POINTS,
                    OutcomeKeys.PERCENTAGE_USERS,
                    OutcomeKeys.ODDS,
                    OutcomeKeys.ODDS_PERCENTAGE,
                    "title",
                    "color",
                    "id",
                ]:
                    del self.outcomes[index][key]
            for key in [
                OutcomeKeys.PERCENTAGE_USERS,
                OutcomeKeys.ODDS,
                OutcomeKeys.ODDS_PERCENTAGE,
                OutcomeKeys.TOP_POINTS,
            ]:
                if key not in self.outcomes[index]:
                    self.outcomes[index][key] = 0

    '''def __return_choice(self, key) -> str:
        return "A" if self.outcomes[0][key] > self.outcomes[1][key] else "B"'''

    def __return_choice(self, key) -> int:
        # Index of the outcome with the largest value for `key`
        # (ties resolve to the lower index).
        largest=0
        for index in range(0, len(self.outcomes)):
            if self.outcomes[index][key] > self.outcomes[largest][key]:
                largest = index
        return largest

    def skip(self) -> tuple:
        """Return (skip, compared_value): skip the bet when the configured
        FilterCondition is NOT satisfied. Without a filter, never skip.

        NOTE: the original annotation said `-> bool`, but this method has
        always returned a 2-tuple.
        """
        if self.settings.filter_condition is not None:
            # key == by , condition == where
            key = self.settings.filter_condition.by
            condition = self.settings.filter_condition.where
            value = self.settings.filter_condition.value

            # DECISION_* pseudo-keys map to the chosen outcome's total_* keys.
            fixed_key = (
                key
                if key not in [OutcomeKeys.DECISION_USERS, OutcomeKeys.DECISION_POINTS]
                else key.replace("decision", "total")
            )
            if key in [OutcomeKeys.TOTAL_USERS, OutcomeKeys.TOTAL_POINTS]:
                compared_value = (
                    self.outcomes[0][fixed_key] + self.outcomes[1][fixed_key]
                )
            else:
                outcome_index = self.decision["choice"]
                compared_value = self.outcomes[outcome_index][fixed_key]

            # Check if condition is satisfied
            if condition == Condition.GT:
                if compared_value > value:
                    return False, compared_value
            elif condition == Condition.LT:
                if compared_value < value:
                    return False, compared_value
            elif condition == Condition.GTE:
                if compared_value >= value:
                    return False, compared_value
            elif condition == Condition.LTE:
                if compared_value <= value:
                    return False, compared_value
            return True, compared_value  # Else skip the bet
        else:
            return False, 0  # Default don't skip the bet

    def calculate(self, balance: int) -> dict:
        """Pick an outcome per the strategy and size the stake from `balance`.

        Returns the decision dict {"choice": int|None, "amount": int, "id": str|None}.
        """
        self.decision = {"choice": None, "amount": 0, "id": None}
        if self.settings.strategy == Strategy.MOST_VOTED:
            self.decision["choice"] = self.__return_choice(OutcomeKeys.TOTAL_USERS)
        elif self.settings.strategy == Strategy.HIGH_ODDS:
            self.decision["choice"] = self.__return_choice(OutcomeKeys.ODDS)
        elif self.settings.strategy == Strategy.PERCENTAGE:
            self.decision["choice"] = self.__return_choice(OutcomeKeys.ODDS_PERCENTAGE)
        elif self.settings.strategy == Strategy.SMART_MONEY:
            self.decision["choice"] = self.__return_choice(OutcomeKeys.TOP_POINTS)
        elif self.settings.strategy == Strategy.SMART:
            # SMART: follow the odds when votes are close, else the crowd.
            difference = abs(
                self.outcomes[0][OutcomeKeys.PERCENTAGE_USERS]
                - self.outcomes[1][OutcomeKeys.PERCENTAGE_USERS]
            )
            self.decision["choice"] = (
                self.__return_choice(OutcomeKeys.ODDS)
                if difference < self.settings.percentage_gap
                else self.__return_choice(OutcomeKeys.TOTAL_USERS)
            )

        if self.decision["choice"] is not None:
            index = self.decision["choice"]
            self.decision["id"] = self.outcomes[index]["id"]
            # Stake: percentage of balance, capped at max_points.
            self.decision["amount"] = min(
                int(balance * (self.settings.percentage / 100)),
                self.settings.max_points,
            )
            # Stealth mode: never place more than the current top predictor.
            if (
                self.settings.stealth_mode is True
                and self.decision["amount"]
                >= self.outcomes[index][OutcomeKeys.TOP_POINTS]
            ):
                reduce_amount = uniform(1, 5)
                self.decision["amount"] = (
                    self.outcomes[index][OutcomeKeys.TOP_POINTS] - reduce_amount
                )
            self.decision["amount"] = int(self.decision["amount"])
        return self.decision
diff --git a/TwitchChannelPointsMiner/classes/entities/Campaign.py b/TwitchChannelPointsMiner/classes/entities/Campaign.py
new file mode 100644
index 0000000..17f9ace
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Campaign.py
@@ -0,0 +1,74 @@
+from datetime import datetime
+
+from TwitchChannelPointsMiner.classes.entities.Drop import Drop
+from TwitchChannelPointsMiner.classes.Settings import Settings
+
+
class Campaign(object):
    """A Twitch drops campaign parsed from the GQL payload, with its
    time-based drops and the channels allowed to progress it.
    """

    __slots__ = [
        "id",
        "game",
        "name",
        "status",
        "in_inventory",
        "end_at",
        "start_at",
        "dt_match",
        "drops",
        "channels",
    ]

    def __init__(self, dict):
        self.id = dict["id"]
        self.game = dict["game"]
        self.name = dict["name"]
        self.status = dict["status"]
        allowed = dict["allow"]["channels"]
        # None means "any channel"; otherwise keep only the channel ids.
        self.channels = [] if allowed is None else [channel["id"] for channel in allowed]
        self.in_inventory = False

        self.end_at = datetime.strptime(dict["endAt"], "%Y-%m-%dT%H:%M:%SZ")
        self.start_at = datetime.strptime(dict["startAt"], "%Y-%m-%dT%H:%M:%SZ")
        # True while the campaign window is currently open.
        self.dt_match = self.start_at < datetime.now() < self.end_at

        self.drops = [Drop(drop) for drop in dict["timeBasedDrops"]]

    def __repr__(self):
        return f"Campaign(id={self.id}, name={self.name}, game={self.game}, in_inventory={self.in_inventory})"

    def __str__(self):
        if Settings.logger.less:
            return f"{self.name}, Game: {self.game['displayName']} - Drops: {len(self.drops)} pcs. - In inventory: {self.in_inventory}"
        return self.__repr__()

    def clear_drops(self):
        """Keep only drops that are inside their time window and unclaimed."""
        self.drops = [
            drop
            for drop in self.drops
            if drop.dt_match is True and drop.is_claimed is False
        ]

    def __eq__(self, other):
        return self.id == other.id if isinstance(other, self.__class__) else False

    def sync_drops(self, drops, callback):
        """Sync progress from the inventory payload into our drops.

        For each inventory drop matching one of ours by id, update
        [currentMinutesWatched, hasPreconditionsMet, dropInstanceID, isClaimed]
        and, when the drop becomes claimable, claim it through `callback`.
        """
        for drop in drops:
            for owned in self.drops:
                if drop["id"] == owned.id:
                    owned.update(drop["self"])
                    # If after the update all conditions are met, claim it.
                    if owned.is_claimable is True:
                        owned.is_claimed = callback(owned)
                    break
diff --git a/TwitchChannelPointsMiner/classes/entities/Drop.py b/TwitchChannelPointsMiner/classes/entities/Drop.py
new file mode 100644
index 0000000..b975aaf
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Drop.py
@@ -0,0 +1,103 @@
+from datetime import datetime
+
+from TwitchChannelPointsMiner.classes.Settings import Settings
+from TwitchChannelPointsMiner.utils import percentage
+
+
class Drop(object):
    """A single time-based drop inside a campaign, tracking watch progress
    and claim state synced from the inventory payload.
    """

    __slots__ = [
        "id",
        "name",
        "benefit",
        "minutes_required",
        "has_preconditions_met",
        "current_minutes_watched",
        "drop_instance_id",
        "is_claimed",
        "is_claimable",
        "percentage_progress",
        "end_at",
        "start_at",
        "dt_match",
        "is_printable",
    ]

    def __init__(self, dict):
        self.id = dict["id"]
        self.name = dict["name"]
        # De-duplicated benefit names joined in set-iteration order.
        self.benefit = ", ".join(
            {bf["benefit"]["name"] for bf in dict["benefitEdges"]}
        )
        self.minutes_required = dict["requiredMinutesWatched"]

        self.has_preconditions_met = None  # [True, False], None we don't know
        self.current_minutes_watched = 0
        self.drop_instance_id = None
        self.is_claimed = False
        self.is_claimable = False
        self.is_printable = False
        self.percentage_progress = 0

        self.end_at = datetime.strptime(dict["endAt"], "%Y-%m-%dT%H:%M:%SZ")
        self.start_at = datetime.strptime(dict["startAt"], "%Y-%m-%dT%H:%M:%SZ")
        # True while the drop's time window is currently open.
        self.dt_match = self.start_at < datetime.now() < self.end_at

    def update(
        self,
        progress,
    ):
        """Apply a fresh inventory `progress` payload to this drop."""
        self.has_preconditions_met = progress["hasPreconditionsMet"]

        watched_now = progress["currentMinutesWatched"]
        updated_percentage = percentage(watched_now, self.minutes_required)
        on_quarter = round((updated_percentage / 25), 4).is_integer()

        # Print only on a 25% boundary crossing (skipping the bootstrap
        # phase where current_minutes_watched is still 0) ...
        crossed_quarter = (
            updated_percentage > self.percentage_progress
            and on_quarter is True
            and self.current_minutes_watched != 0
        )
        # ... or on the very first watched minute of a new drop.
        just_started = (
            watched_now == 1 and self.current_minutes_watched == 0
        )
        self.is_printable = watched_now > self.current_minutes_watched and (
            crossed_quarter or just_started
        )

        self.current_minutes_watched = watched_now
        self.drop_instance_id = progress["dropInstanceID"]
        self.is_claimed = progress["isClaimed"]
        self.is_claimable = (
            self.is_claimed is False and self.drop_instance_id is not None
        )
        self.percentage_progress = updated_percentage

    def __repr__(self):
        return f"Drop(id={self.id}, name={self.name}, benefit={self.benefit}, minutes_required={self.minutes_required}, has_preconditions_met={self.has_preconditions_met}, current_minutes_watched={self.current_minutes_watched}, percentage_progress={self.percentage_progress}%, drop_instance_id={self.drop_instance_id}, is_claimed={self.is_claimed})"

    def __str__(self):
        if Settings.logger.less:
            return f"{self.name} ({self.benefit}) {self.current_minutes_watched}/{self.minutes_required} ({self.percentage_progress}%)"
        return self.__repr__()

    def progress_bar(self):
        """Render a fixed-width 50-char textual progress bar."""
        filled = self.percentage_progress // 2
        empty = (100 - self.percentage_progress) // 2
        # Pad so the bar is always exactly 50 characters wide.
        if filled + empty < 50:
            empty += 50 - (filled + empty)
        return f"|{('β' * filled)}{(' ' * empty)}|\t{self.percentage_progress}% [{self.current_minutes_watched}/{self.minutes_required}]"

    def __eq__(self, other):
        return self.id == other.id if isinstance(other, self.__class__) else False
diff --git a/TwitchChannelPointsMiner/classes/entities/EventPrediction.py b/TwitchChannelPointsMiner/classes/entities/EventPrediction.py
new file mode 100644
index 0000000..58dc761
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/EventPrediction.py
@@ -0,0 +1,94 @@
+from TwitchChannelPointsMiner.classes.entities.Bet import Bet
+from TwitchChannelPointsMiner.classes.entities.Streamer import Streamer
+from TwitchChannelPointsMiner.classes.Settings import Settings
+from TwitchChannelPointsMiner.utils import _millify, float_round
+
+
class EventPrediction(object):
    """One prediction event on a streamer's channel, carrying the Bet that
    will be (or was) placed and the final result once it resolves.
    """

    __slots__ = [
        "streamer",
        "event_id",
        "title",
        "created_at",
        "prediction_window_seconds",
        "status",
        "result",
        "box_fillable",
        "bet_confirmed",
        "bet_placed",
        "bet",
    ]

    def __init__(
        self,
        streamer: Streamer,
        event_id,
        title,
        created_at,
        prediction_window_seconds,
        status,
        outcomes,
    ):
        self.streamer = streamer

        self.event_id = event_id
        self.title = title.strip()
        self.created_at = created_at
        self.prediction_window_seconds = prediction_window_seconds
        self.status = status
        self.result: dict = {"string": "", "type": None, "gained": 0}

        self.box_fillable = False
        self.bet_confirmed = False
        self.bet_placed = False
        self.bet = Bet(outcomes, streamer.settings.bet)

    def __repr__(self):
        return f"EventPrediction(event_id={self.event_id}, streamer={self.streamer}, title={self.title})"

    def __str__(self):
        if Settings.logger.less:
            return f"EventPrediction: {self.streamer} - {self.title}"
        return self.__repr__()

    def elapsed(self, timestamp):
        """Seconds between event creation and `timestamp` (rounded)."""
        delta_seconds = (timestamp - self.created_at).total_seconds()
        return float_round(delta_seconds)

    def closing_bet_after(self, timestamp):
        """Seconds left (at `timestamp`) before the betting window closes."""
        return float_round(self.prediction_window_seconds - self.elapsed(timestamp))

    def print_recap(self) -> str:
        return f"{self}\n\t\t{self.bet}\n\t\tResult: {self.result['string']}"

    def parse_result(self, result) -> dict:
        """Digest a prediction-result payload into a points summary dict
        (placed/won/gained/prefix) and store a printable self.result.
        """
        result_type = result["type"]
        refunded = result_type == "REFUND"

        points = {}
        # A refund means the stake came back, so nothing was "placed".
        points["placed"] = 0 if refunded else self.bet.decision["amount"]
        if result["points_won"] or refunded:
            points["won"] = result["points_won"]
        else:
            points["won"] = 0
        points["gained"] = 0 if refunded else points["won"] - points["placed"]
        points["prefix"] = "+" if points["gained"] >= 0 else ""

        if result_type == "LOSE":
            action = "Lost"
        elif refunded:
            action = "Refunded"
        else:
            action = "Gained"

        self.result = {
            "string": f"{result_type}, {action}: {points['prefix']}{_millify(points['gained'])}",
            "type": result_type,
            "gained": points["gained"],
        }

        return points
diff --git a/TwitchChannelPointsMiner/classes/entities/Message.py b/TwitchChannelPointsMiner/classes/entities/Message.py
new file mode 100644
index 0000000..cb3330a
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Message.py
@@ -0,0 +1,69 @@
+import json
+
+from TwitchChannelPointsMiner.utils import server_time
+
+
class Message(object):
    """A decoded PubSub frame: splits the topic, parses the JSON payload and
    derives timestamp/channel_id plus a de-duplication identifier.
    """

    __slots__ = [
        "topic",
        "topic_user",
        "message",
        "type",
        "data",
        "timestamp",
        "channel_id",
        "identifier",
    ]

    def __init__(self, data):
        self.topic, self.topic_user = data["topic"].split(".")

        self.message = json.loads(data["message"])
        self.type = self.message["type"]

        self.data = self.message.get("data")

        self.timestamp = self.__get_timestamp()
        self.channel_id = self.__get_channel_id()

        # Used to drop duplicates arriving on multiple PubSub connections.
        self.identifier = f"{self.type}.{self.topic}.{self.channel_id}"

    def __repr__(self):
        return f"{self.message}"

    def __str__(self):
        return f"{self.message}"

    def __get_timestamp(self):
        # Prefer the payload's own timestamp; otherwise fall back to the
        # local server-time helper.
        if self.data is None:
            return server_time(self.message)
        if "timestamp" in self.data:
            return self.data["timestamp"]
        return server_time(self.data)

    def __get_channel_id(self):
        # The channel id lives in different places depending on the topic;
        # probe the known containers in priority order, falling back to the
        # id embedded in the topic string.
        if self.data is not None:
            for container in ("prediction", "claim"):
                if container in self.data:
                    return self.data[container]["channel_id"]
            if "channel_id" in self.data:
                return self.data["channel_id"]
            if "balance" in self.data:
                return self.data["balance"]["channel_id"]
        return self.topic_user
diff --git a/TwitchChannelPointsMiner/classes/entities/PubsubTopic.py b/TwitchChannelPointsMiner/classes/entities/PubsubTopic.py
new file mode 100644
index 0000000..8972325
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/PubsubTopic.py
@@ -0,0 +1,16 @@
class PubsubTopic(object):
    """A PubSub listen topic, scoped either to the logged-in user or to a
    specific streamer's channel.
    """

    __slots__ = ["topic", "user_id", "streamer"]

    def __init__(self, topic, user_id=None, streamer=None):
        self.topic = topic
        self.user_id = user_id
        self.streamer = streamer

    def is_user_topic(self):
        # User-scoped topics carry no streamer reference.
        return self.streamer is None

    def __str__(self):
        suffix = self.user_id if self.is_user_topic() else self.streamer.channel_id
        return f"{self.topic}.{suffix}"
diff --git a/TwitchChannelPointsMiner/classes/entities/Raid.py b/TwitchChannelPointsMiner/classes/entities/Raid.py
new file mode 100644
index 0000000..cd3a525
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Raid.py
@@ -0,0 +1,12 @@
class Raid(object):
    """A pending raid announced on a channel, identified by its raid id."""

    __slots__ = ["raid_id", "target_login"]

    def __init__(self, raid_id, target_login):
        self.raid_id = raid_id
        self.target_login = target_login

    def __eq__(self, other):
        # Two raids are the same event iff their ids match.
        return self.raid_id == other.raid_id if isinstance(other, self.__class__) else False
diff --git a/TwitchChannelPointsMiner/classes/entities/Stream.py b/TwitchChannelPointsMiner/classes/entities/Stream.py
new file mode 100644
index 0000000..50ffc79
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Stream.py
@@ -0,0 +1,107 @@
+import json
+import logging
+import time
+from base64 import b64encode
+
+from TwitchChannelPointsMiner.classes.Settings import Settings
+from TwitchChannelPointsMiner.constants import DROP_ID
+
+logger = logging.getLogger(__name__)
+
+
class Stream(object):
    """Live-stream state for one streamer: broadcast metadata, drops tags,
    the minute-watched payload and watch-streak bookkeeping.
    """

    __slots__ = [
        "broadcast_id",
        "title",
        "game",
        "tags",
        "drops_tags",
        "campaigns",
        "campaigns_ids",
        "viewers_count",
        "spade_url",
        "payload",
        "watch_streak_missing",
        "minute_watched",
        "__last_update",
        "__minute_watched_timestamp",
    ]

    def __init__(self):
        self.broadcast_id = None

        self.title = None
        self.game = {}
        self.tags = []

        self.drops_tags = False
        self.campaigns = []
        self.campaigns_ids = []

        self.viewers_count = 0
        self.__last_update = 0

        self.spade_url = None
        self.payload = None

        self.init_watch_streak()

    def encode_payload(self) -> dict:
        """Return the minute-watched payload as {"data": <base64 JSON>}."""
        serialized = json.dumps(self.payload, separators=(",", ":"))
        encoded = b64encode(serialized.encode("utf-8")).decode("utf-8")
        return {"data": encoded}

    def update(self, broadcast_id, title, game, tags, viewers_count):
        """Refresh broadcast metadata from the latest API response."""
        self.broadcast_id = broadcast_id
        self.title = title.strip()
        self.game = game
        # #343 temporary workaround
        self.tags = tags or []
        # ------------------------
        self.viewers_count = viewers_count

        tag_ids = [tag["id"] for tag in self.tags]
        # Drops are possible only when the drops tag is set AND a game is set.
        self.drops_tags = DROP_ID in tag_ids and self.game != {}
        self.__last_update = time.time()

        logger.debug(f"Update: {self}")

    def __repr__(self):
        return f"Stream(title={self.title}, game={self.__str_game()}, tags={self.__str_tags()})"

    def __str__(self):
        if Settings.logger.less:
            return f"{self.title}"
        return self.__repr__()

    def __str_tags(self):
        if self.tags == []:
            return None
        return ", ".join([tag["localizedName"] for tag in self.tags])

    def __str_game(self):
        return self.game["displayName"] if self.game not in [{}, None] else None

    def game_name(self):
        return self.game["name"] if self.game not in [{}, None] else None

    def game_id(self):
        return self.game["id"] if self.game not in [{}, None] else None

    def update_required(self):
        # Metadata is considered stale after 120 seconds.
        return self.__last_update == 0 or self.update_elapsed() >= 120

    def update_elapsed(self):
        if self.__last_update == 0:
            return 0
        return time.time() - self.__last_update

    def init_watch_streak(self):
        """Reset watch-streak tracking for a new broadcast."""
        self.watch_streak_missing = True
        self.minute_watched = 0
        self.__minute_watched_timestamp = 0

    def update_minute_watched(self):
        """Accumulate watch time (in minutes) since the previous call."""
        if self.__minute_watched_timestamp != 0:
            elapsed_minutes = (time.time() - self.__minute_watched_timestamp) / 60
            self.minute_watched += round(elapsed_minutes, 5)
        self.__minute_watched_timestamp = time.time()
diff --git a/TwitchChannelPointsMiner/classes/entities/Streamer.py b/TwitchChannelPointsMiner/classes/entities/Streamer.py
new file mode 100644
index 0000000..39e269a
--- /dev/null
+++ b/TwitchChannelPointsMiner/classes/entities/Streamer.py
@@ -0,0 +1,284 @@
+import json
+import logging
+import os
+import time
+from datetime import datetime
+from threading import Lock
+
+from TwitchChannelPointsMiner.classes.Chat import ChatPresence, ThreadChat
+from TwitchChannelPointsMiner.classes.entities.Bet import BetSettings, DelayMode
+from TwitchChannelPointsMiner.classes.entities.Stream import Stream
+from TwitchChannelPointsMiner.classes.Settings import Events, Settings
+from TwitchChannelPointsMiner.constants import URL
+from TwitchChannelPointsMiner.utils import _millify
+
+logger = logging.getLogger(__name__)
+
+
class StreamerSettings(object):
    """Per-streamer feature toggles (predictions, raids, drops, chat...).

    Any option left as None is later filled in by default() with the
    global default value.
    """

    __slots__ = [
        "make_predictions",
        "follow_raid",
        "claim_drops",
        "claim_moments",
        "watch_streak",
        "bet",
        "chat",
    ]

    def __init__(
        self,
        make_predictions: bool = None,
        follow_raid: bool = None,
        claim_drops: bool = None,
        claim_moments: bool = None,
        watch_streak: bool = None,
        bet: BetSettings = None,
        chat: ChatPresence = None,
    ):
        self.make_predictions = make_predictions
        self.follow_raid = follow_raid
        self.claim_drops = claim_drops
        self.claim_moments = claim_moments
        self.watch_streak = watch_streak
        self.bet = bet
        self.chat = chat

    def default(self):
        """Replace every unset (None) option with its default value."""
        for name in [
            "make_predictions",
            "follow_raid",
            "claim_drops",
            "claim_moments",
            "watch_streak",
        ]:
            if getattr(self, name) is None:
                setattr(self, name, True)
        if self.bet is None:
            self.bet = BetSettings()
        if self.chat is None:
            self.chat = ChatPresence.ONLINE

    def __repr__(self):
        # Fix: this previously misreported the class as "BetSettings"
        # (copy-paste from BetSettings.__repr__).
        return f"StreamerSettings(make_predictions={self.make_predictions}, follow_raid={self.follow_raid}, claim_drops={self.claim_drops}, claim_moments={self.claim_moments}, watch_streak={self.watch_streak}, bet={self.bet}, chat={self.chat})"
+
+
class Streamer(object):
    """Mining state for a single streamer.

    Tracks online/offline status, channel points, the current Stream
    snapshot, IRC chat thread management and per-streamer analytics
    persistence.
    """

    __slots__ = [
        "username",
        "channel_id",
        "settings",
        "is_online",
        "stream_up",
        "online_at",
        "offline_at",
        "channel_points",
        "minute_watched_requests",
        "viewer_is_mod",
        "activeMultipliers",
        "irc_chat",
        "stream",
        "raid",
        "history",
        "streamer_url",
        "mutex",
    ]

    def __init__(self, username, settings=None):
        self.username: str = username.lower().strip()
        self.channel_id: str = ""
        self.settings = settings
        self.is_online = False
        self.stream_up = 0  # timestamp of the last stream-up signal
        self.online_at = 0
        self.offline_at = 0
        self.channel_points = 0
        self.minute_watched_requests = None
        self.viewer_is_mod = False
        self.activeMultipliers = None
        self.irc_chat = None

        self.stream = Stream()

        self.raid = None
        # reason_code -> {"counter": int, "amount": int}
        self.history = {}

        self.streamer_url = f"{URL}/{self.username}"

        # Serializes analytics-file writes (see __save_json).
        self.mutex = Lock()

    def __repr__(self):
        return f"Streamer(username={self.username}, channel_id={self.channel_id}, channel_points={_millify(self.channel_points)})"

    def __str__(self):
        return (
            f"{self.username} ({_millify(self.channel_points)} points)"
            if Settings.logger.less
            else self.__repr__()
        )

    def set_offline(self):
        """Mark the streamer offline (idempotent) and sync chat presence."""
        if self.is_online is True:
            self.offline_at = time.time()
            self.is_online = False

        self.toggle_chat()

        logger.info(
            f"{self} is Offline!",
            extra={
                "emoji": ":sleeping:",
                "event": Events.STREAMER_OFFLINE,
            },
        )

    def set_online(self):
        """Mark the streamer online (idempotent) and sync chat presence."""
        if self.is_online is False:
            self.online_at = time.time()
            self.is_online = True
            # A fresh broadcast means the watch-streak can be earned again.
            self.stream.init_watch_streak()

        self.toggle_chat()

        logger.info(
            f"{self} is Online!",
            extra={
                "emoji": ":partying_face:",
                "event": Events.STREAMER_ONLINE,
            },
        )

    def print_history(self):
        """Summarize all non-empty point-gain history entries."""
        return ", ".join(
            [
                f"{key}({self.history[key]['counter']} times, {_millify(self.history[key]['amount'])} gained)"
                for key in sorted(self.history)
                if self.history[key]["counter"] != 0
            ]
        )

    def update_history(self, reason_code, earned, counter=1):
        """Accumulate *earned* points under *reason_code*.

        A WATCH_STREAK gain also clears the stream's pending-streak flag.
        """
        if reason_code not in self.history:
            self.history[reason_code] = {"counter": 0, "amount": 0}
        self.history[reason_code]["counter"] += counter
        self.history[reason_code]["amount"] += earned

        if reason_code == "WATCH_STREAK":
            self.stream.watch_streak_missing = False

    def stream_up_elapsed(self):
        """True when no stream-up signal was seen or it is older than 120s."""
        return self.stream_up == 0 or ((time.time() - self.stream_up) > 120)

    def drops_condition(self):
        """True when drops should be mined for this streamer right now."""
        return (
            self.settings.claim_drops is True
            and self.is_online is True
            # and self.stream.drops_tags is True
            and self.stream.campaigns_ids != []
        )

    def viewer_has_points_multiplier(self):
        """True when at least one active points multiplier applies."""
        return self.activeMultipliers is not None and len(self.activeMultipliers) > 0

    def total_points_multiplier(self):
        """Sum of all active multiplier factors (0 when none are active)."""
        if self.activeMultipliers is None:
            return 0
        return sum(multiplier["factor"] for multiplier in self.activeMultipliers)

    def get_prediction_window(self, prediction_window_seconds):
        """Seconds into the prediction window at which to place the bet,
        according to the configured delay mode."""
        delay_mode = self.settings.bet.delay_mode
        delay = self.settings.bet.delay
        if delay_mode == DelayMode.FROM_START:
            return min(delay, prediction_window_seconds)
        elif delay_mode == DelayMode.FROM_END:
            return max(prediction_window_seconds - delay, 0)
        elif delay_mode == DelayMode.PERCENTAGE:
            return prediction_window_seconds * delay
        else:
            return prediction_window_seconds

    # === ANALYTICS === #
    def persistent_annotations(self, event_type, event_text):
        """Store a colored chart annotation for notable events."""
        event_type = event_type.upper()
        if event_type in ["WATCH_STREAK", "WIN", "PREDICTION_MADE", "LOSE"]:
            # blue #45c1ff yellow #ffe045 green #36b535 red #ff4545
            if event_type == "WATCH_STREAK":
                primary_color = "#45c1ff"
            elif event_type == "PREDICTION_MADE":
                primary_color = "#ffe045"
            elif event_type == "WIN":
                primary_color = "#36b535"
            else:
                primary_color = "#ff4545"
            data = {
                "borderColor": primary_color,
                "label": {
                    "style": {"color": "#000", "background": primary_color},
                    "text": event_text,
                },
            }
            self.__save_json("annotations", data)

    def persistent_series(self, event_type="Watch"):
        """Store a channel-points datapoint for the analytics chart."""
        self.__save_json("series", event_type=event_type)

    def __save_json(self, key, data=None, event_type="Watch"):
        """Append a datapoint under *key* in <analytics_path>/<username>.json.

        Writes via a temporary file + os.replace so a crash mid-write
        cannot corrupt the analytics file.
        """
        # Fix: the default used to be the shared mutable dict `data={}`,
        # which leaked state between calls; start from a fresh dict.
        if data is None:
            data = {}
        # https://stackoverflow.com/questions/4676195/why-do-i-need-to-multiply-unix-timestamps-by-1000-in-javascript
        now = datetime.now().replace(microsecond=0)
        data.update({"x": round(datetime.timestamp(now) * 1000)})

        if key == "series":
            data.update({"y": self.channel_points})
            if event_type is not None:
                data.update({"z": event_type.replace("_", " ").title()})

        fname = os.path.join(Settings.analytics_path, f"{self.username}.json")
        temp_fname = fname + '.temp'  # Temporary file name

        with self.mutex:
            # Load the existing file first (and close it promptly — the
            # original left the handle open), append, then write the temp
            # file and atomically swap it in.
            if os.path.isfile(fname):
                with open(fname, "r") as existing_file:
                    json_data = json.load(existing_file)
            else:
                json_data = {}
            if key not in json_data:
                json_data[key] = []
            json_data[key].append(data)
            with open(temp_fname, "w") as temp_file:
                json.dump(json_data, temp_file, indent=4)

            # Replace the original file with the temporary file
            os.replace(temp_fname, fname)

    def leave_chat(self):
        """Stop the IRC chat thread and prepare a fresh one for reuse."""
        if self.irc_chat is not None:
            self.irc_chat.stop()

            # Recreate a new thread to start again
            # raise RuntimeError("threads can only be started once")
            self.irc_chat = ThreadChat(
                self.irc_chat.username,
                self.irc_chat.token,
                self.username,
            )

    def __join_chat(self):
        # Threads can only be started once, hence the is_alive() guard.
        if self.irc_chat is not None:
            if self.irc_chat.is_alive() is False:
                self.irc_chat.start()

    def toggle_chat(self):
        """Join/leave IRC chat so presence matches the chat setting."""
        if self.settings.chat == ChatPresence.ALWAYS:
            self.__join_chat()
        elif self.settings.chat != ChatPresence.NEVER:
            if self.is_online is True:
                if self.settings.chat == ChatPresence.ONLINE:
                    self.__join_chat()
                elif self.settings.chat == ChatPresence.OFFLINE:
                    self.leave_chat()
            else:
                if self.settings.chat == ChatPresence.ONLINE:
                    self.leave_chat()
                elif self.settings.chat == ChatPresence.OFFLINE:
                    self.__join_chat()
diff --git a/TwitchChannelPointsMiner/classes/entities/__init__.py b/TwitchChannelPointsMiner/classes/entities/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/TwitchChannelPointsMiner/constants.py b/TwitchChannelPointsMiner/constants.py
new file mode 100644
index 0000000..93d7e14
--- /dev/null
+++ b/TwitchChannelPointsMiner/constants.py
@@ -0,0 +1,199 @@
# Twitch endpoints
URL = "https://www.twitch.tv"
IRC = "irc.chat.twitch.tv"
IRC_PORT = 6667
WEBSOCKET = "wss://pubsub-edge.twitch.tv/v1"
CLIENT_ID = "ue6666qo983tsx6so1t0vnawi233wa" # TV
# CLIENT_ID = "kimne78kx3ncx6brgo4mv6wki5h1ko" # Browser
# CLIENT_ID = "kd1unb4b3q4t58fwlpcbzcbnm76a8fp" # Android App
# CLIENT_ID = "851cqzxpb9bqu9z6galo155du" # iOS App
# Tag id used by Stream.update to detect Drops-enabled streams.
DROP_ID = "c2542d6d-cd10-4532-919b-3d19f30a768b"
# CLIENT_VERSION = "32d439b2-bd5b-4e35-b82a-fae10b04da70" # Android App
CLIENT_VERSION = "ef928475-9403-42f2-8a34-55784bd08e16" # Browser

# User-Agent strings keyed by platform/browser; utils.get_user_agent()
# selects from this table.
USER_AGENTS = {
    "Windows": {
        'CHROME': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
        "FIREFOX": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0",
    },
    "Linux": {
        "CHROME": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36",
        "FIREFOX": "Mozilla/5.0 (X11; Linux x86_64; rv:85.0) Gecko/20100101 Firefox/85.0",
    },
    "Android": {
        # "App": "Dalvik/2.1.0 (Linux; U; Android 7.1.2; SM-G975N Build/N2G48C) tv.twitch.android.app/13.4.1/1304010"
        "App": "Dalvik/2.1.0 (Linux; U; Android 7.1.2; SM-G977N Build/LMY48Z) tv.twitch.android.app/14.3.2/1403020",
        "TV": "Mozilla/5.0 (Linux; Android 7.1; Smart Box C1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36"
    }
}

BRANCH = "master"
# Base URL for fetching this project's raw files from GitHub
# (used by utils.download_file and utils.check_versions).
GITHUB_url = (
    "https://raw.githubusercontent.com/rdavydov/Twitch-Channel-Points-Miner-v2/"
    + BRANCH
)
+
+
class GQLOperations:
    """Catalog of Twitch GQL persisted-query payloads.

    Each attribute holds the constant part of a request body for the GQL
    endpoint; callers typically add per-request "variables". The
    sha256Hash values identify server-side persisted queries and must
    match what the official Twitch clients send.
    """

    url = "https://gql.twitch.tv/gql"
    integrity_url = "https://gql.twitch.tv/integrity"
    WithIsStreamLiveQuery = {
        "operationName": "WithIsStreamLiveQuery",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "04e46329a6786ff3a81c01c50bfa5d725902507a0deb83b0edbf7abe7a3716ea",
            }
        },
    }
    VideoPlayerStreamInfoOverlayChannel = {
        "operationName": "VideoPlayerStreamInfoOverlayChannel",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "a5f2e34d626a9f4f5c0204f910bab2194948a9502089be558bb6e779a9e1b3d2",
            }
        },
    }
    ClaimCommunityPoints = {
        "operationName": "ClaimCommunityPoints",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "46aaeebe02c99afdf4fc97c7c0cba964124bf6b0af229395f1f6d1feed05b3d0",
            }
        },
    }
    CommunityMomentCallout_Claim = {
        "operationName": "CommunityMomentCallout_Claim",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "e2d67415aead910f7f9ceb45a77b750a1e1d9622c936d832328a0689e054db62",
            }
        },
    }
    DropsPage_ClaimDropRewards = {
        "operationName": "DropsPage_ClaimDropRewards",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "a455deea71bdc9015b78eb49f4acfbce8baa7ccbedd28e549bb025bd0f751930",
            }
        },
    }
    ChannelPointsContext = {
        "operationName": "ChannelPointsContext",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "1530a003a7d374b0380b79db0be0534f30ff46e61cffa2bc0e2468a909fbc024",
            }
        },
    }
    JoinRaid = {
        "operationName": "JoinRaid",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "c6a332a86d1087fbbb1a8623aa01bd1313d2386e7c63be60fdb2d1901f01a4ae",
            }
        },
    }
    ModViewChannelQuery = {
        "operationName": "ModViewChannelQuery",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "df5d55b6401389afb12d3017c9b2cf1237164220c8ef4ed754eae8188068a807",
            }
        },
    }
    Inventory = {
        "operationName": "Inventory",
        "variables": {"fetchRewardCampaigns": True},
        # "variables": {},
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "37fea486d6179047c41d0f549088a4c3a7dd60c05c70956a1490262f532dccd9",
            }
        },
    }
    MakePrediction = {
        "operationName": "MakePrediction",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "b44682ecc88358817009f20e69d75081b1e58825bb40aa53d5dbadcc17c881d8",
            }
        },
    }
    ViewerDropsDashboard = {
        "operationName": "ViewerDropsDashboard",
        # "variables": {},
        "variables": {"fetchRewardCampaigns": True},
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "8d5d9b5e3f088f9d1ff39eb2caab11f7a4cf7a3353da9ce82b5778226ff37268",
            }
        },
    }
    DropCampaignDetails = {
        "operationName": "DropCampaignDetails",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "f6396f5ffdde867a8f6f6da18286e4baf02e5b98d14689a69b5af320a4c7b7b8",
            }
        },
    }
    DropsHighlightService_AvailableDrops = {
        "operationName": "DropsHighlightService_AvailableDrops",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "9a62a09bce5b53e26e64a671e530bc599cb6aab1e5ba3cbd5d85966d3940716f",
            }
        },
    }
    ReportMenuItem = {  # Use for replace https://api.twitch.tv/helix/users?login={self.username}
        "operationName": "ReportMenuItem",
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "8f3628981255345ca5e5453dfd844efffb01d6413a9931498836e6268692a30c",
            }
        },
    }
    # NOTE(review): the parentheses plus the trailing comma make this a
    # 1-tuple containing the payload dict, unlike every sibling constant
    # (plain dicts) — confirm callers actually expect a tuple here.
    PersonalSections = (
        {
            "operationName": "PersonalSections",
            "variables": {
                "input": {
                    "sectionInputs": ["FOLLOWED_SECTION"],
                    "recommendationContext": {"platform": "web"},
                },
                "channelLogin": None,
                "withChannelUser": False,
                "creatorAnniversariesExperimentEnabled": False,
            },
            "extensions": {
                "persistedQuery": {
                    "version": 1,
                    "sha256Hash": "9fbdfb00156f754c26bde81eb47436dee146655c92682328457037da1a48ed39",
                }
            },
        },
    )
    ChannelFollows = {
        "operationName": "ChannelFollows",
        "variables": {"limit": 100, "order": "ASC"},
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": "eecf815273d3d949e5cf0085cc5084cd8a1b5b7b6f7990cf43cb0beadf546907",
            }
        },
    }
diff --git a/TwitchChannelPointsMiner/logger.py b/TwitchChannelPointsMiner/logger.py
new file mode 100644
index 0000000..9101a20
--- /dev/null
+++ b/TwitchChannelPointsMiner/logger.py
@@ -0,0 +1,342 @@
+import logging
+import os
+import platform
+import queue
+import pytz
+import sys
+from datetime import datetime
+from logging.handlers import QueueHandler, QueueListener, TimedRotatingFileHandler
+from pathlib import Path
+
+import emoji
+from colorama import Fore, init
+
+from TwitchChannelPointsMiner.classes.Discord import Discord
+from TwitchChannelPointsMiner.classes.Webhook import Webhook
+from TwitchChannelPointsMiner.classes.Matrix import Matrix
+from TwitchChannelPointsMiner.classes.Settings import Events
+from TwitchChannelPointsMiner.classes.Telegram import Telegram
+from TwitchChannelPointsMiner.classes.Pushover import Pushover
+from TwitchChannelPointsMiner.utils import remove_emoji
+
+
+# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
class ColorPalette(object):
    """Maps each Events entry to a colorama Fore color for console output.

    Every event defaults to Fore.RESET, except BET_WIN (green) and
    BET_LOSE (red). Keyword arguments may override any event color,
    either with a Fore constant or with a color-name string.
    """

    def __init__(self, **kwargs):
        # Defaults first: RESET for everything, then the two special-cased
        # bet outcomes.
        for event in Events:
            setattr(self, str(event), Fore.RESET)
        setattr(self, "BET_WIN", Fore.GREEN)
        setattr(self, "BET_LOSE", Fore.RED)

        allowed_colors = [
            Fore.BLACK,
            Fore.RED,
            Fore.GREEN,
            Fore.YELLOW,
            Fore.BLUE,
            Fore.MAGENTA,
            Fore.CYAN,
            Fore.WHITE,
            Fore.RESET,
        ]
        allowed_names = [
            "BLACK",
            "RED",
            "GREEN",
            "YELLOW",
            "BLUE",
            "MAGENTA",
            "CYAN",
            "WHITE",
            "RESET",
        ]

        for key, value in kwargs.items():
            attr = key.upper()
            # Only override known, already-initialized event attributes.
            if attr not in dir(self) or getattr(self, attr) is None:
                continue
            if value in allowed_colors:
                setattr(self, attr, value)
            elif value.upper() in allowed_names:
                setattr(self, attr, getattr(Fore, value.upper()))

    def get(self, key):
        """Return the color for *key*, falling back to Fore.RESET."""
        color = getattr(self, str(key)) if str(key) in dir(self) else None
        return Fore.RESET if color is None else color
+
+
class LoggerSettings:
    """User-configurable logging options: console/file levels, emoji,
    colors, time zone and the optional notification integrations."""

    __slots__ = [
        "save",
        "less",
        "console_level",
        "console_username",
        "time_zone",
        "file_level",
        "emoji",
        "colored",
        "color_palette",
        "auto_clear",
        "telegram",
        "discord",
        "webhook",
        "matrix",
        "pushover",
        "username"
    ]

    # NOTE(review): annotations like "str or None" evaluate to just `str`
    # (the `or` runs at def time); kept as-is to avoid touching runtime
    # behavior — Optional[str] is the intended meaning.
    def __init__(
        self,
        save: bool = True,
        less: bool = False,
        console_level: int = logging.INFO,
        console_username: bool = False,
        time_zone: str or None = None,
        file_level: int = logging.DEBUG,
        emoji: bool = platform.system() != "Windows",
        colored: bool = False,
        color_palette: ColorPalette = ColorPalette(),
        auto_clear: bool = True,
        telegram: Telegram or None = None,
        discord: Discord or None = None,
        webhook: Webhook or None = None,
        matrix: Matrix or None = None,
        pushover: Pushover or None = None,
        username: str or None = None
    ):
        self.save = save
        self.less = less
        self.console_level = console_level
        self.console_username = console_username
        self.time_zone = time_zone
        self.file_level = file_level
        self.emoji = emoji
        self.colored = colored
        self.color_palette = color_palette
        self.auto_clear = auto_clear
        self.telegram = telegram
        self.discord = discord
        self.webhook = webhook
        self.matrix = matrix
        self.pushover = pushover
        # Set by configure_loggers() to the "[username] " console prefix.
        self.username = username
+
+
class FileFormatter(logging.Formatter):
    """logging.Formatter that can render timestamps in a configured zone.

    When settings.time_zone names a valid pytz zone, file-log timestamps
    are converted to it; otherwise local time is used.
    """

    def __init__(self, *, fmt, settings: LoggerSettings, datefmt=None):
        self.settings = settings
        self.timezone = None
        if settings.time_zone:
            try:
                self.timezone = pytz.timezone(settings.time_zone)
                logging.info(f"File logger time zone set to: {self.timezone}")
            except pytz.UnknownTimeZoneError:
                logging.error(
                    f"File logger: invalid time zone: {settings.time_zone}")
        logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)

    def formatTime(self, record, datefmt=None):
        # Fall back to local time when no zone was configured/resolved.
        stamp = (
            datetime.fromtimestamp(record.created, self.timezone)
            if self.timezone
            else datetime.fromtimestamp(record.created)
        )
        return stamp.strftime(datefmt or self.default_time_format)
+
+
class GlobalFormatter(logging.Formatter):
    """Console formatter: applies the configured time zone, emoji
    handling, the username prefix, per-event colors, and fans records
    carrying an "event" out to the notification integrations."""

    def __init__(self, *, fmt, settings: LoggerSettings, datefmt=None):
        self.settings = settings
        self.timezone = None
        if settings.time_zone:
            try:
                self.timezone = pytz.timezone(settings.time_zone)
                logging.info(
                    f"Console logger time zone set to: {self.timezone}")
            except pytz.UnknownTimeZoneError:
                logging.error(
                    f"Console logger: invalid time zone: {settings.time_zone}")
        logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)

    def formatTime(self, record, datefmt=None):
        # Render the timestamp in the configured zone, or local time.
        if self.timezone:
            dt = datetime.fromtimestamp(record.created, self.timezone)
        else:
            dt = datetime.fromtimestamp(record.created)
        return dt.strftime(datefmt or self.default_time_format)

    def format(self, record):
        # Remember whether the emoji was already prepended so re-formatting
        # the same record does not duplicate it.
        record.emoji_is_present = (
            record.emoji_is_present if hasattr(
                record, "emoji_is_present") else False
        )
        if (
            hasattr(record, "emoji")
            and self.settings.emoji is True
            and record.emoji_is_present is False
        ):
            record.msg = emoji.emojize(
                f"{record.emoji} {record.msg.strip()}", language="alias"
            )
            record.emoji_is_present = True

        if self.settings.emoji is False:
            if "\u2192" in record.msg:
                record.msg = record.msg.replace("\u2192", "-->")

            # With the update of Stream class, the Stream Title may contain emoji
            # Full remove using a method from utils.
            record.msg = remove_emoji(record.msg)

        record.msg = self.settings.username + record.msg

        # Only records carrying an "event" are forwarded to integrations.
        if hasattr(record, "event"):
            self.telegram(record)
            self.discord(record)
            self.webhook(record)
            self.matrix(record)
            self.pushover(record)

        # NOTE(review): record.event is read unconditionally here — a
        # colored record without an "event" attribute would raise; confirm
        # all colored log calls attach one.
        if self.settings.colored is True:
            record.msg = (
                f"{self.settings.color_palette.get(record.event)}{record.msg}"
            )

        return super().format(record)

    def telegram(self, record):
        # Forward unless explicitly skipped or still on the placeholder id.
        skip_telegram = False if hasattr(
            record, "skip_telegram") is False else True

        if (
            self.settings.telegram is not None
            and skip_telegram is False
            and self.settings.telegram.chat_id != 123456789
        ):
            self.settings.telegram.send(record.msg, record.event)

    def discord(self, record):
        # Forward unless explicitly skipped or still on the placeholder URL.
        skip_discord = False if hasattr(
            record, "skip_discord") is False else True

        if (
            self.settings.discord is not None
            and skip_discord is False
            and self.settings.discord.webhook_api
            != "https://discord.com/api/webhooks/0123456789/0a1B2c3D4e5F6g7H8i9J"
        ):
            self.settings.discord.send(record.msg, record.event)

    def webhook(self, record):
        # Forward unless explicitly skipped or still on the placeholder URL.
        skip_webhook = False if hasattr(
            record, "skip_webhook") is False else True

        if (
            self.settings.webhook is not None
            and skip_webhook is False
            and self.settings.webhook.endpoint
            != "https://example.com/webhook"
        ):
            self.settings.webhook.send(record.msg, record.event)

    def matrix(self, record):
        # Forward unless skipped; requires a real room id and access token.
        skip_matrix = False if hasattr(
            record, "skip_matrix") is False else True

        if (
            self.settings.matrix is not None
            and skip_matrix is False
            and self.settings.matrix.room_id != "..."
            and self.settings.matrix.access_token
        ):
            self.settings.matrix.send(record.msg, record.event)

    def pushover(self, record):
        # Forward unless skipped or still on the placeholder tokens.
        skip_pushover = False if hasattr(
            record, "skip_pushover") is False else True

        if (
            self.settings.pushover is not None
            and skip_pushover is False
            and self.settings.pushover.userkey != "YOUR-ACCOUNT-TOKEN"
            and self.settings.pushover.token != "YOUR-APPLICATION-TOKEN"
        ):
            self.settings.pushover.send(record.msg, record.event)
+
+
def configure_loggers(username, settings):
    """Set up queue-based logging: a console handler plus an optional
    (time-rotated or per-run) file handler.

    Args:
        username: Account name, used for the log file name and prefix.
        settings: LoggerSettings instance.

    Returns:
        (logs_file, queue_listener) — logs_file is None when file logging
        is disabled. The caller is responsible for stopping the listener.
    """
    if settings.colored is True:
        init(autoreset=True)

    # Queue handler that will handle the logger queue
    logger_queue = queue.Queue(-1)
    queue_handler = QueueHandler(logger_queue)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    # Add the queue handler to the root logger
    # Send log messages to another thread through the queue
    root_logger.addHandler(queue_handler)

    # Adding a username to the format based on settings
    console_username = "" if settings.console_username is False else f"[{username}] "

    settings.username = console_username

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(settings.console_level)
    console_handler.setFormatter(
        GlobalFormatter(
            fmt=(
                "%(asctime)s - %(levelname)s - [%(funcName)s]: %(message)s"
                if settings.less is False
                else "%(asctime)s - %(message)s"
            ),
            datefmt=(
                "%d/%m/%y %H:%M:%S" if settings.less is False else "%d/%m %H:%M:%S"
            ),
            settings=settings,
        )
    )

    if settings.save is True:
        logs_path = os.path.join(Path().absolute(), "logs")
        Path(logs_path).mkdir(parents=True, exist_ok=True)
        if settings.auto_clear is True:
            logs_file = os.path.join(
                logs_path,
                f"{username}.log",
            )
            file_handler = TimedRotatingFileHandler(
                logs_file,
                when="D",
                interval=1,
                backupCount=7,
                encoding="utf-8",
                delay=False,
            )
        else:
            # Reuse the console formatter's zone so both handlers agree.
            # Fix: the original compared `timezone is False`, which can
            # never match (timezone is None or a tzinfo object), so the
            # "" branch was dead; datetime.now(None) already means local
            # time, making the direct assignment equivalent and clearer.
            tz = console_handler.formatter.timezone
            logs_file = os.path.join(
                logs_path,
                f"{username}.{datetime.now(tz).strftime('%Y%m%d-%H%M%S')}.log",
            )
            file_handler = logging.FileHandler(logs_file, "w", "utf-8")

        file_handler.setFormatter(
            FileFormatter(
                fmt="%(asctime)s - %(levelname)s - %(name)s - [%(funcName)s]: %(message)s",
                datefmt="%d/%m/%y %H:%M:%S",
                settings=settings
            )
        )
        file_handler.setLevel(settings.file_level)

        # Add logger handlers to the logger queue and start the process
        queue_listener = QueueListener(
            logger_queue, file_handler, console_handler, respect_handler_level=True
        )
        queue_listener.start()
        return logs_file, queue_listener
    else:
        queue_listener = QueueListener(
            logger_queue, console_handler, respect_handler_level=True
        )
        queue_listener.start()
        return None, queue_listener
diff --git a/TwitchChannelPointsMiner/utils.py b/TwitchChannelPointsMiner/utils.py
new file mode 100644
index 0000000..79d7c70
--- /dev/null
+++ b/TwitchChannelPointsMiner/utils.py
@@ -0,0 +1,212 @@
+import platform
+import re
+import socket
+import time
+from copy import deepcopy
+from datetime import datetime, timezone
+from os import path
+from random import randrange
+
+import requests
+from millify import millify
+
+from TwitchChannelPointsMiner.constants import USER_AGENTS, GITHUB_url
+
+
def _millify(input, precision=2):
    """Format a number compactly via millify (e.g. 1200 -> '1.2k').

    Note: the parameter name shadows the builtin ``input``; it is kept
    unchanged so keyword callers are not broken.
    """
    return millify(input, precision)
+
+
def get_streamer_index(streamers: list, channel_id) -> int:
    """Return the index of the streamer whose channel_id matches
    *channel_id* (string comparison), or -1 when not found."""
    matches = (
        index
        for index, streamer in enumerate(streamers)
        if str(streamer.channel_id) == str(channel_id)
    )
    return next(matches, -1)
+
+
def float_round(number, ndigits=2):
    """Coerce *number* to float and round it to *ndigits* decimals."""
    as_float = float(number)
    return round(as_float, ndigits)
+
+
def server_time(message_data):
    """ISO-8601 UTC timestamp (with trailing 'Z') for *message_data*.

    Uses the message's server_time field when present, otherwise now.
    """
    if message_data is not None and "server_time" in message_data:
        stamp = message_data["server_time"]
    else:
        stamp = time.time()
    return datetime.fromtimestamp(stamp, timezone.utc).isoformat() + "Z"
+
+
+# https://en.wikipedia.org/wiki/Cryptographic_nonce
def create_nonce(length=30) -> str:
    """Build a random alphanumeric nonce of *length* characters.

    Draws uniformly from digits, lowercase and uppercase letters —
    the same 62-symbol alphabet (and randrange call pattern) as before.
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return "".join(alphabet[randrange(0, len(alphabet))] for _ in range(length))
+
+# for mobile-token
+
+
def get_user_agent(browser: str) -> str:
    """Return the User-Agent string to use for Twitch requests.

    The per-platform/browser lookup is intentionally disabled; the
    Android TV agent is always returned and *browser* is ignored.
    """
    # Disabled lookup, kept for reference:
    # try:
    #     return USER_AGENTS[platform.system()][browser]
    # except KeyError:
    #     return USER_AGENTS["Linux"]["FIREFOX"]
    #     return USER_AGENTS["Windows"]["CHROME"]
    return USER_AGENTS["Android"]["TV"]
    # return USER_AGENTS["Android"]["App"]
+
+
# Compiled once at import time: recompiling this large character class on
# every call (the original behavior) is wasted work in hot logging paths.
_EMOJI_PATTERN = re.compile(
    "["
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F1E0-\U0001F1FF"  # flags (iOS)
    "\U00002500-\U00002587"  # chinese char
    "\U00002589-\U00002BEF"  # I need Unicode Character βββ (U+2588)
    "\U00002702-\U000027B0"
    "\U00002702-\U000027B0"
    "\U000024C2-\U00002587"
    "\U00002589-\U0001F251"
    "\U0001f926-\U0001f937"
    "\U00010000-\U0010ffff"
    "\u2640-\u2642"
    "\u2600-\u2B55"
    "\u200d"
    "\u23cf"
    "\u23e9"
    "\u231a"
    "\ufe0f"  # dingbats
    "\u3030"
    "\u231b"
    "\u2328"
    "\u23cf"
    "\u23e9"
    "\u23ea"
    "\u23eb"
    "\u23ec"
    "\u23ed"
    "\u23ee"
    "\u23ef"
    "\u23f0"
    "\u23f1"
    "\u23f2"
    "\u23f3"
    "]+",
    flags=re.UNICODE,
)


def remove_emoji(string: str) -> str:
    """Strip emoji and other pictographic characters from *string*."""
    return _EMOJI_PATTERN.sub(r"", string)
+
+
def at_least_one_value_in_settings_is(items, attr, value=True):
    """True when any item's ``settings.<attr>`` equals *value*."""
    return any(getattr(item.settings, attr) == value for item in items)
+
+
def copy_values_if_none(settings, defaults):
    """Fill every None attribute of *settings* from *defaults*, in place,
    and return *settings*. Dunder and callable attributes are skipped."""
    attribute_names = [
        name
        for name in dir(settings)
        if not name.startswith("__") and not callable(getattr(settings, name))
    ]

    for name in attribute_names:
        if getattr(settings, name) is None:
            setattr(settings, name, getattr(defaults, name))
    return settings
+
+
def set_default_settings(settings, defaults):
    """Return usable settings.

    When *settings* is None, a deep copy of *defaults* is returned;
    otherwise *settings* has its None fields filled in from *defaults*
    (see copy_values_if_none).
    """
    if settings is None:
        return deepcopy(defaults)
    return copy_values_if_none(settings, defaults)
+
+
+'''def char_decision_as_index(char):
+ return 0 if char == "A" else 1'''
+
+
def internet_connection_available(host="8.8.8.8", port=53, timeout=3):
    """Best-effort connectivity probe: try a TCP connect to *host*:*port*.

    Defaults to Google's public DNS on port 53. Returns True on success,
    False on any socket error.

    Fixes vs. the original: the probe socket is now closed (it used to
    leak a file descriptor per call), and the timeout is applied to this
    socket only instead of mutating the process-wide socket default.
    """
    try:
        # socket.error is an alias of OSError.
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False
+
+
def percentage(a, b):
    """Integer percentage of *a* relative to *b*.

    Returns 0 when *a* is 0, and also when *b* is 0 — the original raised
    ZeroDivisionError in that case.
    """
    if a == 0 or b == 0:
        return 0
    return int((a / b) * 100)
+
+
def create_chunks(lst, n):
    """Split *lst* into consecutive chunks of at most *n* items."""
    chunks = []
    for start in range(0, len(lst), n):
        chunks.append(lst[start:start + n])
    return chunks
+
+
def download_file(name, fpath):
    """Download *name* from the project's GitHub raw URL into *fpath*.

    Returns True on HTTP 200 and False otherwise (the original implicitly
    returned None on failure, which is falsy-compatible).

    Fixes: the header name was misspelled "User-Anget", so no User-Agent
    was actually sent; and os.path.join was used to build the URL, which
    produces backslashes on Windows.
    """
    r = requests.get(
        f"{GITHUB_url}/{name}",
        headers={"User-Agent": get_user_agent("FIREFOX")},
        stream=True,
    )
    if r.status_code == 200:
        with open(fpath, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        return True
    return False
+
+
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this
    module's directory.

    Fix: the file handle is now closed deterministically (the original
    left it open).
    """
    with open(path.join(path.dirname(__file__), fname), encoding="utf-8") as f:
        return f.read()
+
+
def init2dict(content):
    """Parse dunder assignments like ``__version__ = "1.0"`` out of
    *content* into a dict such as ``{"version": "1.0"}``."""
    pattern = r"""__([a-z]+)__ = "([^"]+)"""
    return dict(re.findall(pattern, content))
+
+
def check_versions():
    """Return ``(current_version, github_version)`` as strings.

    Reads the local ``__init__.py`` for the installed version and fetches
    the one published on GitHub. Any failure (missing file, no network,
    unparsable content) falls back to "0.0.0", so callers never see an
    exception from here.
    """
    try:
        current_version = init2dict(read("__init__.py"))
        current_version = (
            current_version["version"] if "version" in current_version else "0.0.0"
        )
    except Exception:
        current_version = "0.0.0"
    try:
        # Join URL parts with '/', stripping stray slashes from each part.
        r = requests.get(
            "/".join(
                [
                    s.strip("/")
                    for s in [GITHUB_url, "TwitchChannelPointsMiner", "__init__.py"]
                ]
            )
        )
        github_version = init2dict(r.text)
        github_version = (
            github_version["version"] if "version" in github_version else "0.0.0"
        )
    except Exception:
        github_version = "0.0.0"
    return current_version, github_version
diff --git a/assets/banner.png b/assets/banner.png
new file mode 100644
index 0000000000000000000000000000000000000000..7782fe00e2c62841235d7a2ea294c0ca6b744537
GIT binary patch
literal 105907
zcmeEt_gj-q^KU4Eihzm;3J9W7g;1n~P(?(6&_aSX`>m$BGVR5i7XbhO?b9cZ
zv;lx~q$YXLc`DLZGj=G0^mW5s>6ts!+1lO99Bu_ruylT5#qre1+{Q}V%G}b&wcSb@
z0HENq(|P9p45B7w;p`-6e%43O+sTDA8UT=%^L8<}aIkXccwuE@2b1C4LN{=7*jdVO
z>We{yATEklwsuc^;Z{&zbsY;|2MdrTr<^Q@w6_$AfRmNGIfu8CBg{?8TZZ!=x>BU}
z*<&Cl$3I=%9b`EFOzIg#lS9!NZp9%cC@x?jEGo$%0TL7z14)WJxX&RXBrFaT0s(~|
z2nY#F34x@9ggO3xI7!^#mex|*j~@T!MfxSfY3uIpA_W9`d3gzXi3&QyZGgfc5C|wF
z0u&JuAoUP%^MSdWdketa?)*#Pk(HYT+|I?_&KbsWM$!C*vxmD3CuydCL~wHX4_TPo
zUzkXM0lm##fWm@8XCnRE5d!&tcXe|5Pj5GOZL9x1-~TINHys}rE1
zJ7=t1q!i&+=I+jL9cO39e^J!5b#`}lvvqdiP*glaO@f0*-wtN!?B&M$&lm_q>M6|4
z-5h3N_4JVpCy9!not>rBWAR51L>~()h>I#I2@5Md1U-Hv_DJNBIOu_h!UNHVqW_M4
zjL=u|U#ffM?46Ka~CNBa#K3HUDF}q#yq=ek&Ns;^8FIb~kg>2aqM*dHP5}$9rsJ
zk~+v(KLAgduJc;kAF|gs(JQqyh_kBhjN?CV!7@&HskK%(LH#_XH^=tf*G$-lPskU@
z$TRL{)V_Y*d-b8~%K5LZ3>8*JFnF{jF1A9wVG*?4U3kA8ozHLQwSQc4IyIRH7WLYk
z=(7(brn{x^gj4>rDgmEmL6kv%n(_orz~859%w!yY9%{Co2ax^K43c^bIREG2pfI`o
zpVsHdD**C8O^I86#@_N5*`}VS0jWWxuBwuNn`Y;EOcv`
zhk{wd*5EAG0u0^*S3WN>)`N-{w5l1V8DU-`yFyOQp`L}?{AvQXW(8qFLw5GT`3j#s
zJ|MmFZeGmUANxIpOwB{}3=ZUoC$6D%i;*AoDI4ED{5yTs3M}vqCvFjb^ZvCMNbX=v
zJ5S=1k;q1sYf9`Q$%JxbDd%dBPAY>7B7O^xO{TYCa^x6swogWSKSK9
z5o>msM!}eZtphTRhM67e@jZ$?=_mw=Nx1-syhkp7CUfJ3=%?8+!1y^1mxOcRjd1i0
zXoFUH@@C#hq&$nnnECgLIi-icdc;}FnqQCKWZ4uxHgB$uZ9A|7=Et(VsBIqnpcXJR
znEJx?3ZM-T5p+fppnjg^NX)8#&CyO*WVS_NBJh@$M}1uzgpi74TB)u+h)kfp=``9#
z-_#ptvNvCzb4wf-I`7Y1qgH1+H2d8_t*%AqjtzBC-y>>{GjcPpN%|Cz)#2;QPcs(0
z^~GVRhRiE}>>eHh-71JR`8cltZ4fV?8no68>)jOBmHgQ(marxy)-vdb?N$9nQTMqh
zW40_V)UmDPR;2&=xWuC``PeIgHT$*HAXNaX0J;1blg0~Hit=D-FSuLjE;7c~%}4hY
zNBys#$u;Ujx_+lkgHOwS+fBWMKifaE)t@To-yGv`v5lfdYZucJ4Iy269In
zFUON^&L0Ar8Q6&!j1KhFaxFDS_Qje}$~{0N6&c4Fr@<(pORbB6;|E=#aN1S(+9%3e
zSTq?Xyv`FqEBtE~!cRS;&
z;v2VG7QiKQI{7oAdDImV-ELw&W0yh{B;)XVkD)-8)sC;f6NmdI3M1pJ?=ifUWG+;4
z$0yWG3>H8p11-baL*MawR8;VQDw-SW>NJNyF%Iz-0rD>Yk9kDi-Zw2|T5aYU=~fy56_$-o@XiJT8#yH54Qo|h}80)=rPHc25$
zA%)BT1T^xgJdCP_zTZ{4z)r}H#r|iLUB?hYZob>WD5}DFxE{Yh?ZWff!Or$gaN|?R
z`;IK_9V_um56Z_JvDQ%4uDIc(2)X<x$mrQ>7j565xC_L~mHkxe@*_PXd}
zJzBTP_G3bA)1c1O4boygi?TkeKQhmAgl6hZx(qyHr#ivss~_8-2TU{U^^@7A9Kxyu
zClT`*=HI(U3#E*b=7qjaO84ZbJzmw(Zht+z4(g_7YdidmbTDo+cu`^YN#(9!M~*hT
zgOlyZEoydNIpQ(Z
z8o@+ohObN7fFs?i@09og!=<(t3Y$`}09m(P>*26%{8!zCUa3Jr(U
z-T?^xaRY-l8nl1M7>Jgwh05t=OIkBg>_7;ZD^nF(L)Dn2M8oIyIVHhT`Ucw2{D{yNZ6_7M8&Fba|sVAS{%A5cI*Wm+ibW+M*|?2Cm{mJQ>-`ZTR=iR
z7TFurWBU^@W;xaMi;bwQD7
zrnWU0ab(chr#$k)_>rCd#R`5wIdgl9qTk8B6k*f}lK&dnx%%RzeLWL=sGQcd(D2qJ
z@*AGHQx^F@_{G;YCf-OS8p1kIlmkxo`WU~kW(qY{%Ahl5nr-3LuLS)L`OD`z9J=t;
zB{j(oY5}kG9R{6*agU)9x+X%4F@{|dI3Vk-=X3vJZ;d9t;utKLCIpsyc)njeryvWn
zXDc4bvlqv`K`KLViQncMF|`pCtT&&(xcygOHcVuRvR;uW#5npIWqp1otc{gLrOx2e
zxmyr%z{wZnwJ`F8kw3ybC+AtXShFi8vHr!uH0MRVke(M7|3Y`BuVQd8-Hkcu?fXBc
z5-tGv+K8%T66n-2L_sLd3%3Och}U=2sT&^Dp`#EY?Hy+}*+_XMAoTLJx$QVv+YxN+96yRN;?=vH$^^ro<^V1(a{03k2|S$0
z4CK@Ltq+TJYD4w~NI4N4GjJmBXOgc(7+FHX-&w!nSyHVzInuXV6hmZ9$e)tjORr7=
zDJocpnAoqGn$~IUz@gESml}U4{@KSF9x+pOh>Ndc5P06y$~C@4YO2;b*x3FL7&HxO
zkOR*#wOt#peWhG9FzE1q4ttVqtS1#c1U@CY)y(8OwA!Dy<>oEJ0yf3VM+V{0bP=3H
zj^{4ltyqaf6*vj5kiT#-vygKQ*wxK%$!21Ut9r`U~GSSz^Y_P#BCiYkbAimdTR)XYDJ&c+{%^wjFQVHD$u_|2oyB70ldtq8Q?
zgK~qR7KLy5-RG)W2R|dZAp&AJPAGJvRe||8Y-z)TDXQtGVCdD>=u2a#kM`5^Ztk@h
zf%mU3v18vm)N61vsN`N%@j*!&p^RwH1#4Q#Y1SKSAvfCMM^zHm8FoB}j+ry#@}w%~
z49}(_|5sDSwL(wC3%mk%OON*aI45yfqoI)r5k0SAPjb57^@x!{PpFX&8&|F9`AlqQ
zpb)aO+T3>`uZ7A(Ji*O`n{N3VGRyF0zY6!&9jm83(1JrBZMR^lq;K|5a@Fn)Jv+89
zcIeLO!d!LzYSXO1ax>IC%OCkA%~%UfjU5wv)|;~_TQpu}V&qFzRw|ckTvDjh6yfl$
zq&*8meiuN=b^Z_bjcFkY#Et0F&pmb1L#c9yJI^qb6Vrl5TGu8GcytG2)K`k)fe!Va
zi#Hl*7Vq=xo=6s!PWr9BX7a*XeR?Z-=^c1)!VoiA#H2Lj@g}VPL&GLM&l>BAwR#+1
zy1aZK%lGn#M#*VpG{uC@rSVGIU5-v*FReT;DtD)n$j@fHRDcn(WnQLL>BXE;g
za4yiaX;4UE`nKTq6=y40v|7?}C0F2nT5PR@Gb+*ydwBaGL&@L5I
zvy&g|yU56tpCO#7UE-*}SZ=pCq?PmCpIuK;t}SZb7oIf67`v<8BFOBhSYJn8FQXWc
zsYpi*9m)8y>aI;+Je}t}<(piwxi-|X7=g>zUFJ3T`IJC_cXZL@tp3DZEMx{0m9x%K-@e)qRl-B(WzjvoBjMVcXGW#Ys7Jg>4h#Y(
zp8?hQl5M%WA*c0S955~SY0-OEfGztJ)vJbvtt*5z6!l!^KHbhn57P1>uzKM>1m>J*
zTG5P%?xEGEt3c`Ntgx4N%kd90U5g
z=x{JWJ(U&r+gfRyUH|hF^FaCUzuOAOQu&8i~+D_=8}NX17(v`j5c$g&OYA#H`_k-%In-B
zu;1P+(BFs338$HzmH45)%0N1+q}JTUK}2#1qB0*par*)-xoUT*n+V`PQx>(dWFfr>C7YCBz=owIEx6fZ|!`U9cM%^IRk)xJ3%=f73*`Q8bgfy@#|&hN@LV=8OPMY&@ddC)T~`|
zDYz`K`F9l-sL#{V9PN~1OHu2sJEQYZG}m*bg-LK2GHm+g-6#{w`AzeR(Rws6ZQbP!
zVaoHMZa16;SD|l)tf;VVpRl&|m_IDdU7c9WT@f2L&F>+O7aeVxO%}OJ@eaFsFOsp2
zrJJ$-lXz~F9JbH5+oCZZo*XbI{S_llceeJ-8md$~)e|#m;?uuGbW7=Ct<~RBRFi_`
z?aMXfo!Q}WDbhxjwHPWV%;sD0K3}Z-s^!C?F50y<+o7ie;PUOPReL)-ofgLijRTv}
z$oe!gS}QMc$uflnuxUW7DT2Uv7#kj5-GpVYy<09Ou@>i5s{-Y&TvMGU`be`L^4w{(
zz2A`6=CY~yyy;I+3t)YA?LAJWY!ZRIB6q+gEtdGrd^{;EZ7|h=JtFLUdV{0+1k$U!
zKYIS~fKL2@WMPr`Nq)SsPHLXe=3GSn^mw>F;rzVj*JnKM-MQCR*(VqrM+KYoO{%qv
z5iNYN5!nG!6!_Vye99qIW95{0c7+FiwIR?a{$}ZFCJ5%aDTh!?gD?5yd@~TMtV~{J
z-M&Q`dDVjOPimj~aABqO{APoji-O60tB%A3iAs13XymR|=t8k}8Q9HLMwKx(vAT~0b
ztAflDI)6>cg75N#wLNBbisutUaUs|Ve(XBhF1g3AKbX${1B=%^(FB^6KkO=k>=Lfj
zOc?8{R~If+6z$;kTJS@%NL`@c>|qc43xDsUwlyCcG;xf}i*J17(dF~+$jRl;Y`pOe
zdBd0Xft>Xo*_|6jwM*Gab=%jhZf+#8M5=_mx)>bHw*Yq)Vm=_gz38yvJ+7a06pq}?
zpm9<*$pH`K0E)y*yfHLHzfH>e?8(pJ9+BgJ7&kw-<8
z=C$%~F;~kXqDNw15&F-6aH5a{ZyY3YGm4Ax*{;6{V{R*s@G%8GbFJa3tmHW#Dfuqo
zMPZtk`HVw<43ZSjPL(oW47$_C8gw@8APPRF2t)VZ)l#@x#t!;Rd0uUFw0pQbuVr=2
z#5V(tB0u@$bmQW*RPf+%o#^JxvPqb!2ro$;BZ3tc4GGPnIaAN=!;u3`LWUDD5THSc
z?S3*}{njXmm{TF)pl*Y<)_@J2@M4ns#h%)yXK7!)wf`W@GkccS$kQHU(PMi!SDggP
zp8u%FsGeXErFM7DDLzfwD3#5L55oqa>*8KXu+Y2DQ@(mbX-PcxMk(fpx1V^aNszd_
z^n48HXfc(ZZ#_G5Awfn~uLs03RpF_IN|SI0kSqUni!PcK&BfZ^XzcF}i8)l{bSJVD
zmpLR4ItIsK$baGRcJme=Y>#u^`}$+p_kP`3f6!ZML{V3_RH5Ya_Vra{O!a%O`xM_X
z74VjRV6m}JsH{6ySNpWlX#R40QHVoa^2g~3PVUsWj@V{j0N8Bih}H<#-EatJCGSq`
z%SIe@O4v>n^76$i(~&vl#j4yJ>`#bge=QX-`xIVVN41m~;O^*s^8ikLw20Z%8yWo4
z@y@`NBaaYj0#38J=V2N++4)1z(xKt(!806)BmzO)?yQ?MZLG;xE#+NRC#
z;4ViOyY$nu$Y8A2qH@gWX1SeFW&L(h`WnMlKoQ;Vuht)+%6@LOQEc@E8Ep^Rn%38~
zUUZhiH+BTBUlS8=PnU)IY5yi9CVQ5NvDqaU3VCc@kcs~#T-n?@pE19b{zxQyslu}>
zLBgyk8tp!n&s1Q%>oY)>8T98c=`!fJ9G5!W9STy^UM&-W<{}-I98uyKC0wMKaX7WQ%8Ukg6X
z(({U4NhKF*i0x%KT48F)+?*ma@j9&b#XF#clS8Oen}uT4Rx)rb9$R-bkiyB4qAjhC
zkMYt~rW#HYdKhx)Ud`vqh84DvM2vC+^u<;+1*XKoUQZIC#R{lFqhm)(AWz*2JKUB&
zxDUpv--~sc6VuK_9j4Th+q-FQq1*J#jDJO+
zzZ)K$n&Hysi4rR>Yg3fzX_#y2pkEj)wJ5k{;P&D7)IN*M&J1B}XQQ>s=zbcw7yEdn
z*KO{0R>(swrWCYsWi1}3@<0Nqy`vlPAq`%;u=cgri@iMw6EVR=;57CFZd
z?c98p{KaG=-LwXtTQ>_iyaSo712;w6HZ(&8>F1N%)^zvzhZ=isb+*RorTQP;FoaKq
zND-PM!~(jF%hlH2P&B~%Bs4npksp_f$XNg0dj~Nzg_?^4F76IRf>^dax{@+)V6U%)
z6xV7^j#sQk`E3qTG@PcU`OgG0`l06!w>d#QX!dbC&w&CnWF>JmIi|yMjuh1!Fb(58
z^(&<91-Ay0JTyA+6yP+8`VLfw!!mXq(}OIkRtOY=f4+WHpQDzXFVTQCsacn3_`L*S
ziG89YXNV3PDYC{7g<1$eXon83DNhJ#T5gV~Plz?K25Hq!D+^J2ry{2#$JU>*962#<
zeWj^YOBdB}i%-cJ=r%*P*rXS<57^{q@Mfx1kI@?bVmKgcVocMPI&3DVtSarN@->D*{bQv~7&tk=F&}
zj0D)&cK9vPLKM*Iz=v`(kN#FSynCw1|HBaJ{#e1Y!c+NW%@?xr*Wy3i?5&hsho{Nn
zW+vG_fMZi7ilrz=1)t3NcwTw`#C+mC^DX!8>VqqFYs#yRii)+>CLUF8+6C%HLaKp_
z2Ze^oPq_BN!qNv@ohCc7=p5319IFK^zA^pK_<=(1SZsVG>zpd4cZyL8AEIx+_S9`?
z;`eoDeZg3rUsn#uEH3
zy>DJ8*|NLKTYdXQMqMqav80<1s_Q_e?FT`QR6NoCKtfJ|GC8##lF$=3oKJ5Q%EX
z@LgAJTMkMHnEE~G*9dDdzotfbOP6^JH2s)qtj8RZ^uQ{~`d4zX6_e?gY$oF@1Dfl6
z<1u2xVXk={y{6AM{>6<&-HEEvG*(A_}>OURzOm*D{9nE?*
z2uRp%=4L{o4U_Xtf~Y6R^v&y)H{)pS@hTmbA~g$pA->I>rk^{~h9W-%^9+`?JE>G{
zW=u6j77(FOFSd@>?1#5+EjcJQR7b?ywnP;7&AD|$+sal|vajy)58ZA&Bzq|LT>4LP
z8SoB_dU&|h&{Koy0NFukeHYt9z3zkt(RxNIfVCw%=znTdI<)O$;Kmv)+6GMN<1PG-cv}UF
zETI2(*H}GTV_853MAu6v@uQR`>~))6-v`2I^+lucq%!{<>7wZ>oF;i=P84dJf6?1^
zF9KHeRHAmhZY@DdDa2Uo5FQdulgA(!b|yJMh$uJl_#%e<_S9Vuv_N><(WbzqT;;&(
znvUYsHjT9FU8xzWJu(8D3*ad>|4hzs_urom;q*?o8kc4T?=QKLBHSUcERo)KeA@qE
zms9kd58;c@LKgYjdA6W4@RsG&o;xkZVXM4JF>)orPoBj4-WR+#&t7DzrsL({kecGK+l;o(6
zRDJ4Jfid4|(r|;))^Qu6uP~a4S1y73gyP@Asn44uK4izKh2p+!d?2K_ZupH+y2jw*
zI56rUI~-NL$J&)7Zonw_MzK~UyH;?UaLX^?nvz2%PZ@vaFEOz$Qm}Wsx%jm4G*=>U
zC9~s!I5MHrtqI@}`6r;#{2;9{8&3n%5I?{{maQ3G^5
zp&W1Ke^?W?S#Qx^I}qn#a+bIZIotR%ZGoMCp
zbA}pZ&QfBMQ~s!q%Fx#yE|0|kiAYxczU|EEl&lTMmlvrQ^i7ky?*(hlDK!&(U2|?v4;IRg0usL9Lq_p
za*ftq?u&~D$AZM!Bm@-R+4Hk&lDF8qIHiNG)b**K6d-lXx5+fQX^->VdwxdiI3Bu7
znY$w(J`}(6S2KmF=`pX4cWB0g9hHVfZC;Su_g6*EwTsU6fm>^4f&t(8oe#S5Szv}H
zKk0KQ0*u|I#l)oRo}i$Pxf1bvaVI%z-G4?>-SAH2cD;u|I`4a?gCi>J=8pbiNfj|Jd-n`ziWwRND3I{i>`{e~aj?T71Km4?7WR^2c;R{EX<
zH(a9PNbWptDrz)+DCOMzntwTh*Li+I5?<(g9$>MLMUrmT!}CoL72`djfw7s@Q0{PP
zxu{W-CtlK1+TII=KTtg#yt4PLmQ|v=V{b02ng$;(&gi30CkCJ3Es0$!=IZT}X76mS
zUn=d?Fy{U||8pn6XA)fIbm(!)n3YU5sUi%X$PqsQzw^6wQ`-qmnVUD}iD-tfo?JyLg|4
z`sJF_P(oKiOw%WAbbXfgF!i;2HYo`pG86rFOmeJqBGGnhVi8{z$Sd0M;qs6vb*Eq%
zydJrj9~!h2)Y2SG_Ktxv=*$R$7%~)wK+BG8W?}iMHnA4FPt-Y8_D}B?Ovo%nb;suv
zYM-1|`EJH}o_?3M*Kh)B_$^(Oo*~@J{%qD>zlD@O#0(uvr9l@*AnUhxG_zWY-rP
z-WUU2$mM)4u?y$L%1A~)1T;O;8p9wmoybe?0w{w>Yym;@?cKsQU}9X7{Sc4WF$M_#
zr0}h(h;-q3y3Hx}D@MwRp^o39IL>q01hYRbx*;)9kla(`K_1zAY0G-?`tP6o69pW8
zsOm{q){BnlQ%^_T44<)iLa92s2iLhezxOjLjpdf3nIf9Al$M=S-~LpMGm;3{>VLG^PS-`W!19hryZ79*!^@P<6kp#S1@t)SCYag^
zs&wvnwIfHCD)Pp5Bu?r&Sx(AdZ6r#eOoGgrdkB@szv77^PqO$-v%PEO#<$og$1c01
zA!$hkALX%7HfFznnfLd8)nYjzWrsF|rsp5nrQPA)W9S-3R2kE9>*^m1c&Vbd3X3+u
z*NCx<4@~bjEE+1kMXfC^i4-3MU9Fpro8MxVrF;-y9s3^s%2dhJ$m4dDMc@OIQVq1u
zQ*jJyLdCYZgd}Vd(foFA;wuJ}W1ZyNG-#-+f0;@sl*mDKhW*U5uP=Mq6Kj6yp!3-f
zvpc@qKc7;z&n@jco)X*!YkU34yPpu~Rb%T70k#r+ytXxuVk0YgRMe0tN>+LbPsRtv
z&jnATy{opi(U=xDvYKpV`rdnz9jwZ4CnwjE>-P3l6dj5gLM;~E2%49xs`iu2p7S*u
z0hn3-LXg=N8UPf*U4=q*Fb6b|c=bmx&L=Syl{E?_3W;{w&rqm+^m3*PkB`%?5lWPW
z`nYJx0YyVSfBSpjz~az2U8mNU(>D04z?PRYB@2@9a3!VK&x7FQU*O5lpKe+dANTe?
z_v6^8T3Lq(^u1x=C6_e)mvIY~_Hu%6Vt&M<5&55TuITxnv{X=dbfk84_Gb>=qjaNH
ziCxBfIZ5e8`KgFz;qljrR^@aJ=1T{n@!(imk|)HJC;Tk&l|p@z0gFklW>H5tK`W$Q
z%IW3ezF|IzpD%Nh8QbhxSL>R=kflgc2A!cD)F(7P*a=B3UVFtZ4hUC2fmLT*HNduP
zXg?Fuo!|#t(&x=>;YZ(+s~f2Mz*(Zl1eO9uzF
z>|Wa;f#v&-A9cQMd#mCX?98Tx3AN6zQTksMW@M|auJP@TF>=}HhFzdPmgfa6axXG&
zMKs8XYqUlg{p5X9dor`9wB9^>
zS~asWare6Ctm)vuB1cc
z$E#7A^#@$wpl{5`j(ZB8&soURMjg9~oQzd4cNMz1;TBVyzKz=LIFD$Q7T2V|+kJ
z;InRzF27&2gaz+Yhx+^s*NFVdpmjBZsYOxnwY)o|G%z5DK=l0hX+TmHw$95+18AIb
zTycMi%jPINvf_I@^U{!%saO>>$bks;=Ph>l(STH+B
z$|squZKe3&%rZ3zMhe>M&1%&}5sO)1ceN!TIpCfCfKKZA*8TS_)x^ioE#!*(lR+!n
zG+?IZu2AUl62jz0&D-6yh)j;v&_MC+t1X&=29Zp+R%BTbxtwh$vb5rDseABA$y%`e
zl5Fk7XO;Ju@g~a}%QOuc!lw#gq=}?;IC5Qw?dGNDWvBT)w@JB{FQ-n8{?F|aOlgTC
z(aOug7XhEsWJS}=*KR`hPw7TKTT6G*Ujdol%YNk6*Xlz(A=GCG8$U{=VOXqx44RT1
zuQ5+>`IYhJs#C}sCug&8gIuP9&ta;HyBx1GUcI_n0kk-FyVQJU$|Iq{dGdv6_HPsf
zKJit(M+|H|rVjN)A|oK#-p6E_VIWe4VnW_VigKbl)TAfXQGFEzg*7+A3p>dc@l29eHS7DBpN+m
z-3X^OyQxo&eX|HW1wcq^@ReL8P6sq<_Zk5RnjPV~f^I4{S;MW<9A_Y+I~>6dbf=-rM<0hs~kBgo1o-y=I^
zb7(}9HB_Kz=-af!106J3o#Kg%xp?@Tpp`lsfyky^S=JSWG6-9rlgk+*$doo
zB3NVn^q}Pd>-v#4w)NA>17>!1QiP*Aid+(HkLt?A$ZhvANc8z#lf7~n%>T(Ipf=ge
z72xtKHEXSjMx65gaA!39%g+l8T|Sll@sU>{i$6Q{j1iw*4_MnzrV97CqFcJ6qZggJ
z)UR#W^+lvS;*roLy}ETN8Uu1eyl?jEM|WJ3cG2GYB{9`kzs`<|>t+p^4vR|hS0`p6
zDLiTcKVOb4J2pQw2msX5>pRy
zX9ZoZLcDY~37t^MQnDFlV*kgu_PidybPaUmxrCaeTf@Gt77YeU($f
z8mk(;tavADT<=?dFCLzEtiW9cr)s!ocf||rC)zt+SI0JGnAdQa6Ml;p@%(@Qeg#@#n?zhH|?p`#J
z8;S+mSnn@!nVdYyz!l~`zO=w;7Fgr(d7|pMYQ@%6NJhM%KE0}cHYboJCh7VnbBg;Y
zLF-$#Efc|;7(jWEq3BSBuP(FK^Sh|JZ8Gl%>%n{zz>kTrW!@(e*MD4o<|had^B`44
zO*J)UocV#lkPGa@d&!GsBQwq@R-loMMz@{&)zS2;$2zD%jb;5&yE
zDhgFU2DMYac8eXCv2s80po_c7I0pG1h_0V!z
z5wW}{hHb~S9LdW{H`9Ao_tttkO`cA3YREkit?{74&VZxRr#|J8BAZ;F;d$?A>glc<
z^Z;e2qIh~h`Z6;%FYNMhf13Z@|9p;JF*u{Y)+gYXSR^a~sq_cV2*r8z0TUCtf;C$6whHOL7XRtH!d=_KRu0O!^eiI##VE3KEX;-qT2x4#=TJogxr)yMls+{T-T
zC?+{gbT_Sb^lo-&wUMF(qtdD!fu;MXM?-AuWVK%hMi}gMxWC9<DoBmDI4bS(p5=A#_m1ZE$C{H&MRVr_H?znu0Btr>`@znZeV!AKmn
zyuVnjVZ|`Trj^C;^?rb5SU1&Ts1KMe1jP_DAS3F0WP%K6q0;qbq(4`IC8Ij@rfb-&
z-XgwCx9=I(MYyrW%dEW)vGhOq(tF`**F8D0?1maC7wP*W9>M;tSge7O`ZKjnTor7E
zD5xa!%5kq1K;IS`v_CQ)TWAB942nWrA*G>FF(iq^upzfU
zGZ3AH-#T0%Bb)i`XuJ0Unw!LD?@p1`=&I8lODOQYxOlp#tQC(o%S>jwK9QpC-wDfQ
z5DOe~y(2dSvQ?C3nHsG=5l=Z}PhU;;!D5zn>xKHB@rVH@%NF;F{eAF-!-Q@ejUD%W
zzwA&S^s7e&CYZ{KX?4rs?~1-p#=kCm2d>;wo`s)`hb
zQFQdt?buxv(LDorloHxMGb!3+f?(}Y+gEMsMXdjv%#l)
zO~}j2#mk^KI!{0Gz#KeY`HMW!U^{;mq69`vyS+5=k0vygD|U~i{B%9(b>$B$Pu`ag
z&8SUFJ6PAmr$_JZ{RkfKKCU#I0B4s@uCFJgOa}H(#N~Dkr*;V->m3!pJZHLu(rL0}
ze#}tbN799-a@2h1HY`v6(H^G|%anNXLr}$LN
zC4U&P{M)M`)+UJXHt!41rPwDm
z_O?&18;gKZLlB|N8UMbZ7KQC=CF!
z2wDB1!GJ1V{o!@`{_eN$>0XZ3LlLZ4G;hrOAvNt)(oK2P#Kh+bV!b4#KT*MBF*JB=
zi11p9t4Fiqc9GCKo%=_lCrcf_AS_2VQ~T>(1zcWMUfqeqHN--<+>^lXO;10JXEZIs
zKVA&}nJmBDKueaa%XUP(1En~LB3%uT_LeHruDzT$cBp(1790>{ICD2S9XW9vuYb4a
zWCX){8Ni^EoButGR-P}@e_H$`dEv>{ed^g?+LvvAY|0Ja_{mi}&8O}AFtAs>hOy0+
z5W@{jh3iiEbuD_=mO&v;n>As=++Hq1X?W)zTn}1Nxl$%-pzecX|2L(mrIfPXi3y2h
zsr_edlR>{xn-7=wovORVyqwU__fz6>^$Jt!eOGU!;wvk1{P<%z7}ZiZdee^C@~LL!
zdRM4t8xM}%-jXVInF~l3Bd0jj-SE4{5AIo|Wv?Bs?=wj6m>K<(7<2h@Bc&DibmOZT
z7(Z%oq&8f$BW|%-EH@Dq7tUxVaCcc3R0|nUJ>BJ3(h4JZ{mk;hB8a1k1M~9!!
z4+LP-e7-qP^}qtG@!_!p2#?u&qIx7Lj#aZcWE7Szlx0jm(A94wCL%w|aSBD9y-L!$
zG(EB=c2X*ulM7z3%t>fSp|&$!wSa#txIGhqz~z$m3c@Mq7J%ywFDU^p?!toE?wOjt
z3w4%gXw6|H$Y*n~%ASbE?xhHfX5q(eUwNEbiM)M*FhefdRLZ*HFb$(eU*5A+KA0-;
z4C^;nkVskLG1y}qMLmV?kh3dBQ%HpgLDEMTkds3ve6cF*?kWm?@$EY0qs7I!lB?9G
zhd*#$(8wuDh(K{u4|S<#B7aErPJV<^&?MQ?*J#WHoN9&hHo`cSMXX9;vZ%vFD1`fx10*
zg7D(#jxPF9W2A0fod4y_vj;B2foiidF_SL?%TRf(Ww}c^dTw%kvm5E(ay&hla{Sj>
zKGZJZE1l{W*4CF!msf4vrtqOO&b{|;_Lw{sfY?8c@eOdl8W7W7J?98Ep@VV78mQW)@$L=8TCA+EiHMUbrn^5
zkFk`psOLk}*EvD&OPwyaklx;VCl2PGj?Hn*
zog}RZEsU)5gL3J5j!hy~8D?C{f0}bu&Xa9%jZm7iE~O=&%v0$1)o;~C?l2603d5Dg
z`=%9GItfSqWGy7UK#l6QFZn
z{C<;TM}|PeNFBb#7MidLBFRGhm*yR0UGQv1z$aXF-!PVi^YQl!Q=>g)8}*1QjgKCY
z_Jc-S)ry~X20XcTk*vVx9g-+C0;Cnt_M+U3zj)2QDSnjQao%v1V-oSSXENrgx7V-_
z-fY2_FCkd-Fr8i210xpz!<>%Q%g9Z75-`&fOraUpT?qw9Hntis3oYO&ym&J(pHpJ~
z$7~vA6I*LIvaE|H)RrL&Grxb^>q3Vc;zn-;U{EQBA}IB=%14+Ywu_I=FJ3_xd!`@z
zsz%ond+6(MW(~EQkd{~J>Nd46*cvG9XGsw=1|n078V)K(?mc|0lo&ivAg>yGpl
zAHF;I{YKwn{>6Gs&wfe7<#|^(pw+K+-(X8&8Vbx
zd3kvZgZa~Us=T&`z$%a#f}$wfRR{LjDDEoeZ9a
z`(|1W2%b+IQAf(1X~fU&wjrkhkJDjCqla`P(Ldkw>!m%;zMP0BN(T~LL6Hg?=5hP<
zjt4hyS3V$^EKc@+IZofAke=w-cqXJR?b;yVMhnghAMFUwc@-u{SIkt<{tCH2yQzP(
zhj`kPDqzEYd|G)uIGuaa0UB4G_lyIbyzGhJ4u}TCp5$XMLC=
z=aBK5m=Es7?22nW0bP8E*eEgVqZIgMU`sB|@lnH}=XPumKZr;U&&A7j@}^&%eE>S-
zaT?=Q`8-D2VrSfzp8#Gs%zw9jhm({StlC8CGc^w%32(99b-HrfD+A^EZe0xY^P_Z1
zOz2V2Wx1-A376;OwXOaA9Q1U%3MaV+-t;}o3I}sVS=w%j^+BplL6^-{CZi7i=P0+&
zolY)|PfZp0@yLPYb_7LAxTnwqIVM`vP}*p*nG>G5#S?lXnC-X|@I}Ie9A2^(1Ja1{
zAl~zh&U*3p{(~1f!*}d{P}j*{2VfM(129Q6D3q~7^#mk(IX+V09Jo&?l8Djv!4zE-
z6GaI}YAm)F$N`rf*%WHlftp_3h2ACvt)=Wl_V-8m%07=!bu8f7ku?Ng`!^qYxIJa2
zxMTV-(%XG>z++80EWC0uT?mLazrmZQj%egAl*j!P790Fxo5F-%
zy->QijPz>FZ5p_e`eK>AadaXweqfhEP4mwRnW=P0+pU2&-&7sG$(yd+xG}Mx4
zRdtGtmpF=2*SGzV)vZGzoEqtfCF$^j?l<3#>I%I{AYn@dzA9ncG(nr-
z=8yN)hE5dENV)Z&n#!Kzkfa%0qzRzS%z3S@2P+$rMDd~g-|;cXSg#Dv{sZ6pY)wMjG}6@TMMAz
z`e7+Z6i(OaSo__r@yPzNGU9+cx`|jV+l8Qw>U+n~XhcGI)fX3fdGOwe>G-GH9fYix
zu;FGrRlIIQ+2c}}Krf%!EBAqJDf60$BcBpIvd`57NK-tGOsOTVgDcNreE(xI1^2y1
zrxJH(;v}NOiYTJ^okL{rge0!|QjZVI?YqPuTYn=cdfO_L37rp4tz|(Qd%t;r#%ZeG
zWp-wKbZ~~r!>$`e=!Z@$m90PaGK`jc`XU)q!VXcFTocUa3EyVMw>PN)z)hb28KVK?+b#u-mS2PtpIE
zshyu{SYE#x>)CFqExRxx38e48*s$|t*>1ZPu{hP1e4kK6i@vu~oyXm=lf4my{nxkR
zb__vX3Ui8Tb(JC-6qBf*@udEH#8bj&%i4PLb&@oeI`p2tQv)suHtBtiY;x?G`&f0QVtT*T-R|M)SMO))@a?M7;bIXk_-#5(Ar;Jl
zVHmg6!t+SR07DG=Q8?6ch#1y7PAqkI%|$D2Bl~Mqmq1NTD6GC@lyzJ0bqUdKM)ch}
z@>Wgksg`=KB;b+y1lK<;!$L7&&UCWWiT3!|4nNi5<|I{t9I*RKP~_nx&IP>uPUPt)
zQbtx5_CTN59d@T=Sjewpdt}l!JYG1B8AL1)Cyt@7)5A#k3iMdp)`p4iR>}1Xtge#dTU#6fNe^`7&gxcB6^XD?JNQ#35
zsvq0)N_A&gH0sdHr&y;Msp|S(Cg|7w#a$P)&};W6Lq;)mG-z~E#9^fo?IeQ;9pvt?
z%}RmgOclCvzJy|8!n)Mqi>g(sv$jnY3HL*+eO}6OT0qi96QTbrmORc-B>m=YTxuBT
zktY8+*I_7z0{L$F;JII8bLIVUu?pUdQ!?Pqi?OsC_s4K)ZCLu|7(8lt)GrYIy{7~8
zS|?6^CpL(7_va6so={rTfE^4%fZ`xLv0ZC4*>ckIA78CEXj$ovpr_IwTv!tB+gh|P
z1xt&+b1~h(wz8x(`hD1#4l9ub@4|Q_I>;WMo@8em+K^<7Hv}qa3H2ARwFH4=tgp6=
zYdGw0138>};-)s1Za_f~ra>!L42uFx%YP^bT!CM9gyrV}KA|>}ZvZcQF9g8M_>IKT
zNTY8~fd+c9Rj0QDl>IfNpj%E(voSsye{J)Q>oOHhv0M2PXC5=5+uQ}hMqKOoAdZ;m
z3$3`z4KfRW;!ulYS+Slp^M48ETtSBaq^?_|$dw-T$$p2&&w{U^5|^F$tmYnN4^m&L
ze<5bKS%YZ-*-9M7^7$Ks5!HR!NYL$Ipj4aihff0PRnnBi^;SW=AQkOptGyyKS%a5w
zc{r7ZUcz!ACh<-SMj!4#BrRMAmr-~1*st|u@4>X0=U?q?euYO$4krHSaiE4az=#MT
zxG?DM-t#i=95Rcs*#_^CAj@IfU(5;Ka!JR%=DFJ_x90AT+2NKkb>_yzyyiZn)90od
zRhy5pDV~!T2cq)$iA+-2OPKX|Q%iyqsoLAQJFoWdsK+c})2>@`IIHdN#c>j*2H)bo
z3y}bz!EC)9H~=iP;^@0E$NXE#x!&XrVOi3krQ{`b)?R+mV=A?*?p%R}_NN^s5Pav_
z-U${}i$r73CD?AOJ8|$qW%_X))J4c~Hkr}=cPgp#`&!Ep}LB2TYdzN@zr$dw8ZE{`*QwY@D8O@`4>-JCdIZ~9D>dc;
zFL5-B?xRXCNjwuw(mAQ$MMg*~tUva|>?}qm=67Hgmm-ZtMf&$PVB!|0+>;PPc{~Yj
z&(^RyJ0~!f0+WyD
zI+dwh+6v+_@U$?9pP@c*1yZ$#c%LB^Gmri|J>|i^_dJXG{e75>!OpfF)nf}Ah97jV
zk^UId+*9HR5$4t0ziu>k;)QK};ZdRPt9V(>mdB78`?HzhJfBtpFEkTLs_A*yIRfYg
z1n*J8cz~>*m=RlS@4^X-Z5y0bxFq0HgfC!BzX_W=zpM4s8T$B#n>=!_?sfzvng9+e
znArh{X$nS^38h6XY@9Kd&EOt(G5r>H@)D66@bCL?lu-MzC?Y@qI;^`9wu_&DXs8iN
zTmF=OU<>3-Ba!%6O+dZqsMM2S7GY=<8Y5Yzu_3jURkOdM#Y%_UymP}@@xJkPs
z^PD1N0LI74oh_{I9A|kZM&1MR7GGD^h2yGcTy%m09x9WhyhxytY?jkC4aTNNr{DSDo={8ipz=hFCHqy
zf!#8D@K=I=U<3Hufv0lLu%nDxSbCy!3J_@ND@p$KmULN4#lmQ8%!-m`6)7*MM3g8U
z4caJh+Va@pdDULprILU1r}%e!qNFSGs#fEU%Zy9rMg-Q{)}tAGG=G$ki0TI=_BZ
zYM_~!L8w{2$C3}Dc$GQV4|=M
z3Z8P5H}|h#Mj4V)kQ?9J5o5MFf;rtmB0pmD{BS#N#K{SpaNRz!h>#@I#}{4BmNi?I
zlT)#i#)aNgC(?9hiFpSRs+wR7a9l$y-hXBz+~wo<%^+3R!wPZqp62HzWNj?_5L?><
z2Ym+DJ3r14kbJOSQ7WK{#4aG&l=L{HH+*!BRh5BxlfqNFG72#b7aAyVOB?;4e+fms
zKaL!VguwD8%mhzntkcB5brtNi=P`=XKL#!PKv)Ug-(7{)%eJ~vD5G4|Fgo22Sv!aB7yhSrL~Zjsx{pY
zY3_k8fuhn=`F=Z(@N5b_$dj&O`y`%|Rm~1JrpR3`gvQN=Rr^7;%*d10!(^Mee5V{h7xFXO10S)$9*ZyW@6pE70jg}$w^
zU>ex|(g_XiqdBBhRksS&Ff+GY;-!LgpXUdTsc75C)$nxIk*Beo-~2R_MhhgtaLl~aebhfE1WCn`)d~qSx9%jEDNxS8q?Ti;wcYg
zHqa3yIS!7Zw<%*M3%`#{Gq11izx&DclS%kBQ+W{zgB>2_XA=xxbz#_RCwiOA}zMAoQ1>c
z=5v{05fPFM9n$f*>QCW(2~S_$=Q&TR6sk+wF0y_ds$rxbn~2p2yveR_e$*#V5tUpF
zlLxl~)TR7S6Wyy-Pk6?gSWrXvFX4h0#$(<(7<=%+%_yC)NZp2@Q2hPodHo6Ho76q|
zP|)|7HPK%s7P11!`%5Sko*mSDNfH5cKP|k~gO19N@AS99);oDq-3t6Lo?Up_Th^^F
z@^$HRe)`wP>Y(gi15@AM;|}-dl(Ot2P-YTNuCTVRrB`|)Q4M~M%~>;KD6a_<7EL>N
z*8F#m7)T?D!=IVNICwZ;kzQf^_z)RKQiaU4)Zhy^>v#gV+-xjO*xvBQfK=#?J>nEJ
z1@qk>q=TM{yunW!psW1m5VYrC4;==LaRRTRbwdzgIr&o6@(+rJQxqfMbAFhgpD+128~HKh0SH)P1H-ci@qJwI`y(KJa7(vL%Q76ih)k8#Knc%@R1Z^
zbQAKaUS;}o>|nxu0n#2xL%XS;FQSBC&^rv+l^3I=2a(wOAG%O~m`aqV*@rk@bP~ch
zov$JfhRUylh|W!q^0Ct@`_&=4zdEf=sW)2fz3&)@FIRf~u{!~80St!7=x|IhwLw0GNJsU8^zG_ehdgk_A;K&&iJ>u7Jc1N`Idc
zPDm2-?(nc;xPV}!yVqC{-&diKeCVVkd_twb=z!-o=|Co#G^@pJi!g9=GaG3!h3Y6h
zSy@r;pltZ>+GOh>4)FrtpL1mqDU|=mN@{Np&|X*xW0Pit0PHlFzX~JrIS<~80Uea>
zQ;;7Eiqf!gk3Gd*Jr^A8#~wXEJ%*UDCb^qGLZp$auH*eDyZEeB-|fT0p+}Wbkn5+x
zB_7Mw>gVVY7LAXqMp~U|F}-j48;C}an{a%W*ua6MCgFZlH8W$JRP5e8I1$2(?RTfe
zi(6<(`!93T7r!R0Ni^vZzk}x&8+qYK14<~J7h2P*a2m?oRrc+o7z4w>Umwr$j|&0%
zZ=wCW&eP1(EEzBheK4$p2i(?SS%xI}W-$i}GXMpegEEaRT9g12e*
zG>y@+Cbu3NGpj%kTZ2y=+V<(@wCx2*k{@Fd(Ei2rfuk7*;3yT$1_XYwe>@ElZ_j90
zBH$gC_dp7D(@^ucFHJar`|na29W=l6GCf#!%-7EBp8I||?Xq?|@rq=)hb8LDT$6y`
z!)l;A=A*-)`GS%nV&Uaa$KqqkMDXV7C*#Qk*Ga>*dmpE-J<@OZ!)%cLBRaZ?R%RL8
z7jr>CrXm?Sn43h4o-oALyHw1u1bcKy7TY(x99`Hz}tK2?nkT1%nf8FSsYV|
z4UQ}-g}IqgMA+*;S(5kFgrZ7LlJ=ay0d;8r;4zVxkgWrc;Zi~~w&w>eD7HSRcx}KImEPHt^cH~%-AcTUjvZcf2%8#wb?O?9IK&>t9v9nTSG_q%aa5W<(05Mh<6pVCXv{_h_?d?V_(AV_tXD(Fw0sI$pjxe4P
z83+2s9fbM4MOHiGCC+Nr{N-j-f2aHDJI(BaP{y}gA~4dnDcW%LFTskmu?M0Xchg{E
z_p4!Y>P=gJr_~Y0S${jMNbiIUf19B7bk%
z@&Wzuv>1!<7ip|P1@2y@il^KW<)_qC6T!!mNr4L@dd+;4#7A{I-)1*`Kh#hva+${%
z$qB|#4#RDjQass^hwZ4FcK*=6e%E;D#e&Qi+*~7|9nB&SPdh`nhf2l(k3X>~I*;94
zci+6ZDr!uwO>q9#hxY!mDl7yLEeEXUJp*rNT=O~8;=M>eHoVxI>LNXU(d4SIw3NNu
zT`^PS!@x!AQJWW<<)ABssdfsp`ogfKv)O!>o9f1J^f*(M
zW%()$S=AKoCfy#>MhQ#axWR?&@v+0BpKE3=$%InOE~}RShrX|m!9sXhyv$c0CDayZ7m};XTj$fZ
zH&)$WHzxID#RjuH--P#>!sI0XE^iDy#^Tna8Y9Q
znDjGC%-V@9Vh6O>`O%>y)VkWpHj5yR^UrU6)u*uu6Ywd2xZOK*
zr8^(T_K2JX(4olbP?ycosVeR3@AHt
z0kg(zFD}qR?VkHcR~9Z=nJBnCS~REMM@4GYjj5P9Zmo;Gu}DGk8kkyt`-vxYQujA8
zPT9KJyLVy5jn+EMiLT~u#*MYH77F6#u-*|z)jWkNUKnuRt8HAZ^p1#hMjmRtQMk=v
zCcr&@_ZIP4vJSBpKBylCIeJ1zoAXOFp?(T7-w^cor8cSBgKcZmsQp>siPwLe)z^8D
zzhnYLbQGXNQ=H@S9ba
zwE@k=m%7}@pV>JfV!(E}E}?EFV^xy$o{1gGc}ti&{jIsrd;{e5bf
z!6Ap2O=Rt^cl%wOozLyC)rZAtdO#$!aDU1#f>(L6heuF~9U#_)IM=)PQ}>>HP`Uyz
z&L&N}=Q*S#6T0s9S{DBMtlC{lzF7qB2F_5@R(Hnix{VYp(YJTleM|T6Eb|{iS-Qrp>U;kxg*>^$Qs;QF15+BxOA46=M0(VQjFP~t?qaYq&0ukHglhX$xcn9ISAqS
zqvlMW4YJT)c!*eS)mFFuY*Tm$O~Qznn9-)37Omf}sMTd8(t1PlKK2K+yhI^)U$w@52FQD4$Nug9yT6l?)|r{h8WzI;EBxpu
zt?11^0@uo*$Vk%PdkvWPnv!
z0@jN&no|Y-fe+6MW`K8cppMdb&ZgI%8&V?fb2)k3LZSIT@tS@GxYoJ*eVQ~o{lfcg
zfjZ}3#ha6$oO%y-n)t3D|L=xoNpkCG=)rUD1Jek&SI`0gOOZo68Z6>&jt^dG4-QMY
zD0Bv(8eW-dQvdkY!lst>ni}Ghdh1ybGkh%f%{y2wGwW@eS;?ygGI9Dd|aE*cMVzZC{s~?Neida-lSK-EoBIU0FYetE;rhDRUj0@^^Gqyq_jo
zVz>jh4yV5V$W?o15r+eF(yM
z6^uZJ5(m^dEL&ER-EmN^{FQ=)8P4rnHkP$qU;Tu%c$qFh$YmPildj5yu#a>N@8GEc
zyN)LhTLSVEn{4-}+e}J^f@8kedPw)QCgh4|zeE>t>DSoiMt62U=poCY<}`tsFiE;Z6uN}W}5=OW~f{+j7($HbYCJjeoGBXEkUa+>rsE2BUvMqH
z3kz*kuC+10taJCwjOM(YtMgbI=k&~{a0U9RA#1tXgJ83aUgCBz(aCn19O=K^;8bk)|9~WQrI-mS)x`bse_c9B54?5N_5dv#TQ^Sr&IOd~ix>Lm;IMgrmlE~vB8Z{2KIfc&P)plpE6}#n%
zJYrY?lj7L;{lTfzvApy1FzOji_@7qrZ?)&tTNRGn#!D7vH@m(%XlDcTN0E3i$++aQ
zqj~a}1>RXeP=a=Qc{(Ur=DA8fo{iE#<4JJX>B?R~Pj8-wh`S-A1sj+Y$IS;wC)6-7KL*^21X^@DcKSb{bh^*=rD
z-vLkv9H6VXXw3mc!>AHNxcm;H%qW=+xkbIAOxi8rGoxj7-B6*}VM&suIFdPXMAEGG
zN~@pea1DmHlP6v7RR8>2EG~7Mi)~M;4^*>e68&au!@XDXNQ8>AgowP&R8bm~82!OJ
zvIUr|MvHG)AO(vX^W}@7E>;1i*x*6e0$S3f9X=^%zHd9<&g+3pi_DLlu2wuk&V}ez
zXg1ZjK!Y~5rJ`eiY=!v$--%H6ZsmF_vr3|l-d0_=7396+@KBpgGj%rz9JHE0qx(3>C-HI^!7+ycjwX~wg2lzb)f
z=RJXzW76-QM&mK`U=dA?YYMVfUUmp)7Ne{ukVaKIo635V>Hy)7$?uHAai8po&drar
zDvM|@aPq&_6Nm&PTur+IBwEz9ceOvQ3PXqFBo(R3EQL2-wg>04kH(w2=L<@OAKNOwn<
z4>nLTFD86$hfY1EZ_U3LZ{WxU1r}sY_1+2K9@+RA-W~M5YL;G|wH!z7zCo0^ewN3p
z$pS=&vII$aQyE86Cq)h|eB_$+Wc(Z-_G5*tv0@EThEg5Hge#^*h8m_LxF!Z()I44h
z1(?7Nx**6Ii6{^!oehb5_-*595_hv!!b%wtCdxzf^0`h#;KZ^GQh>4J-A9C>%jD%V
z`S(q6V95^Trp!f{#nNH7qyo-Dhb0L{$eV_V~eA_`)?Z-#?Z!j
z^{s~|1AB_Sl+=#6PvtmeZ1i5ZV{pJl01T>bF
z6K@>h)1^S}kTKV#&>kKp$;z}o-G_@#t2;>f#|C1GweX>dvikU)Gn-Op)n=VfDxoV+
z>}YPgHw!JG;klqBoh9ozI>#*4&j#!uPH;RES{>sDM?7wxuF)1d586H@RnHl35E8?m73I25loy4
z`I2OL7|87%MT)BAcj9k1@n!deIV82Q+9*tNal5AMvV~ddp&(Z7Z{6_qLI(V$Gg2TI
z)vV*MDR80dImvLEzS+LvQmEk4P_Ibd(>O&@x^I8V4ve3bU=F32f2iK=^V593GCIjr
zX&963yVy{8JnA%VF>JipC<+S6*xByRLTUxY2n$~-@+Ff$D5peekJk9J)6vPf+qt2#E
zJMHm0#*04WHIbsgx_ps4p=lV$gZmW{;U9u$E&;|$r5Y2~j$3de-}gjE|BQcGd^2?1
zgvd?|EFhbHntFTtVa(gnZ=PG^vh%6kRMpeKfX!V3}#_;pX7i)Vx(y&AM*71E&J-?5|UHy<&S&h68t@x#9!UD7n>
z-!}uXEAMz6NiHiS5^soqqJ`Qp@>C+Wedc5asL7Ym3mVR$29*6_>%8OP(e_;a;;4b3
z;4;yOK_EV{eQh;*B+!l9V`BXY^Jh3pW>@ZyfLCdkk{btyjm*tX=OX*9l{*5gMSzv2
z-Z~CJ)!Ko57RVtmYcJqjx~HrMkljY+QQFQ9YLw5bcV=Q}@j7CP1T@2b{ffne9hDv*
zR>Bs@Nx8pkBtCTd*73J2q|gSpX(ib48|pm}p(&0YSTD)6XvBbu*e!D(hlM1!c_d^F
z5%U`qEWL_JpeIGz&L=;P%t(1hlu}Nq{cT5XEY7-jH9okRLl(CZ-Ve&jth2dqfO-WI
z)R|)`1$BM?mD}zh-=9H~-x|c6TDX1=ceSc;HsvE486a
z9b9sF*oF)SgJm0w0B=Tm-a2NmT!MhFmRbKT@SAW-EJp*t#nG!dQrOP(Lq<$JYT
z1o;8Zvyu2nX7QJjUw`M@9rFs!=j>GryJNpUP%I=v(y*a~HG)JNBFZ4{yrJL%$Q>v!
zhJRkqSxIk`osxzFHUtm>hr-YHv}3NnJRRLav?F&S`rt#9*Xkaq|7d4({b_6NsQ7?F
zZIZ`*eJUmnBE;~b3=+tA@`~1SEU>AcM)w%jNFh-
zMFkXArxK`1rK5>&9_B*H&L)lN`ZMH;U8Ku;9{THx`rEGVT={<3KO!$lnWKLIV#^?8
zCBMk}5s22eZX^rWmFHc|4q4#~iEzZK-YNX)XZ*RUNt>QhXN4kPrnq%Aih>WfR`PMP
z4qd8ATSgx8<|2idt8Q+Ghn(U+qIhF-aE@C{G>o?3A?1Y)MN7>iGGktv90!Dxyl>aoy`sY1
zdp5FfHzPeW!6F|v)ZLJ@g$Xoo*x#uV(=?U)J->BT>OP*m(|shBtgiv~FR#gQFn_2u
z0|vMRIyvuMJ64u&>Eg!e(&N581b4dL^V1kHQ+ydHB@f^7TW~avazn_palBrTHnDDkcq*3ASgKFpeDCE+?Yc%d@cxLc_I47nmzYqrwnLb8}~5=5?YV
zu$>5rdE6Amp52KKrxYxyf7q|Z)umqDxTd_-nIvdVX}6M6Z2j43t}IUPY*an)3-8sp
z<2B*)x%~x?bpd4#yS4=7Z5_@&7s+Fxn*MrtVKi%4a&T2+r;lQpLMhM%7^y68Y9nIl
zHjquIt3j{999pQm1EtvJz0X;9*dM+_{ZM_#`z&lq4xxmbz5^D67SR^ZGT8i~UuFc(
z?rw%(hbKoFP8+Ha!yCBqv8Q^Je7f`Ch)t}b+32=3YUE}9MCo0b`1Z!oe)bp%mganX
zshvk#xq=cxw`Y*i`Vd35R%ve30;`xl>7egVEle2IOq9N5NP}E@;elY@TAh!tzm9;r
zDEp;9jA;*oznumcVZV-je=5)~OIB%&q|bN8^aIxP{*=h&lRKyvfg_P0_Mn?W>#>qe
z-nB#ON6cl}#JQ5PPv$Hc_~QglU#05}A)tL5Q>*O3%~RYmPx{y_W~*n=XG64r4C$=h
zi{b+}0}LS*Q2&Vwy3eRRcPM$9MAAayqi+yr#p
zfIMOD=X8HARp&0SPnZ2cs9)!suYUtWd;!%)X^|+wwZ!!~=ZkxjQCVvmBr>b$?;7f8
zY>EA#XrT4&fc;JO?@4$HS3x~6Ih-Q#b6)^^kQ$Sncq^T(*(XH2B5gOK2mFY;k{8!!
zdWDZzi;2u(y7X$q1GxZoiNjJJolJ)cd)kKu5R26Lag05flLKHk=(9IukHtLOnX*kI
zk!|u3b5u}NFyO%TCguI3A)oD}epA}ieuob`YjDU%V~Yp#IW!!{-LI_mZRexAznn}5T1o3L%Gn@^`3xPsy-eE+{$fza(*GDM1_sZZdz{3B*4ZJKQPB^i1rRH0N5fob0d
zU@p}?nEP?eO9B^QdF%^7uM)pJNkFJQIeQ)mU(C^l}|UYBReE01!h
zaNl14wvcNuJR4Y+0JsvZATGtiZ`xV$>*8FJ>_LxPX$96~!D(EauqLt*gqQPhnPW!-
zBdn<(*~|x<>w>ld1*KUl^*l`qn|2rm5e;HtDRZBhFI){J{HlN=DitIE?cw(w$2bCM
zq|xLD{d6q2G_B?7P`Vyf<+Lo3J8UMT)SJ&TL&h-?@H`cecG^2rzgdX(?=wzrZoM3M
zD$dlm{MzNhCJZM?}TGaTmu&yy4d&E`Ddq;T@=_>I0TPWjst8|d|m#L@~*KHlPhPpXk!86Eev;F#6>C}uttJ<7<
z&XE`+c-wjo)~(``=yIh}{*wUK&yVK^SfEdiK>|@Ea$7o89rdW-WxI=)*RmA__up{Z
zz$O{^CK}PTjdn6WiR*5Ihxrd9gh^>V#;7)YH<}Hwwh4DsnNpS7<{7Bz0xrP2ie`iW
z{}jv=p^6T8o=9lh8E5uexT?&Iac}HAi$3Ay=PcQ68ay(X##Pv3;>ed6S7uW3_W^sp
z$$4|9!y=QO)sng{QOd1g(lM%)q!n(Q8qw;8>!;gVhPXkd)8}N(Eo^rUFvz8UA_qM}
zH%+Yg1(+MX+_PjVa0IECS*IwxLAPU`d8t!6;>_i`sIVTZ608r8Xl0Bjw7(eOl6mPq
zTCInz_XT+1$E|UtB4~s%_FF5Uand6y4|UEBu%G|wn@6gaS23sbp+l+A&`*Av3b{0?
zhRRmikBOcUS&VrD^k4s^3?pWTJ3%@N7^AULpV;h_$K>!_(P62wv5PDU+mD2z)41hk
z=X+QFxpt#YbCluNhQn2O5BuckMudp+Se$gm%i~#!C8-sAN+@p$*LN;g_9}#WlwgCv
z-U-TDSF_b;7K4I!iK*8~U3NnBF`tj#6V)@mKuJY#L;RA}^3eZwB8>Ir;1=$oB@qSdHA*k$l0ew!HyrMotP2#`)fQuL{vf?kK=jjM
z%IbAaMiWBJ*l=!Z_2!0k&Ej6n^~wNX^=2aH81(#JjiZqp6`v&Jz*o!r8L$Q-frs?(
z*Ne78{nEvC?h&dOboINI$kuUlM939}h&*-zO?F}p(Lg#f?{=mS558w46KxU~A}D!d
z@a}th5XGs6T9fLhGzyh>C5w%N1S-JDBBJT`x`3?XT}f!%ytV}OB5d;qrIWo~BvrFZt}#s_L9i#~GWM#s|IDl@D6*tj7!p9CG0S0~H)02Q+UWVajO_8LJ46pY$=?#5Jo
zlFupq*7xI^-BCkC>S=SeYX90
zEdj1LAfL|iShKxQ-zBtxj)zSO1j>T7sj^M9mcVjz^Dvp#Q3e*;IV6Y$yskW>E}Yu<4cG<1@M47vS{*BnIP+Mb?I)Qo4~mLZ%d=IhJmth
z%a|y!xf2!=Za>IG5VFe#_*U3q9o<`e#zO)G3+v)%?2MZZWaJ#Kfj@twpnDY7$dIIs
zjt2AKDfIIuNj$$p9i+pDTOoa^!WHd2-!Fqb_<&wCFU?Wg*#1%EuBgvxp#19+Qx
zp+oB$86~Nda5DcBBW$KC`13rLjx?eG-D2iv+Sl%9!X|XDDNSQ*JK7acd2~nuKW-vr
zghu-Vl&eTiSMRGnKK>01T|5M*Pa#ZMhVpFwjxyiqo!)ruF1a%4vQgQrObRPVkvS*u
zs7pX#^R2{irPx(<{>foZJ?MSeVG3baaiBLelC4Z2BoL+2p}k+b$f;F`Pett?#9!8%r}1g`lF*B0`oEB3&!y=Nz&
zWpYPM@O+{7qzk#W*P2XN*&s}BPm#L_`mEg5zu=QS?@dZ5Y4a<$K11IN)S^}ynELlQ
zcERNSo5y`G%4%xgOm(t2i;)(a
zYJu37i&-BxDiTiG!nhRQF3{*2(wm(gDgMC)*=p!=Dku9V$PoUy3%AC)U||?oYrQEK
z9*!Lit4R-iZ(G00n<0_v37@j8ga_(Bz&DhR`VKntUJI{up3YxVRX;GFiF)dNAN)m}
zM9k1e@>pLi&Q3vXBpJm;sasPQ%i{(be0xzpz
zDX07v$@ahcShSeWmih?t!161qjMxn%y|#5MW2FUKD?nd^kZclrMVP2E&-QM2X(atH
zce#~Js)8Wd9^5kvg3oDTlyoDHi3i>By-KX~y-fzsjkCAmBS^3M73(zW#E7?A9_VWN
z%MXa7$vP+}Oh^>pdrP)Af{6<$Pe!4>-tPG;TiP#
z;Nc;rulgLiH^@YGT%>kPATWp@c#a!p-WgJ%Zz-Ccxi407!pXI9n>V$qmCfz4MMkk%
z7I}qMP6hR;6!NJNR_i7jh%LYc{@)@Xz1*Kc*k$b<8kf_`Q--AQCqnK(#>M_j<$U}3
z)h^OKKZ5LeRQiV;7P33R>WmXINpNsQ`;jvj;rG1=NSllM1{jpC?kLucf9bs7K&ib_
zJL13axgSI|Y#;=sQ9AcxCcXBodxUjgmI(SmV~kAy=s&*_^=X?H^$Ae{y^01|x_Ou|
z{~yWtU$I63fSeT{(2x_ntRNFGDTjwU*zJ&-->wm@taq~H)|d1-knh?!Hs>p2=H7LD
z*L|=-4*BOs;xLQ>7Lta;QQvi&Rd=A4KH-V
z1qY)H5a(wNPYgwktr!>9X#<@TgdR&d=3wiE~fg(h$3qaIaNo4n9oCHy
z(hMfyVAax_-yj~kL;6_XDEb{%l-Q!GfWXtTwD^6{e@6H@D2?c26~MOAd863m8VjoV
zbrT9}*gSY1N_m;b`}ah{oBgr<-bhNb_a(W6C;mdTyh3X?TY_cvF6F~(mOnP}dRPd=
zT3&uy9;NS}N5(OR5J{@mymzd-$HO-Tvk9&sMYBDBTWQru%=%EKl+{c-uOvZd6kZ=X
zH*h0(MXi&^c0+N2y7Kn|#DLyakmp#B{#C#09;aNkbz2j7Krq#oX;A4GEsr}r5gc8i
zI~DE!t9I9(@gPTzz^AiNE9fFt3?tNVa0sp-pi1I6%aI0kv{-OBeDR=S?Y)SgEOX6y
zC+pTZ&F}XZ=;h_{qvYEW_tbvyl|U469`OxG9xD1&$%c53X!t27Afn|x;|
z`e+x
$M8nDWFtFn+V)ms0MGVhFBz35~$g!S@6T+hd9)N
zjf;2oIF3^qYi|6qj0Fx^^zhwh;ysL@o^QgvVddUEAePcUU9{#TIxCnIM|LiV?8
zpNk@Q+$zf(Ha%zZ6W^@|n44!fm^}4s8p*h*9zPGvj%6W=#?mllJg6|M1A$_e&iwbM
z5#uC-kgx7@bq^)_dOo|ISdjrb=e`%lr4GJI52znf)GOVh(fH!Aybn!lQ7c
zYx%fi(r{*SE;7EYV}u?C(WN`@Mga*wf~G3YYPP(-Y@+vu+TB5E&cQko9k3-LyFvdJ
zy2lNrblVnZX?SeLx*&BZJfr%Ifg=5m)S8R+e!{hzS2dh96z?lty_w)?LN
zz6oHzzYL1raL{l~zSgBjNRUK(b&|1k2a`6njsud=!-Sd6gR1)mL#9T1O_{8wzLE&P
z*QB{YK~}!q`PlO?HTy0uC2NIdWQ*EzuR=9@(MI_GxnQ%r*&4H~mcD&cM9Y~h;rYIr
zHz4YLoSh*C!U+DC>iloCFF@cV*W*JV8PG8uTtO8QfD>mcTX8ibWf^L>F5O9m6MG@m
zhIiD|{fVui-{$`Rk%x2{DKcX85v*B3BGTqO;OcDYudowKaiWt<;q-_R>ng{3-z1nfZoApw$78z
zi;X1+Pf!f8gJ9=-YBB-*J<~_l$%SS7Zw(Kq!LB&!g9!9$5{zUg>(Mk_-2`>JLW
zKJ{g2DNm;cqcsvsJ>>t}f)WuK83x|UpEwkncgd%U)QTJkh!^*O27PZ(gUZ%MHP5Ui
z7egilLvg_O-V~i7&D5F&bnxeRHCwj&RvOPORcJ)&mIbo_Dht}|sSTN;$NIbPtVOcF
z@_iN!zfPAH78Gn`O($4h6n@$P#OJi8s{-5i><*OfAF=|%f(;FG1Lml&RAFIKn_aes
z`^*K6?@3sMu%1_AM*;!dz=^nlTZx~!o|e@ek6*KZ$e&FZ9e5vz-5&ODTDAmJQc^%M
z1J-bT(LFgrZzVp20TxCY#u=xA<77qE1#4ax!se3?G3o@{jetyZo~s}1{H
zDY-4;tVZ7PssEXRV1|22T^u!pt!LqBb1+UwY3q}4_A2ZJ*}i!0TaQCdY*8cP~cYf0E@v_CBodGys&~T?XAGC6#
zl4c5Sa%Pn63eB~h$X2cM4}1~AB|`f}>iZl&1NVO|BXwg^1X*7;&$p(~WL(vyHrW{#
zU|F_p*LADgS$nPELh{6++UIUB>Mew(^mMXLC4~Dp>vEI9|Jw~EC>AopjI@~1Q98`7
z+ww-uj=kV!dO0o{oJQ0pYcnWZiYPyUt-(wD$wMh7zK0z{QMn==mjpoNZ+oNsiKc#5
z!2)8|gV^J;D7Oo2y1$R4m*>iPsAqs2aLJV
z)wr_PTo2Km&cEDMBKM{aX}lM
z(hz_{{9l2Gf9_8u>O*%-FnTL*KFBoVYl9T$R{GY^fe>GBcFA2!KnS)&QN*add=J}MB4BvI=>(QUs?qbdyi8f}
zwY{)My>OdKp(N55?3?>rf;?kWOHX_I;&l5cQdhFBce-WYI_G~DIF$_E_6QUFaQYzX
zKmIwqN5~sitIp(k=b&QC&>OArVw|PIA&~gjI9H89=N3(CvD^AbRaNw}0D-xt8#tP<
zLE#~z>XEG$iA}G(3wt3`Zw-Z<+6E7?{ucAbq?R$;99u{j0eAN>Gfm&x
z{>c#BR0hQa6t;O0v(orFl`ir?bexa%A%vR$?`TTNu%Fb{da@nAlzT{KVxbWobbMtz
zo%n)`hmt#-@mavQ6ztyZm(bI)+N0ZUU$(C1KrE5ZfaC&)OO5VGWkv57b%m7A_Sqps)M3FZec`|h>Q8f
z>$@SoCx&D~%Z2`lFoA*2QJd$z!CGzGdv6w=>|fpV#)Ie5j^v9`unE%>``V9qeH~O7
zlgw;T+SFqdkRXM#Lofiw?q^^lt~XTGtST5o`X4M9b$sy6A)b`gUwt+I?NoQ?M+(Aw
zcS(SJWuH;gYp3jAN<=eeadGspPSb@a#p)E0?_(Mc5W1$NLPnnYQ&>&iy*!bpu&+Q3I(UQZ4WB${ZOTT9uCbpU
zs+i@zr+KxT^3r(oVIN
zqx$(Z(2&SjnI)(4BvI8!3RkkCZ~~Xhi)i5?dXS+P!Ok)DSSSj)0d+zKEw|Wv;izlp
zKsa$_z{T|qd9hRAc!}0G^)c-SME!EU3Lllqi>&vj3oGDmopt{l%X}{PPxL!IiC4NEZ#Q7
zt4;GZnB$yBllO0Zf4Yv}I#eH8&zv`xOSikcJEX6i!u=(p=p_Ev-GrJnAH-`&;?V#P
z2WSnj7u3dG^qrL~)oYl5JKpdV*xcwDq?)Xyo@f#mzaslgoDbxObyq2F>-3w3t=#P-
zG1!pOAVN#=1?=Gw8E|5^y-ca;G2C1+w|*G+p{Ke(--fba15P=V;u6X@^QcO>Vp{h3
z76VW8FY!8{39N4fKyo3S2r{x{&Wy!I&)cp*iri*G?aXQI`-dnP@&h_0?743fYyDPC
z?%OywTLLW2xg~%5N%BAXe4?&5@fv?9n^=@OoW{qc!KkgGPdX(_@wCQ;lPDuVtI?p|
z-Ljdewur4h5tx}KP~jV--Mm7K?e%|{8T&^5Nigr~NN^Q-VIcEs(#L1rJg|-^V$lv6
zKBf_5XvEm{XZ%doh9T|J;N2UF3fV$cpA6%?)Gy$FF}|N8`%Uy#W@GsEsz>yatnzj{
z#(18v&=;Dgiq?Fmh~tqE0*gQ>@xZJCDUAX-wNKOv>GI3zOp3VlC3h;-=f_z
zRghj2gsWRgHFOrrI3h0LQ%GKk#!1tl8ERxxOUj7Sb@LbyB>MK&w*CIume(Q%cZo*e
zfkSLk6P_dGU}LM?Hc3;jJwsYkFqTi*M^Ak*QYGc_{jdE_O-I7;y`o=G`Q_#xb*e$7
ze7n_{&Xm#F=>+56P1*;0;i4{W?iHM`Q1jFO9^L9@?NmuwQm%6W=KyaF-iSfuKn7IJ
zZW55CAl+<&bZppXqz1_b0Z+fNqa&4eXULW);NZ_j>WVfI+8QV6jQ=!
zN|+YSd5pk-g+W#7-`DP{Maw4O#4?O|3
zTbBn-dMdQeNeq-sVCEPe86#y5o6cKqCFGxU8snL-c^gERjv+B_RuGz412}T_J5unj
zu_H&2G?aGkzs`(00bJ!qof#fo5Mx7xSSe8OE)Y&b*fO!+#>$SZpZ^Le*Wj?6UEmPM
zraUof|1iu)5<^9eQ^bUP3(~-q$6Z{2z#GMkv&}}W?Q8kZvP%7tMlElpQysTwErX$q
zes)qL>1R`-wa(skqxh8ziv^cU0n(2>?CF}7+gI@-T=ypq?_YvE+n1ZgFfZBSso8nZ
zP7bDZVUBQH@vyHJv0iay(!Q5`vx?^c-FO+|HOx8RzBp3pD8hth9
zsgdx#EiV+?ZCVNf`Do!Nhs4Ah_Hm_$J2D6BGP$;(?CjX++39?W>EOMgagqJyiZobFG%-UL`t#`hWy~TQE!k?y
zP#Q}$l(-yvk@Kj|B?DxUo&*f!_(j0xL3AShXKI-Kn%Hdk<7;dSJJRtK9g=E)llt@}
zVCW`9O~MNcnF9R|8#5G-l}qIC2U)hQ>E3i@aT-tv2_2A?p=@nzY?#RK4?--~@N^_|
z8%8`{2$G|jJJFQqZMfW&$Y#wI%`x(@7%>BEI;T7Q$UVaN#)hGf|+haG*PWg2N@Lf_frawJ^hjdf8;lW^L%
z6&pBl9;QX0Fdl^w7KE9f2nrhQ)&(jF!q-|5VwR!F6}N#$wAEqEY~d7`$>Ft33-&qi
zi;{s|(lCjFXwJ2w^-Q18IOgofC;$W*+g+jfTL{$7WBhq((-z$1^R<)_I|^SB
zoo@*C9qsi^A_a7>zh5xLSZi@s3hat%8G(oo5`01cL0j?8EK%JKTBY^RRFU=-mah18yexo@|RRfM?dlwiA98WtCpVrzC}f!@_Wx
zRSB#>Ldanp(oeLWXpLuaAu(Q%iW`I%v$X&639$B!Fb=@5Tm>ur;t@#ms*P*`V1UIt
zkY6KFcK(h~b~*l@mHrhX8F3a*emoB0D=vrbeXd&{dRdma6NTJ?*XJ&CBX#E1M7T%mF0K_jvG(Z4)rVU;Zc5I=OuCbXIYIIs?
z5VL@md&^MI#5KPpZiR$}fCRhQ08RMB{%3j2!K;o~DcpzyS3!ADftgdd84dnDFcCN5
z7h{0oT37_s#@lx@LAxR<8iur|sJBGoyPf+)j`ywlSB90j7%x;jU(a?WJ!krg_Zso&
znJ+){XBwwr^TNZ!2M3@>>idWS`=5Ru37|E6Ers5%Da)TZs%=n5^o?79wm#62E#jm!
z;JWgq+*-p*%_B%Tp0F(X7{5KP>?7Clsv
zp0D1`je1HJviK0CuN=ICLu<=r`Eir=yOK8JgBn0pd(mh|OZp
zJC^!Qu=2=lMrd8uTHUYpOaohm>c0%%v~}sqG7^&oCKF}HR*Ab+9!&Yxl9!4>(fdPe54nX~Y83dFJJJXe_k}l`Q4Ne+^!I3TpO20;p9Xh
zqxFeldN}I&4GMW=(`!*C2V&66QE!*XNx($ATbLOydiCs==O#-@73J}XFu}o({_p5cJ|NyM%-@FI;TA)
zMDmQ>UOYE-tKfb*G8{c18?ML>^W;{Yoh~IT{pZ&r#6wUvVeTj4PS5YXTp^U2F!Pp5
z)rXz@TS=Onkg79$w!_m?<^6bd6hb*TbP%!!>-UxSuFHvMH`uYA9<`u0u&{#
zi9*RwHzHO$%R|Qp|8<>#+h~-Bv?23zZd>A4;T*1UlP&jD@(n%m(0D*TkR@P~(ad$E
zxUbuF<7H#&HrxB{)!Q;wrt(PK3n^TgT0BxhOcUEIWlkZ|#nM`+W{D@Tf@1IU
zKz@#m^uVQGlAPzK-klMVG9G`VQ(caJM+>e-3m&b0z%E_$WQKKs
zm!-!Zk|7_CgXXrc%|eIP0^(I`8aP%B3k8>p_Q6VGk425Wgr%PiTRxDYBnQjEXh#aT
zBmY&TMx5T~=}z1fv&WL-P)!cfv?5JKmM8=FUz&1>MDg#`qlTxX^276&qbEW2|1jfg
zbSMA2cOCsHS`*FeT{BEX_Ukx-j-`s#rjF*9{Dd+@6sWAl)ZoMw^$+y3Dfm(hNbK#p
z;RLe%dDQ3HD;_a?fZ{u;-fD^T-HqR~eYysm3ACDqGxU65levtP^~WWLAGNH0A!7K(
zPufd~J};^E_(QPl!~Rhnp)s
z@3p${C%eD0@k)XEN~?AEACtVBp;NCtd?u(Bx(W6Sq@fnBx5#0G(N&dft)r8afn-9WZH3x&gUeSwf{Y!Hcdqn7&Dq9_S*R#jYn+<^Es?10r
zPUc4`AHv_=U<>m5FT(?#5^}`kaaX@}(bPtG=|??vvmc~rtQp5Wkc5fO+A<-U5@HAN
z%k}7$HmvF?1$d*xLEPRvBhmv7y9BF+vMmZgFLo^|u3M6E%)`Mp)eNjJL#8(i{kJiO
zDyDv)U9WE&uFBj5%^$BEtoX?!rL(|*!wLXZq@qTlc5esbn_iL?~Q5xg=nx)Entd;~U|$F8cpu4?=ZYn4N#CnasaQR`7|u)yV|k
z8NE1fy>7JkD0YNw6(l>|6SWJi%MA>ERkWUN6u0y
zL+keol*)2fb3Z>jVu>}ls}=nlryHLhBd?{NBI!#G$O-uns)m-a9=aQ+_=(UQ86ARhU0MOfGEXMgw^l}@o?`qN;O22geXz#ei2jF<;f!PV#1^DI-CCn9
z(U?wy9aNNu3>MkrB)UkPjb@w-lApvd>w5;-EP!F@=BZNo1JQeuW@-o6siq^_v90K+
zd{)^0;!@C6els~T_v|VfrGw*G*H}JkORgY
zJ8ObrvVxf;(?yA;c^C2B1I~6r?m3_e!W<{@2+e~qW=fFZ=4=?FPK4pe!c_ZlHchH)OU){icW
zSP8V4-KRGd^BUiU5?u&LN(S%{6KGjcaOUl(2)#=nZJW>4I&T0hf~%gbR0AmlqG
zcQc|fe-aq!aQq0BfI9x+2OC@sV+wq`oL@ROuY
zv)nfmzB3ake9gc?dO32luveT$mjpWAszML8(FO?*!%mOCAbdB!&NObQ$UeGo;&cD?{=!+UBxn;8P-a
zkMsD2fue`OUdJWr=7d(ue}=ZpAo~y>=r$h0$V3&id}&g~AT?wL2F(;LX@=8slu(2-
z5#j7pKy%Je-+){DsOg_zf^`|xuU(bIei5qengwFrO3IRPpr3cKPJ_3eU@&RPFpE$$
zOUq=Q0nbRN66f$~5+8H)6S8Ke8rXzCIgR{ru>utxrF@sLO=ELTg*U?bqp@!>
zE-@NK5quU3YI5$g6KZAyKA9YzDdHTWkVw1>~quN>1k
z3u;C7M&1Z*WSOc65P3S1njZHcInqWq=kk;AI}QphM5fYns3nKOW4sDHx6p$rT$}l#l4qw(90t6jtfy=~fG`8Sf?-zyP4t%UNT}Lb1b=ImuZ2wOd?j$?SY{$7E2gQLp^ejJe-QC
z7OE7CsI?)EOITPyTG{Vm+@C@NVH`|P>(c&D9^QXHkaU4Z8sJ;Kh;j4w|6?QvW-h4C@xas&7L<6neE65@P!i47%MzlI6
z*7a8z)RDY{!gyzQgi$<&vO<@~$6ogv$#WGe~h<8nv
zp7r~j#plv&z`!Q^i7e#p#f@#n1x4+*`!?MH6fuq^=R9qB(v%nu<3UY!X4AQcdR5?lZJM{J
zK}Yf^1@8|0cEOtV8dolKpdyVm3vHuQO>n)#L;ic>xaeo@Y0VJT?jk|%;}wAYtrh`C
zdyS7ITl7WXNJ)_UMR32E{yM{~i_}YuL`)KKznVFLm4Oupx+#B2WH0CjOD2EVPlTV&
z7#K;!Zi5SmqyamcB6FntNIv}_Lz6_rVX1M2_r%it7&~eh`G|a$xx@qSSpcwUmE|XQ
zYlqh{_@Hi|usuQQ>~6eGd~hHJOC8t0Wpo?BZrdnKQFQ!EFI#Es(tPr-CU$5t`7h${
zrf9vo<$2d2PmAD