diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml new file mode 100644 index 00000000..52195876 --- /dev/null +++ b/.github/workflows/ci-test.yml @@ -0,0 +1,166 @@ +on: [push] +name: Run Test Cases +jobs: + test: + strategy: + fail-fast: false + max-parallel: 1 + matrix: + python_version: ['2.7', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9'] + runs-on: ubuntu-20.04 + steps: + - name: Checkout repo + uses: actions/checkout@v2 + with: + ref: ${{ github.ref }} + - name: Setup miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + channels: conda-forge + python-version: ${{ matrix.python_version }} + activate-environment: qiniu-sdk + auto-activate-base: false + - name: Setup pip + shell: bash -l {0} + env: + PYTHON_VERSION: ${{ matrix.python_version }} + PIP_BOOTSTRAP_SCRIPT_PREFIX: https://bootstrap.pypa.io/pip + run: | + MAJOR=$(echo "$PYTHON_VERSION" | cut -d'.' -f1) + MINOR=$(echo "$PYTHON_VERSION" | cut -d'.' -f2) + # reinstall pip by some python(<3.7) not compatible + if ! [[ $MAJOR -ge 3 && $MINOR -ge 7 ]]; then + cd /tmp + wget -qLO get-pip.py "$PIP_BOOTSTRAP_SCRIPT_PREFIX/$MAJOR.$MINOR/get-pip.py" + python get-pip.py --user + fi + - name: Setup mock server + shell: bash -el {0} + run: | + conda create -y -n mock-server python=3.10 + conda activate mock-server + python3 --version + nohup python3 tests/mock_server/main.py --port 9000 > py-mock-server.log & + echo $! > mock-server.pid + conda deactivate + - name: Install dependencies + shell: bash -l {0} + run: | + python -m pip install --upgrade pip + python -m pip install -I -e ".[dev]" + - name: Run cases + shell: bash -el {0} + env: + QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }} + QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }} + QINIU_TEST_BUCKET: ${{ secrets.QINIU_TEST_BUCKET }} + QINIU_TEST_NO_ACC_BUCKET: ${{ secrets.QINIU_TEST_NO_ACC_BUCKET }} + QINIU_TEST_DOMAIN: ${{ secrets.QINIU_TEST_DOMAIN }} + QINIU_UPLOAD_CALLBACK_URL: ${{secrets.QINIU_UPLOAD_CALLBACK_URL}} + QINIU_TEST_ENV: "travis" + MOCK_SERVER_ADDRESS: "http://127.0.0.1:9000" + run: | + flake8 --show-source --max-line-length=160 ./qiniu + python -m pytest ./test_qiniu.py tests --cov qiniu --cov-report=xml + - name: Post Setup mock server + if: ${{ always() }} + shell: bash + run: | + set +e + cat mock-server.pid | xargs kill + rm mock-server.pid + - name: Print mock server log + if: ${{ failure() }} + run: | + cat py-mock-server.log + - name: Upload results to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + test-win: + strategy: + fail-fast: false + max-parallel: 1 + matrix: + python_version: ['2.7', '3.5', '3.9'] + runs-on: windows-2019 + # make sure only one test running, + # remove this when cases could run in parallel. 
+ needs: test + steps: + - name: Checkout repo + uses: actions/checkout@v2 + with: + ref: ${{ github.ref }} + - name: Setup miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + channels: conda-forge + python-version: ${{ matrix.python_version }} + activate-environment: qiniu-sdk + auto-activate-base: false + - name: Setup pip + env: + PYTHON_VERSION: ${{ matrix.python_version }} + PIP_BOOTSTRAP_SCRIPT_PREFIX: https://bootstrap.pypa.io/pip + run: | + # reinstall pip by some python(<3.7) not compatible + $pyversion = [Version]"$ENV:PYTHON_VERSION" + if ($pyversion -lt [Version]"3.7") { + Invoke-WebRequest "$ENV:PIP_BOOTSTRAP_SCRIPT_PREFIX/$($pyversion.Major).$($pyversion.Minor)/get-pip.py" -OutFile "$ENV:TEMP\get-pip.py" + python $ENV:TEMP\get-pip.py --user + Remove-Item -Path "$ENV:TEMP\get-pip.py" + } + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -I -e ".[dev]" + - name: Run cases + env: + QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }} + QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }} + QINIU_TEST_BUCKET: ${{ secrets.QINIU_TEST_BUCKET }} + QINIU_TEST_NO_ACC_BUCKET: ${{ secrets.QINIU_TEST_NO_ACC_BUCKET }} + QINIU_TEST_DOMAIN: ${{ secrets.QINIU_TEST_DOMAIN }} + QINIU_UPLOAD_CALLBACK_URL: ${{secrets.QINIU_UPLOAD_CALLBACK_URL}} + QINIU_TEST_ENV: "github" + MOCK_SERVER_ADDRESS: "http://127.0.0.1:9000" + PYTHONPATH: "$PYTHONPATH:." + run: | + Write-Host "======== Setup Mock Server =========" + conda create -y -n mock-server python=3.10 + conda activate mock-server + python --version + $processOptions = @{ + FilePath="python" + ArgumentList="tests\mock_server\main.py", "--port", "9000" + PassThru=$true + RedirectStandardOutput="py-mock-server.log" + } + $mocksrvp = Start-Process @processOptions + $mocksrvp.Id | Out-File -FilePath "mock-server.pid" + conda deactivate + Sleep 3 + Write-Host "======== Running Test =========" + python --version + python -m pytest ./test_qiniu.py tests --cov qiniu --cov-report=xml + - name: Post Setup mock server + if: ${{ always() }} + run: | + Try { + $mocksrvpid = Get-Content -Path "mock-server.pid" + Stop-Process -Id $mocksrvpid + Remove-Item -Path "mock-server.pid" + } Catch { + Write-Host -Object $_ + } + - name: Print mock server log + if: ${{ failure() }} + run: | + Get-Content -Path "py-mock-server.log" | Write-Host + - name: Upload results to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/version-check.yml b/.github/workflows/version-check.yml new file mode 100644 index 00000000..a951ed06 --- /dev/null +++ b/.github/workflows/version-check.yml @@ -0,0 +1,19 @@ +name: Python SDK Version Check +on: + push: + tags: + - "v[0-9]+.[0-9]+.[0-9]+" +jobs: + linux: + name: Version Check + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Set env + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/v}" >> $GITHUB_ENV + - name: Check + run: | + set -e + grep -qF "## ${RELEASE_VERSION}" CHANGELOG.md + grep -qF "__version__ = '${RELEASE_VERSION}'" qiniu/__init__.py diff --git a/.gitignore b/.gitignore index 05a1b20a..261e665c 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,7 @@ pip-log.txt .coverage .tox nosetests.xml +coverage.xml # Translations *.mo @@ -44,3 +45,5 @@ nosetests.xml .mr.developer.cfg .project .pydevproject +/.idea +/.venv* diff --git a/.scrutinizer.yml b/.scrutinizer.yml new file mode 100644 index 00000000..eb9b64b5 --- /dev/null +++ 
b/.scrutinizer.yml @@ -0,0 +1,10 @@ + +checks: + python: + code_rating: true + duplicate_code: true + variables_redefined_outer_name: true + +tools: + external_code_coverage: + timeout: 12000 # Timeout in seconds. diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index f8e1d6fd..00000000 --- a/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: python -python: - - "2.6" - - "2.7" -install: - - "pip install coverage --use-mirrors" -before_script: - - export QINIU_ACCESS_KEY="X0XpjFmLMTJpHB_ESHjeolCtipk-1U3Ok7LVTdoN" - - export QINIU_SECRET_KEY="wenlwkU1AYwNBf7Q9cCoG4VT_GYyrHE9AS_R2u81" - - export QINIU_TEST_BUCKET="pysdk" - - export QINIU_TEST_DOMAIN="pysdk.qiniudn.com" - - export PYTHONPATH="$PYTHONPATH:." -script: - - python setup.py nosetests - - python docs/gist/demo.py - - python docs/gist/conf.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 37273436..40f8fc7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,51 +1,247 @@ -## CHANGE LOG +# Changelog +## 7.16.0 +* 对象存储,优化并发场景的区域查询 +* CDN,查询域名带宽,支持 `data_type` 参数 + +## 7.15.0 +* 对象存储,持久化处理支持工作流模版 +* 对象存储,修复 Windows 平台兼容性问题 + +## 7.14.0 +* 对象存储,空间管理、上传文件新增备用域名重试逻辑 +* 对象存储,调整查询区域主备域名 +* 对象存储,支持空间级别加速域名开关 +* 对象存储,回调签名验证函数新增兼容 Qiniu 签名 +* 对象存储,持久化处理支持闲时任务 + +## 7.13.2(2024-05-28) +* 对象存储,修复上传回调设置自定义变量失效(v7.12.0 引入) + +## 7.13.1(2024-02-21) +* 对象存储,修复上传部分配置项的兼容 +* 对象存储,添加上传策略部分字段 + +## 7.13.0(2023-12-11) +* 对象存储,新增支持归档直读存储 +* 对象存储,批量操作支持自动查询 rs 服务域名 + +## 7.12.1(2023-11-20) +* 修复 CDN 删除域名代码问题 + +## 7.12.0(2023-10-08) +* 对象存储,分片上传支持并发上传 + +## 7.11.1(2023-08-16) +* 修复 setup.py 打包丢失部分包(v7.11.0 引入) + +## 7.11.0(2023-03-28) +* 对象存储,更新 api 默认域名 +* 对象存储,新增 api 域名的配置与获取 +* 对象存储,修复获取区域域名后无法按照预期进行过期处理 +* 对象存储,更新获取区域域名的接口 +* 对象存储,bucket_domains 修改为 list_domains 的别名 +* 对象存储,新增请求中间件逻辑,方便拓展请求逻辑 +* 对象存储,新增备用 UC 域名用于查询区域域名 -### v6.1.3 +## 7.10.0(2022-11-15) +* 对象存储,修复通过 set_default 设置 rs, rsf 不生效,而 SDK 自动获取的问题(v7.9.0) +* 对象存储,支持直接从 qiniu 导入 UploadProgressRecorder +* 对象存储,优化分片上传 ctx 超时检测 +* 文档,更新注释中文档链接 -2013-10-24 issue [#77](https://github.com/qiniu/python-sdk/pull/77) +## 7.9.0(2022-07-20) +* 对象存储,支持使用时不配置区域信息,SDK 自动获取; +* 对象存储,新增 list_domains API 用于查询空间绑定的域名 +* 对象存储,上传 API 新增支持设置自定义元数据,详情见 put_data, put_file, put_stream API +* 解决部分已知问题 -- bug fix, httplib_thunk.py 中的无效符号引用 -- PutPolicy:增加 saveKey、persistentOps/persistentNotifyUrl、fsizeLimit(文件大小限制)等支持 -- 断点续传:使用新的 mkfile 协议 +## 7.8.0(2022-06-08) +* 对象存储,管理类 API 发送请求时增加 [X-Qiniu-Date](https://developer.qiniu.com/kodo/3924/common-request-headers) (生成请求的时间) header +## 7.7.1 (2022-05-11) +* 对象存储,修复上传不制定 key 部分情况下会上传失败问题。 -### v6.1.2 +## 7.7.0 (2022-04-29) +* 对象存储,新增 set_object_lifecycle (设置 Object 生命周期) API -2013-08-01 issue [#66](https://github.com/qiniu/python-sdk/pull/66) +## 7.6.0 (2022-03-28) +* 优化了错误处理机制 +* 支持 [Qiniu](https://developer.qiniu.com/kodo/1201/access-token) 签名算法 -- 修复在Windows环境下put_file无法读取文件的bug -- 修复在Windows环境下创建临时文件的权限问题 -- 修复在Windows环境下对二进制文件计算crc32的bug +## 7.5.0 (2021-09-23) +* 上传策略新增对部分字段支持 +## 7.4.1 (2021-05-25) +* 分片上传 v2 方法不再强制要求 bucket_name 参数 -### v6.1.1 +## 7.4.0 (2021-05-21) +* 支持分片上传 v2 -2013-07-05 issue [#60](https://github.com/qiniu/python-sdk/pull/60) +## 7.3.1 (2021-01-06) +* 修复 ResponseInfo 对扩展码错误处理问题 +* 增加 python v3.7,v3.8,v3.9 版本 CI 测试 -- 整理文档 +## 7.3.0 (2020-09-23) +新增 +* sms[云短信]:新增查询短信发送记录方法:get_messages_info +* cdn: 新增上线域名 domain_online 方法、下线域名 domain_offline 方法和删除域名 delete_domain 方法 +* 对象存储:新增批量解冻build_batch_restoreAr方法、获取空间列表bucket_domain方法和修改空间访问权限change_bucket_permission方法 +## 7.2.10 (2020-08-21) +* 修复上传策略中forceSaveKey参数没有签算进上传token,导致上传失败的问题 
+## 7.2.9 (2020-08-07) +* 支持指定本地ctx缓存文件.qiniu_pythonsdk_hostscache.json 文件路径 +* 更正接口返回描述docstring +* 修复接口对非json response 处理 +* ci 覆盖增加python 3.6 3.7 +* 修复获取域名列方法 +* 修复python3 环境下,二进制对象上传问题 -### v6.1.0 -2013-07-03 issue [#58](https://github.com/qiniu/python-sdk/pull/58) +## 7.2.8(2020-03-27) +* add restoreAr -- 实现最新版的上传API, - - io.PutExtra更新,废弃callback_params,bucket,和custom_meta,新增params -- 修复[#16](https://github.com/qiniu/python-sdk/issues/16) - - put接口可以传入类文件对象(file-like object) -- 修复[#52](https://github.com/qiniu/python-sdk/issues/52) +## 7.2.7(2020-03-10) +* fix bucket_info +## 7.2.6(2019-06-26) +* 添加sms -### v6.0.1 +## 7.2.5 (2019-06-06) +* 添加sms -2013-06-27 issue [#43](https://github.com/qiniu/python-sdk/pull/43) +## 7.2.4 (2019-04-01) +* 默认导入region类 -- 遵循 [sdkspec v6.0.2](https://github.com/qiniu/sdkspec/tree/v6.0.2) - - 现在,rsf.list_prefix在没有更多数据时,err 会返回 rsf.EOF +## 7.2.3 (2019-02-25) +* 新增region类,zone继承 +* 上传可以指定上传域名 +* 新增上传指定上传空间和qvm指定上传内网的例子 +* 新增列举账号空间,创建空间,查询空间信息,改变文件状态接口,并提供例子 +## 7.2.2 (2018-05-10) +* 增加连麦rtc服务端API功能 -### v6.0.0 +## 7.2.0(2017-11-23) +* 修复put_data不支持file like object的问题 +* 增加空间写错时,抛出异常提示客户的功能 +* 增加创建空间的接口功能 -2013-06-26 issue [#42](https://github.com/qiniu/python-sdk/pull/42) +## 7.1.9(2017-11-01) +* 修复python2情况下,中文文件名上传失败的问题 +* 修复python2环境下,中文文件使用分片上传时失败的问题 -- 遵循 [sdkspec v6.0.1](https://github.com/qiniu/sdkspec/tree/v6.0.1) +## 7.1.8 (2017-10-18) +* 恢复kirk的API为原来的状态 + +## 7.1.7 (2017-09-27) + +* 修复从时间戳获取rfc http格式的时间字符串问题 + +## 7.1.6 (2017-09-26) + +* 给 `put_file` 功能增加保持本地文件Last Modified功能,以支持切换源站的客户CDN不回源 + +## 7.1.5 (2017-08-26) + +* 设置表单上传默认校验crc32 +* 增加PutPolicy新参数isPrefixalScope +* 修复手动指定的zone无效的问题 + +## 7.1.4 (2017-06-05) +### 修正 +* cdn功能中获取域名日志列表的参数错误 + +## 7.1.2 (2017-03-24) +### 增加 +* 增加设置文件生命周期的接口 + +## 7.1.1 (2017-02-03) +### 增加 +* 增加cdn刷新,预取,日志获取,时间戳防盗链生成功能 + +### 修正 +* 修复分片上传的upload record path遇到中文时的问题,现在使用md5来计算文件名 + +## 7.1.0 (2016-12-08) +### 增加 +* 通用计算支持 + +## 7.0.10 (2016-11-29) +### 修正 +* 去掉homedir + +## 7.0.9 (2016-10-09) +### 增加 +* 多机房接口调用支持 + +## 7.0.8 (2016-07-05) +### 修正 +* 修复表单上传大于20M文件的400错误 + +### 增加 +* copy 和 move 操作增加 force 字段,允许强制覆盖 copy 和 move +* 增加上传策略 deleteAfterDays 字段 +* 一些 demo + +## 7.0.7 (2016-05-05) +### 修正 +* 修复大于4M的文件hash计算错误的问题 +* add fname + +### 增加 +* 一些demo +* travis 直接发布 + +## 7.0.6 (2015-12-05) +### 修正 +* 2.x unicode 问题 by @hunter007 +* 上传重试判断 +* 上传时 dns劫持处理 + +### 增加 +* fsizeMin 上传策略 +* 断点上传记录 by @hokein +* 计算stream etag +* 3.5 ci 支持 + +## 7.0.5 (2015-06-25) +### 变更 +* 配置up_host 改为配置zone + +### 增加 +* fectch 支持不指定key + +## 7.0.4 (2015-05-04) +### 修正 +* 上传重试为空文件 +* 回调应该只对form data 签名。 + +## 7.0.3 (2015-03-11) +### 增加 +* 可以配置 io/rs/api/rsf host + +## 7.0.2 (2014-12-24) +### 修正 +* 内部http get当没有auth会出错 +* python3下的qiniupy 没有参数时 arg parse会抛异常 +* 增加callback policy + +## 7.0.1 (2014-11-26) +### 增加 +* setup.py从文件中读取版本号,而不是用导入方式 +* 补充及修正了一些单元测试 + +## 7.0.0 (2014-11-13) + +### 增加 +* 简化上传接口 +* 自动选择断点续上传还是直传 +* 重构代码,接口和内部结构更清晰 +* 同时支持python 2.x 和 3.x +* 支持pfop +* 支持verify callback +* 改变mime +* 代码覆盖度报告 +* policy改为dict, 便于灵活增加,并加入过期字段检查 +* 文件列表支持目录形式 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..a4b79902 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# 贡献代码指南 + +我们非常欢迎大家来贡献代码,我们会向贡献者致以最诚挚的敬意。 + +一般可以通过在Github上提交[Pull Request](https://github.com/qiniu/python-sdk)来贡献代码。 + +## Pull Request要求 + +- **代码规范** 遵从pep8,pythonic。 + +- **代码格式** 提交前 请按 pep8 进行格式化。 + +- **必须添加测试!** - 如果没有测试(单元测试、集成测试都可以),那么提交的补丁是不会通过的。 + +- **记得更新文档** - 保证`README.md`以及其他相关文档及时更新,和代码的变更保持一致性。 + +- **考虑我们的发布周期** - 
我们的版本号会服从[SemVer v2.0.0](http://semver.org/),我们绝对不会随意变更对外的API。 + +- **创建feature分支** - 最好不要从你的master分支提交 pull request。 + +- **一个feature提交一个pull请求** - 如果你的代码变更了多个操作,那就提交多个pull请求吧。 + +- **清晰的commit历史** - 保证你的pull请求的每次commit操作都是有意义的。如果你开发中需要执行多次的即时commit操作,那么请把它们放到一起再提交pull请求。 + +## 运行测试 + +``` bash +py.test + +``` diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..ba646be9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Qiniu, Ltd. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/README.md b/README.md index 6c95d9e1..d5291e97 100644 --- a/README.md +++ b/README.md @@ -1,43 +1,81 @@ -Qiniu Resource Storage SDK for Python -=== +# Qiniu Cloud SDK for Python -[![Build Status](https://api.travis-ci.org/qiniu/python-sdk.png?branch=develop)](https://travis-ci.org/qiniu/python-sdk) +[![@qiniu on weibo](http://img.shields.io/badge/weibo-%40qiniutek-blue.svg)](http://weibo.com/qiniutek) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE) +[![Build Status](https://github.com/qiniu/python-sdk/actions/workflows/ci-test.yml/badge.svg)](https://travis-ci.org/qiniu/python-sdk) +[![GitHub release](https://img.shields.io/github/v/tag/qiniu/python-sdk.svg?label=release)](https://github.com/qiniu/python-sdk/releases) +[![Latest Stable Version](https://img.shields.io/pypi/v/qiniu.svg)](https://pypi.python.org/pypi/qiniu) +[![Download Times](https://img.shields.io/pypi/dm/qiniu.svg)](https://pypi.python.org/pypi/qiniu) +[![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/qiniu/python-sdk/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/qiniu/python-sdk/?branch=master) +[![Coverage Status](https://codecov.io/gh/qiniu/python-sdk/branch/master/graph/badge.svg)](https://codecov.io/gh/qiniu/python-sdk) -[![Qiniu Logo](http://qiniutek.com/images/logo-2.png)](http://qiniu.com/) +## 安装 -## 使用 +通过pip -参考文档:[七牛云存储 Python SDK 使用指南](https://github.com/qiniu/python-sdk/blob/develop/docs/README.md) +```bash +$ pip install qiniu +``` -## 单元测试 +## 运行环境 -1. 测试环境 +| Qiniu SDK版本 | Python 版本 | +| :-----------: | :------------------------------------: | +| 7.x | 2.7, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9 | +| 6.x | 2.7 | - 1. [开通七牛开发者帐号](https://portal.qiniu.com/signup) - 2. [登录七牛开发者自助平台,查看 Access Key 和 Secret Key](https://portal.qiniu.com/setting/key) 。 - 3. 在开发者后台新建一个空间 +## 使用方法 - 然后将在`test-env.sh`中填入相关信息。 +### 上传 +```python +import qiniu -2. 需安装[nosetests](https://nose.readthedocs.org/en/latest/)测试工具。 +... 
+ q = qiniu.Auth(access_key, secret_key) + key = 'hello' + data = 'hello qiniu!' + token = q.upload_token(bucket_name) + ret, info = qiniu.put_data(token, key, data) + if ret is not None: + print('All is OK') + else: + print(info) # error message in info +... -运行测试: +``` +更多参见SDK使用指南: https://developer.qiniu.com/kodo/sdk/python +``` - source test-env.sh - nosetests +## 测试 -## 贡献代码 +``` bash +$ py.test +``` -1. Fork -2. 创建您的特性分支 (`git checkout -b my-new-feature`) -3. 提交您的改动 (`git commit -am 'Added some feature'`) -4. 将您的修改记录提交到远程 `git` 仓库 (`git push origin my-new-feature`) -5. 然后到 github 网站的该 `git` 远程仓库的 `my-new-feature` 分支下发起 Pull Request +## 常见问题 -## 许可证 +- 第二个参数info保留了请求响应的信息,失败情况下ret 为none, 将info可以打印出来,提交给我们。 +- API 的使用 demo 可以参考 [examples示例](https://github.com/qiniu/python-sdk/tree/master/examples)。 +- 如果碰到`ImportError: No module named requests.auth` 请安装 `requests` 。 -Copyright (c) 2013 qiniu.com +## 代码贡献 -基于 MIT 协议发布: +详情参考[代码提交指南](https://github.com/qiniu/python-sdk/blob/master/CONTRIBUTING.md)。 -* [www.opensource.org/licenses/MIT](http://www.opensource.org/licenses/MIT) +## 贡献记录 + +- [所有贡献者](https://github.com/qiniu/python-sdk/contributors) + +## 联系我们 + +- 如果需要帮助,请提交工单(在portal右侧点击咨询和建议提交工单,或者直接向 support@qiniu.com 发送邮件) +- 如果有什么问题,可以到问答社区提问,[问答社区](http://qiniu.segmentfault.com/) +- 更详细的文档,见[官方文档站](http://developer.qiniu.com/) +- 如果发现了bug, 欢迎提交 [issue](https://github.com/qiniu/python-sdk/issues) +- 如果有功能需求,欢迎提交 [issue](https://github.com/qiniu/python-sdk/issues) +- 如果要提交代码,欢迎提交 pull request +- 欢迎关注我们的[微信](http://www.qiniu.com/#weixin) [微博](http://weibo.com/qiniutek),及时获取动态信息。 + +## 代码许可 + +The MIT License (MIT).详情见 [License文件](https://github.com/qiniu/python-sdk/blob/master/LICENSE). diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..0aab28d3 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,28 @@ +codecov: + ci: + - prow.qiniu.io # prow need this. 
seems useless + require_ci_to_pass: no # `no` means the bot will comment on the PR even before all ci passed + +github_checks: # close github checks + annotations: false + +comment: + layout: "reach, diff, flags, files" + behavior: new # `new` means the bot will comment a new message instead of edit the old one + require_changes: false # if true: only post the comment if coverage changes + require_base: no # [yes :: must have a base report to post] + require_head: yes # [yes :: must have a head report to post] + branches: # branch names that can post comment + - "master" + +coverage: + status: # check coverage status to pass or fail + patch: off + project: # project analyze all code in the project + default: + # basic + target: 73.5% # the minimum coverage ratio that the commit must meet + threshold: 3% # allow the coverage to drop + base: auto + if_not_found: success + if_ci_failed: error diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 1ef535fd..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -all: - gist README.gist.md > README.md - diff --git a/docs/README.gist.md b/docs/README.gist.md deleted file mode 100644 index 0e148bea..00000000 --- a/docs/README.gist.md +++ /dev/null @@ -1,499 +0,0 @@ ---- -title: Python SDK 使用指南 ---- - -此 Python SDK 适用于2.x版本,基于 [七牛云存储官方API](http://docs.qiniu.com/) 构建。使用此 SDK 构建您的网络应用程序,能让您以非常便捷地方式将数据安全地存储到七牛云存储上。无论您的网络应用是一个网站程序,还是包括从云端(服务端程序)到终端(手持设备应用)的架构的服务或应用,通过七牛云存储及其 SDK,都能让您应用程序的终端用户高速上传和下载,同时也让您的服务端更加轻盈。 - -SDK 下载地址: - -**文档大纲** - -- [概述](#overview) -- [准备开发环境](#prepare) - - [安装](#install) - - [ACCESS_KEY 和 SECRET_KEY](#appkey) -- [使用SDK](#sdk-usage) - - [初始化环境](#init) - - [上传文件](#io-put) - - [上传流程](#io-put-flow) - - [上传策略](#io-put-policy) - - [上传凭证](#upload-token) - - [PutExtra](#put-extra) - - [上传文件](#upload-do) - - [断点续上传、分块并行上传](#resumable-io-put) - - [下载文件](#io-get) - - [下载公有文件](#io-get-public) - - [下载私有文件](#io-get-private) - - [断点续下载](#resumable-io-get) - - [资源操作](#rs) - - [获取文件信息](#rs-stat) - - [复制文件](#rs-copy) - - [移动文件](#rs-move) - - [删除文件](#rs-delete) - - [批量操作](#rs-batch) - - [批量获取文件信息](#batch-stat) - - [批量复制文件](#batch-copy) - - [批量移动文件](#batch-move) - - [批量删除文件](#batch-delete) - - [高级管理操作](#rsf) - - [列出文件](#list-prefix) - - [云处理](#fop) - - [图像](#fop-image) - - [查看图像属性](#fop-image-info) - - [查看图片EXIF信息](#fop-exif) - - [生成图片预览](#fop-image-view) -- [贡献代码](#contribution) -- [许可证](#license) - - - -## 概述 - -七牛云存储的 Python 语言版本 SDK(本文以下称 Python-SDK)是对七牛云存储API协议的一层封装,以提供一套对于 Python 开发者而言简单易用的开发工具。Python 开发者在对接 Python-SDK 时无需理解七牛云存储 API 协议的细节,原则上也不需要对 HTTP 协议和原理做非常深入的了解,但如果拥有基础的 HTTP 知识,对于出错场景的处理可以更加高效。 - -Python-SDK 被设计为同时适合服务器端和客户端使用。服务端是指开发者自己的业务服务器,客户端是指开发者提供给终端用户的软件,通常运行在 Windows/Mac/Linux 这样的桌面平台上。服务端因为有七牛颁发的 AccessKey/SecretKey,可以做很多客户端做不了的事情,比如删除文件、移动/复制文件等操作。一般而言,客服端操作文件需要获得服务端的授权。客户端上传文件需要获得服务端颁发的 [uptoken(上传授权凭证)](http://docs.qiniu.com/api/put.html#uploadToken),客户端下载文件(包括下载处理过的文件,比如下载图片的缩略图)需要获得服务端颁发的 [dntoken(下载授权凭证)](http://docs.qiniu.com/api/get.html#download-token)。但开发者也可以将 bucket 设置为公开,此时文件有永久有效的访问地址,不需要业务服务器的授权,这对网站的静态文件(如图片、js、css、html)托管非常方便。 - -从 v5.0.0 版本开始,我们对 SDK 的内容进行了精简。所有管理操作,比如:创建/删除 bucket、为 bucket 绑定域名(publish)、设置数据处理的样式分隔符(fop seperator)、新增数据处理样式(fop style)等都去除了,统一建议到[开发者后台](https://portal.qiniu.com/)来完成。另外,此前服务端还有自己独有的上传 API,现在也推荐统一成基于客户端上传的工作方式。 - -从内容上来说,Python-SDK 主要包含如下几方面的内容: - -* 公共部分,所有用况下都用到:qiniu/rpc.py, qiniu/httplib_chunk.py -* 客户端上传文件:qiniu/io.py -* 客户端断点续上传:qiniu/resumable_io.py -* 数据处理:qiniu/fop.py -* 服务端操作:qiniu/auth/digest.py, qiniu/auth/up.py (授权), qiniu/rs/rs.py, 
qiniu/rs/rs_token.py (资源操作, uptoken/dntoken颁发) - - - - - -## 准备开发环境 - - - - -### 安装 - -直接安装: - - pip install qiniu - #或 - easy_install qiniu - -Python-SDK可以使用`pip`或`easy_install`从PyPI服务器上安装,但不包括文档和样例。如果需要,请下载源码并安装。 - -源码安装: - -从[Python-SDK下载地址](https://github.com/qiniu/python-sdk/releases)下载源码: - - tar xvzf python-sdk-$VERSION.tar.gz - cd python-sdk-$VERSION - python setup.py install - - - - -### ACCESS_KEY 和 SECRET_KEY - -在使用SDK 前,您需要拥有一对有效的 AccessKey 和 SecretKey 用来进行签名授权。 - -可以通过如下步骤获得: - -1. [开通七牛开发者帐号](https://portal.qiniu.com/signup) -2. [登录七牛开发者自助平台,查看 Access Key 和 Secret Key](https://portal.qiniu.com/setting/key) 。 - - - -## 使用SDK - - - -### 初始化环境 - -在获取到 Access Key 和 Secret Key 之后,您可以在您的程序中调用如下两行代码进行初始化对接, 要确保`ACCESS_KEY` 和 `SECRET_KEY` 在调用所有七牛API服务之前均已赋值: - -```{python} -@gist(gist/conf.py#config) -``` - - - -### 上传文件 - -为了尽可能地改善终端用户的上传体验,七牛云存储首创了客户端直传功能。一般云存储的上传流程是: - - 客户端(终端用户) => 业务服务器 => 云存储服务 - -这样多了一次上传的流程,和本地存储相比,会相对慢一些。但七牛引入了客户端直传,将整个上传过程调整为: - - 客户端(终端用户) => 七牛 => 业务服务器 - -客户端(终端用户)直接上传到七牛的服务器,通过DNS智能解析,七牛会选择到离终端用户最近的ISP服务商节点,速度会比本地存储快很多。文件上传成功以后,七牛的服务器使用回调功能,只需要将非常少的数据(比如Key)传给应用服务器,应用服务器进行保存即可。 - - - -#### 上传流程 - -在七牛云存储中,整个上传流程大体分为这样几步: - -1. 业务服务器颁发 [uptoken(上传授权凭证)](http://docs.qiniu.com/api/put.html#uploadToken)给客户端(终端用户) -2. 客户端凭借 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 上传文件到七牛 -3. 在七牛获得完整数据后,发起一个 HTTP 请求回调到业务服务器 -4. 业务服务器保存相关信息,并返回一些信息给七牛 -5. 七牛原封不动地将这些信息转发给客户端(终端用户) - -需要注意的是,回调到业务服务器的过程是可选的,它取决于业务服务器颁发的 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken)。如果没有回调,七牛会返回一些标准的信息(比如文件的 hash)给客户端。如果上传发生在业务服务器,以上流程可以自然简化为: - -1. 业务服务器生成 uptoken(不设置回调,自己回调到自己这里没有意义) -2. 凭借 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 上传文件到七牛 -3. 善后工作,比如保存相关的一些信息 - - - -##### 上传策略 - -[uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 实际上是用 AccessKey/SecretKey 进行数字签名的上传策略(`qiniu.rs.PutPolicy`),它控制则整个上传流程的行为。让我们快速过一遍你都能够决策啥: - -```{python} -@gist(../qiniu/rs/rs_token.py#PutPolicy) -``` - -* `scope` 限定客户端的权限。如果 `scope` 是 bucket,则客户端只能新增文件到指定的 bucket,不能修改文件。如果 `scope` 为 bucket:key,则客户端可以修改指定的文件。**注意: key必须采用utf8编码,如使用非utf8编码访问七牛云存储将反馈错误** -* `callbackUrl` 设定业务服务器的回调地址,这样业务服务器才能感知到上传行为的发生。 -* `callbackBody` 设定业务服务器的回调信息。文件上传成功后,七牛向业务服务器的callbackUrl发送的POST请求携带的数据。支持 [魔法变量](http://docs.qiniu.com/api/put.html#MagicVariables) 和 [自定义变量](http://docs.qiniu.com/api/put.html#xVariables)。 -* `returnUrl` 设置用于浏览器端文件上传成功后,浏览器执行301跳转的URL,一般为 HTML Form 上传时使用。文件上传成功后浏览器会自动跳转到 `returnUrl?upload_ret=returnBody`。 -* `returnBody` 可调整返回给客户端的数据包,支持 [魔法变量](http://docs.qiniu.com/api/put.html#MagicVariables) 和 [自定义变量](http://docs.qiniu.com/api/put.html#xVariables)。`returnBody` 只在没有 `callbackUrl` 时有效(否则直接返回 `callbackUrl` 返回的结果)。不同情形下默认返回的 `returnBody` 并不相同。在一般情况下返回的是文件内容的 `hash`,也就是下载该文件时的 `etag`;但指定 `returnUrl` 时默认的 `returnBody` 会带上更多的信息。 -* `asyncOps` 可指定上传完成后,需要自动执行哪些数据处理。这是因为有些数据处理操作(比如音视频转码)比较慢,如果不进行预转可能第一次访问的时候效果不理想,预转可以很大程度改善这一点。 - -关于上传策略更完整的说明,请参考 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken)。 - - - -##### 上传凭证 - -服务端生成 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 代码如下: - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#uptoken) -``` - - - -##### PutExtra - -PutExtra是上传时的可选信息,默认为None - -```{python} -@gist(../qiniu/io.py#PutExtra) -``` - -* `params` 是一个字典。[自定义变量](http://docs.qiniu.com/api/put.html#xVariables),key必须以 x: 开头命名,不限个数。可以在 uploadToken 的 callbackBody 选项中求值。 -* `mime_type` 表示数据的MimeType,当不指定时七牛服务器会自动检测。 -* `crc32` 待检查的crc32值 -* `check_crc` 可选值为0, 1, 2。 - `check_crc == 0`: 表示不进行 crc32 校验。 
- `check_crc == 1`: 上传二进制数据时等同于 `check_crc=2`;上传本地文件时会自动计算 crc32 值。 - `check_crc == 2`: 表示进行 crc32 校验,且 crc32 值就是上面的 `crc32` 变量 - - - -##### 上传文件 - -上传文件到七牛(通常是客户端完成,但也可以发生在服务端): - -直接上传二进制流 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_io) - -@gist(gist/demo.py#put) -``` - -上传本地文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_io) - -@gist(gist/demo.py#put_file) -``` - -ret是一个字典,含有`hash`,`key`等信息。 - - - -##### 断点续上传、分块并行上传 - -除了基本的上传外,七牛还支持你将文件切成若干块(除最后一块外,每个块固定为4M大小),每个块可独立上传,互不干扰;每个分块块内则能够做到断点上续传。 - -我们来看支持了断点上续传、分块并行上传的基本样例: - -上传二进制流 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_resumable_io) - -@gist(gist/demo.py#resumable_put) -``` - -上传本地文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_resumable_io) - -@gist(gist/demo.py#resumable_put_file) -``` - - - -### 下载文件 - - - -#### 下载公有文件 - -每个 bucket 都会绑定一个或多个域名(domain)。如果这个 bucket 是公开的,那么该 bucket 中的所有文件可以通过一个公开的下载 url 可以访问到: - - http:/// - -其中\是bucket所对应的域名。七牛云存储为每一个bucket提供一个默认域名。默认域名可以到[七牛云存储开发者平台](https://portal.qiniu.com/)中,空间设置的域名设置一节查询。 - -假设某个 bucket 既绑定了七牛的二级域名,如 hello.qiniudn.com,也绑定了自定义域名(需要备案),如 hello.com。那么该 bucket 中 key 为 a/b/c.htm 的文件可以通过 http://hello.qiniudn.com/a/b/c.htm 或 http://hello.com/a/b/c.htm 中任意一个 url 进行访问。 - -**注意: key必须采用utf8编码,如使用非utf8编码访问七牛云存储将反馈错误** - - - -#### 下载私有文件 - -如果某个 bucket 是私有的,那么这个 bucket 中的所有文件只能通过一个的临时有效的 downloadUrl 访问: - - http:///?e=&token= - -其中 dntoken 是由业务服务器签发的一个[临时下载授权凭证](http://docs.qiniu.com/api/get.html#download-token),deadline 是 dntoken 的有效期。dntoken不需要单独生成,SDK 提供了生成完整 downloadUrl 的方法(包含了 dntoken),示例代码如下: - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#dntoken) -``` - -生成 downloadUrl 后,服务端下发 downloadUrl 给客户端。客户端收到 downloadUrl 后,和公有资源类似,直接用任意的 HTTP 客户端就可以下载该资源了。唯一需要注意的是,在 downloadUrl 失效却还没有完成下载时,需要重新向服务器申请授权。 - -无论公有资源还是私有资源,下载过程中客户端并不需要七牛 SDK 参与其中。 - - - -#### 断点续下载 - -无论是公有资源还是私有资源,获得的下载 url 支持标准的 HTTP 断点续传协议。考虑到多数语言都有相应的断点续下载支持的成熟方法,七牛 Python-SDK 并不提供断点续下载相关代码。 - - - -### 资源操作 - - - - -#### 获取文件信息 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#stat) -``` - - - -#### 复制文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#copy) -``` - - - -#### 移动文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#move) -``` - - - -#### 删除文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#delete) -``` - - - -#### 批量操作 - -当您需要一次性进行多个操作时, 可以使用批量操作。 - - - -##### 批量获取文件信息 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#batch_path) - -@gist(gist/demo.py#batch_stat) -``` - - -##### 批量复制文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#batch_path) - -@gist(gist/demo.py#batch_copy) -``` - - -##### 批量移动文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#batch_path) - -@gist(gist/demo.py#batch_move) -``` - - -##### 批量删除文件 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#batch_path) - -@gist(gist/demo.py#batch_delete) -``` - - - -### 高级管理操作 - - -#### 列出文件 - -请求某个存储空间(bucket)下的文件列表,如果有前缀,可以按前缀(prefix)进行过滤;如果前一次返回marker就表示还有资源,下一步请求需要将marker参数填上。 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_rsf) - -@gist(gist/demo.py#list_prefix) -``` - 
-一个典型的对整个bucket遍历的操作为: - -```{python} -@gist(gist/demo.py#list_all) -``` - - -### 云处理 - - -#### 图像 - - -##### 查看图像属性 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_fop) -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#image_info) -``` - - -##### 查看图片EXIF信息 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_fop) -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#exif) -``` - - - -##### 生成图片预览 - -```{python} -@gist(gist/conf.py#config) - -@gist(gist/demo.py#import_fop) -@gist(gist/demo.py#import_rs) - -@gist(gist/demo.py#image_view) -``` - - -## 贡献代码 - -+ Fork -+ 创建您的特性分支 (git checkout -b my-new-feature) -+ 提交您的改动 (git commit -am 'Added some feature') -+ 将您的修改记录提交到远程 git 仓库 (git push origin my-new-feature) -+ 然后到 github 网站的该 git 远程仓库的 my-new-feature 分支下发起 Pull Request - - -## 许可证 - -> Copyright (c) 2013 qiniu.com - -基于 MIT 协议发布: - -> [www.opensource.org/licenses/MIT](http://www.opensource.org/licenses/MIT) - diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index b27d0d9a..00000000 --- a/docs/README.md +++ /dev/null @@ -1,695 +0,0 @@ ---- -title: Python SDK 使用指南 ---- - -# Python SDK 使用指南 - -此 Python SDK 适用于2.x版本,基于 [七牛云存储官方API](http://docs.qiniu.com/) 构建。使用此 SDK 构建您的网络应用程序,能让您以非常便捷地方式将数据安全地存储到七牛云存储上。无论您的网络应用是一个网站程序,还是包括从云端(服务端程序)到终端(手持设备应用)的架构的服务或应用,通过七牛云存储及其 SDK,都能让您应用程序的终端用户高速上传和下载,同时也让您的服务端更加轻盈。 - -SDK 下载地址: - -**文档大纲** - -- [概述](#overview) -- [准备开发环境](#prepare) - - [安装](#install) - - [ACCESS_KEY 和 SECRET_KEY](#appkey) -- [使用SDK](#sdk-usage) - - [初始化环境](#init) - - [上传文件](#io-put) - - [上传流程](#io-put-flow) - - [上传策略](#io-put-policy) - - [上传凭证](#upload-token) - - [PutExtra](#put-extra) - - [上传文件](#upload-do) - - [断点续上传、分块并行上传](#resumable-io-put) - - [下载文件](#io-get) - - [下载公有文件](#io-get-public) - - [下载私有文件](#io-get-private) - - [断点续下载](#resumable-io-get) - - [资源操作](#rs) - - [获取文件信息](#rs-stat) - - [复制文件](#rs-copy) - - [移动文件](#rs-move) - - [删除文件](#rs-delete) - - [批量操作](#rs-batch) - - [批量获取文件信息](#batch-stat) - - [批量复制文件](#batch-copy) - - [批量移动文件](#batch-move) - - [批量删除文件](#batch-delete) - - [高级管理操作](#rsf) - - [列出文件](#list-prefix) - - [云处理](#fop) - - [图像](#fop-image) - - [查看图像属性](#fop-image-info) - - [查看图片EXIF信息](#fop-exif) - - [生成图片预览](#fop-image-view) -- [贡献代码](#contribution) -- [许可证](#license) - - - -## 概述 - -七牛云存储的 Python 语言版本 SDK(本文以下称 Python-SDK)是对七牛云存储API协议的一层封装,以提供一套对于 Python 开发者而言简单易用的开发工具。Python 开发者在对接 Python-SDK 时无需理解七牛云存储 API 协议的细节,原则上也不需要对 HTTP 协议和原理做非常深入的了解,但如果拥有基础的 HTTP 知识,对于出错场景的处理可以更加高效。 - -Python-SDK 被设计为同时适合服务器端和客户端使用。服务端是指开发者自己的业务服务器,客户端是指开发者提供给终端用户的软件,通常运行在 Windows/Mac/Linux 这样的桌面平台上。服务端因为有七牛颁发的 AccessKey/SecretKey,可以做很多客户端做不了的事情,比如删除文件、移动/复制文件等操作。一般而言,客服端操作文件需要获得服务端的授权。客户端上传文件需要获得服务端颁发的 [uptoken(上传授权凭证)](http://docs.qiniu.com/api/put.html#uploadToken),客户端下载文件(包括下载处理过的文件,比如下载图片的缩略图)需要获得服务端颁发的 [dntoken(下载授权凭证)](http://docs.qiniu.com/api/get.html#download-token)。但开发者也可以将 bucket 设置为公开,此时文件有永久有效的访问地址,不需要业务服务器的授权,这对网站的静态文件(如图片、js、css、html)托管非常方便。 - -从 v5.0.0 版本开始,我们对 SDK 的内容进行了精简。所有管理操作,比如:创建/删除 bucket、为 bucket 绑定域名(publish)、设置数据处理的样式分隔符(fop seperator)、新增数据处理样式(fop style)等都去除了,统一建议到[开发者后台](https://portal.qiniu.com/)来完成。另外,此前服务端还有自己独有的上传 API,现在也推荐统一成基于客户端上传的工作方式。 - -从内容上来说,Python-SDK 主要包含如下几方面的内容: - -* 公共部分,所有用况下都用到:qiniu/rpc.py, qiniu/httplib_chunk.py -* 客户端上传文件:qiniu/io.py -* 客户端断点续上传:qiniu/resumable_io.py -* 数据处理:qiniu/fop.py -* 服务端操作:qiniu/auth/digest.py, qiniu/auth/up.py (授权), qiniu/rs/rs.py, qiniu/rs/rs_token.py (资源操作, uptoken/dntoken颁发) - - - - - -## 准备开发环境 - - - - -### 安装 - -直接安装: - - pip install qiniu 
- #或 - easy_install qiniu - -Python-SDK可以使用`pip`或`easy_install`从PyPI服务器上安装,但不包括文档和样例。如果需要,请下载源码并安装。 - -源码安装: - -从[Python-SDK下载地址](https://github.com/qiniu/python-sdk/releases)下载源码: - - tar xvzf python-sdk-$VERSION.tar.gz - cd python-sdk-$VERSION - python setup.py install - - - - -### ACCESS_KEY 和 SECRET_KEY - -在使用SDK 前,您需要拥有一对有效的 AccessKey 和 SecretKey 用来进行签名授权。 - -可以通过如下步骤获得: - -1. [开通七牛开发者帐号](https://portal.qiniu.com/signup) -2. [登录七牛开发者自助平台,查看 Access Key 和 Secret Key](https://portal.qiniu.com/setting/key) 。 - - - -## 使用SDK - - - -### 初始化环境 - -在获取到 Access Key 和 Secret Key 之后,您可以在您的程序中调用如下两行代码进行初始化对接, 要确保`ACCESS_KEY` 和 `SECRET_KEY` 在调用所有七牛API服务之前均已赋值: - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" -``` - - - -### 上传文件 - -为了尽可能地改善终端用户的上传体验,七牛云存储首创了客户端直传功能。一般云存储的上传流程是: - - 客户端(终端用户) => 业务服务器 => 云存储服务 - -这样多了一次上传的流程,和本地存储相比,会相对慢一些。但七牛引入了客户端直传,将整个上传过程调整为: - - 客户端(终端用户) => 七牛 => 业务服务器 - -客户端(终端用户)直接上传到七牛的服务器,通过DNS智能解析,七牛会选择到离终端用户最近的ISP服务商节点,速度会比本地存储快很多。文件上传成功以后,七牛的服务器使用回调功能,只需要将非常少的数据(比如Key)传给应用服务器,应用服务器进行保存即可。 - - - -#### 上传流程 - -在七牛云存储中,整个上传流程大体分为这样几步: - -1. 业务服务器颁发 [uptoken(上传授权凭证)](http://docs.qiniu.com/api/put.html#uploadToken)给客户端(终端用户) -2. 客户端凭借 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 上传文件到七牛 -3. 在七牛获得完整数据后,发起一个 HTTP 请求回调到业务服务器 -4. 业务服务器保存相关信息,并返回一些信息给七牛 -5. 七牛原封不动地将这些信息转发给客户端(终端用户) - -需要注意的是,回调到业务服务器的过程是可选的,它取决于业务服务器颁发的 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken)。如果没有回调,七牛会返回一些标准的信息(比如文件的 hash)给客户端。如果上传发生在业务服务器,以上流程可以自然简化为: - -1. 业务服务器生成 uptoken(不设置回调,自己回调到自己这里没有意义) -2. 凭借 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 上传文件到七牛 -3. 善后工作,比如保存相关的一些信息 - - - -##### 上传策略 - -[uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 实际上是用 AccessKey/SecretKey 进行数字签名的上传策略(`qiniu.rs.PutPolicy`),它控制则整个上传流程的行为。让我们快速过一遍你都能够决策啥: - -```{python} -class PutPolicy(object): - scope = None # 可以是 bucketName 或者 bucketName:key - expires = 3600 # 默认是 3600 秒 - callbackUrl = None - callbackBody = None - returnUrl = None - returnBody = None - endUser = None - asyncOps = None - - def __init__(self, scope): - self.scope = scope -``` - -* `scope` 限定客户端的权限。如果 `scope` 是 bucket,则客户端只能新增文件到指定的 bucket,不能修改文件。如果 `scope` 为 bucket:key,则客户端可以修改指定的文件。**注意: key必须采用utf8编码,如使用非utf8编码访问七牛云存储将反馈错误** -* `callbackUrl` 设定业务服务器的回调地址,这样业务服务器才能感知到上传行为的发生。 -* `callbackBody` 设定业务服务器的回调信息。文件上传成功后,七牛向业务服务器的callbackUrl发送的POST请求携带的数据。支持 [魔法变量](http://docs.qiniu.com/api/put.html#MagicVariables) 和 [自定义变量](http://docs.qiniu.com/api/put.html#xVariables)。 -* `returnUrl` 设置用于浏览器端文件上传成功后,浏览器执行301跳转的URL,一般为 HTML Form 上传时使用。文件上传成功后浏览器会自动跳转到 `returnUrl?upload_ret=returnBody`。 -* `returnBody` 可调整返回给客户端的数据包,支持 [魔法变量](http://docs.qiniu.com/api/put.html#MagicVariables) 和 [自定义变量](http://docs.qiniu.com/api/put.html#xVariables)。`returnBody` 只在没有 `callbackUrl` 时有效(否则直接返回 `callbackUrl` 返回的结果)。不同情形下默认返回的 `returnBody` 并不相同。在一般情况下返回的是文件内容的 `hash`,也就是下载该文件时的 `etag`;但指定 `returnUrl` 时默认的 `returnBody` 会带上更多的信息。 -* `asyncOps` 可指定上传完成后,需要自动执行哪些数据处理。这是因为有些数据处理操作(比如音视频转码)比较慢,如果不进行预转可能第一次访问的时候效果不理想,预转可以很大程度改善这一点。 - -关于上传策略更完整的说明,请参考 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken)。 - - - -##### 上传凭证 - -服务端生成 [uptoken](http://docs.qiniu.com/api/put.html#uploadToken) 代码如下: - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -policy = qiniu.rs.PutPolicy(bucket_name) -uptoken = policy.token() -``` - - - -##### PutExtra - -PutExtra是上传时的可选信息,默认为None - -```{python} -class PutExtra(object): - params = {} - mime_type = 
'application/octet-stream' - crc32 = "" - check_crc = 0 -``` - -* `params` 是一个字典。[自定义变量](http://docs.qiniu.com/api/put.html#xVariables),key必须以 x: 开头命名,不限个数。可以在 uploadToken 的 callbackBody 选项中求值。 -* `mime_type` 表示数据的MimeType,当不指定时七牛服务器会自动检测。 -* `crc32` 待检查的crc32值 -* `check_crc` 可选值为0, 1, 2。 - `check_crc == 0`: 表示不进行 crc32 校验。 - `check_crc == 1`: 上传二进制数据时等同于 `check_crc=2`;上传本地文件时会自动计算 crc32 值。 - `check_crc == 2`: 表示进行 crc32 校验,且 crc32 值就是上面的 `crc32` 变量 - - - -##### 上传文件 - -上传文件到七牛(通常是客户端完成,但也可以发生在服务端): - -直接上传二进制流 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.io - -extra = qiniu.io.PutExtra() -extra.mime_type = "text/plain" - -# data 可以是str或read()able对象 -data = StringIO.StringIO("hello!") -ret, err = qiniu.io.put(uptoken, key, data, extra) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - -上传本地文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.io - -localfile = "%s" % __file__ - -ret, err = qiniu.io.put_file(uptoken, key, localfile) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - -ret是一个字典,含有`hash`,`key`等信息。 - - - -##### 断点续上传、分块并行上传 - -除了基本的上传外,七牛还支持你将文件切成若干块(除最后一块外,每个块固定为4M大小),每个块可独立上传,互不干扰;每个分块块内则能够做到断点上续传。 - -我们来看支持了断点上续传、分块并行上传的基本样例: - -上传二进制流 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.resumable_io as rio - -a = "resumable upload string" -extra = rio.PutExtra(bucket_name) -extra.mime_type = "text/plain" -ret, err = rio.put(uptoken, key, StringIO.StringIO(a), len(a), extra) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -print ret, -``` - -上传本地文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.resumable_io as rio - -localfile = "%s" % __file__ -extra = rio.PutExtra(bucket_name) - -ret, err = rio.put_file(uptoken, key, localfile, extra) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -print ret, -``` - - - -### 下载文件 - - - -#### 下载公有文件 - -每个 bucket 都会绑定一个或多个域名(domain)。如果这个 bucket 是公开的,那么该 bucket 中的所有文件可以通过一个公开的下载 url 可以访问到: - - http:/// - -其中\是bucket所对应的域名。七牛云存储为每一个bucket提供一个默认域名。默认域名可以到[七牛云存储开发者平台](https://portal.qiniu.com/)中,空间设置的域名设置一节查询。 - -假设某个 bucket 既绑定了七牛的二级域名,如 hello.qiniudn.com,也绑定了自定义域名(需要备案),如 hello.com。那么该 bucket 中 key 为 a/b/c.htm 的文件可以通过 http://hello.qiniudn.com/a/b/c.htm 或 http://hello.com/a/b/c.htm 中任意一个 url 进行访问。 - -**注意: key必须采用utf8编码,如使用非utf8编码访问七牛云存储将反馈错误** - - - -#### 下载私有文件 - -如果某个 bucket 是私有的,那么这个 bucket 中的所有文件只能通过一个的临时有效的 downloadUrl 访问: - - http:///?e=&token= - -其中 dntoken 是由业务服务器签发的一个[临时下载授权凭证](http://docs.qiniu.com/api/get.html#download-token),deadline 是 dntoken 的有效期。dntoken不需要单独生成,SDK 提供了生成完整 downloadUrl 的方法(包含了 dntoken),示例代码如下: - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -base_url = qiniu.rs.make_base_url(domain, key) -policy = qiniu.rs.GetPolicy() -private_url = policy.make_request(base_url) -``` - -生成 downloadUrl 后,服务端下发 downloadUrl 给客户端。客户端收到 downloadUrl 后,和公有资源类似,直接用任意的 HTTP 客户端就可以下载该资源了。唯一需要注意的是,在 downloadUrl 失效却还没有完成下载时,需要重新向服务器申请授权。 - -无论公有资源还是私有资源,下载过程中客户端并不需要七牛 SDK 参与其中。 - - - -#### 断点续下载 - -无论是公有资源还是私有资源,获得的下载 url 支持标准的 HTTP 断点续传协议。考虑到多数语言都有相应的断点续下载支持的成熟方法,七牛 Python-SDK 并不提供断点续下载相关代码。 - - - -### 资源操作 - - - - -#### 获取文件信息 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -ret, err = 
qiniu.rs.Client().stat(bucket_name, key) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -print ret, -``` - - - -#### 复制文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -ret, err = qiniu.rs.Client().copy(bucket_name, key, bucket_name, key2) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - - - -#### 移动文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -ret, err = qiniu.rs.Client().move(bucket_name, key2, bucket_name, key3) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - - - -#### 删除文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -ret, err = qiniu.rs.Client().delete(bucket_name, key3) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - - - -#### 批量操作 - -当您需要一次性进行多个操作时, 可以使用批量操作。 - - - -##### 批量获取文件信息 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -path_1 = qiniu.rs.EntryPath(bucket_name, key) -path_2 = qiniu.rs.EntryPath(bucket_name, key2) -path_3 = qiniu.rs.EntryPath(bucket_name, key3) - -rets, err = qiniu.rs.Client().batch_stat([path_1, path_2, path_3]) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -``` - - -##### 批量复制文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -path_1 = qiniu.rs.EntryPath(bucket_name, key) -path_2 = qiniu.rs.EntryPath(bucket_name, key2) -path_3 = qiniu.rs.EntryPath(bucket_name, key3) - -pair_1 = qiniu.rs.EntryPathPair(path_1, path_3) -rets, err = qiniu.rs.Client().batch_copy([pair_1]) -if not rets[0]['code'] == 200: - sys.stderr.write('error: %s ' % "复制失败") - return -``` - - -##### 批量移动文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -path_1 = qiniu.rs.EntryPath(bucket_name, key) -path_2 = qiniu.rs.EntryPath(bucket_name, key2) -path_3 = qiniu.rs.EntryPath(bucket_name, key3) - -pair_2 = qiniu.rs.EntryPathPair(path_3, path_2) -rets, err = qiniu.rs.Client().batch_move([pair_2]) -if not rets[0]['code'] == 200: - sys.stderr.write('error: %s ' % "移动失败") - return -``` - - -##### 批量删除文件 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rs - -path_1 = qiniu.rs.EntryPath(bucket_name, key) -path_2 = qiniu.rs.EntryPath(bucket_name, key2) -path_3 = qiniu.rs.EntryPath(bucket_name, key3) - -rets, err = qiniu.rs.Client().batch_delete([path_1, path_2]) -if not [ret['code'] for ret in rets] == [200, 200]: - sys.stderr.write('error: %s ' % "删除失败") - return -``` - - - -### 高级管理操作 - - -#### 列出文件 - -请求某个存储空间(bucket)下的文件列表,如果有前缀,可以按前缀(prefix)进行过滤;如果前一次返回marker就表示还有资源,下一步请求需要将marker参数填上。 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.rsf - -rets, err = qiniu.rsf.Client().list_prefix(bucket_name, prefix="test", limit=2) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -print rets - -# 从上一次list_prefix的位置继续列出文件 -rets2, err = qiniu.rsf.Client().list_prefix(bucket_name, prefix="test", limit=1, marker=rets['marker']) -if err is not None: - sys.stderr.write('error: %s ' % err) - return -print rets2 -``` - -一个典型的对整个bucket遍历的操作为: - -```{python} -def list_all(bucket, rs=None, prefix=None, limit=None): - if rs is None: - rs = qiniu.rsf.Client() - 
marker = None - err = None - while err is None: - ret, err = rs.list_prefix(bucket_name, prefix=prefix, limit=limit, marker=marker) - marker = ret.get('marker', None) - for item in ret['items']: - #do something - pass - if err is not qiniu.rsf.EOF: - # 错误处理 - pass -``` - - -### 云处理 - - -#### 图像 - - -##### 查看图像属性 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.fop -import qiniu.rs - -# 生成base_url -url = qiniu.rs.make_base_url(domain, pic_key) - -# 生成fop_url -image_info = qiniu.fop.ImageInfo() -url = image_info.make_request(url) - -# 对其签名,生成private_url。如果是公有bucket此步可以省略 -policy = qiniu.rs.GetPolicy() -url = policy.make_request(url) - -print '可以在浏览器浏览: %s' % url -``` - - -##### 查看图片EXIF信息 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.fop -import qiniu.rs - -# 生成base_url -url = qiniu.rs.make_base_url(domain, pic_key) - -# 生成fop_url -image_exif = qiniu.fop.Exif() -url = image_exif.make_request(url) - -# 对其签名,生成private_url。如果是公有bucket此步可以省略 -policy = qiniu.rs.GetPolicy() -url = policy.make_request(url) - -print '可以在浏览器浏览: %s' % url -``` - - - -##### 生成图片预览 - -```{python} -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" - -import qiniu.fop -import qiniu.rs - -iv = qiniu.fop.ImageView() -iv.width = 100 - -# 生成base_url -url = qiniu.rs.make_base_url(domain, pic_key) -# 生成fop_url -url = iv.make_request(url) -# 对其签名,生成private_url。如果是公有bucket此步可以省略 -policy = qiniu.rs.GetPolicy() -url = policy.make_request(url) -print '可以在浏览器浏览: %s' % url -``` - - -## 贡献代码 - -+ Fork -+ 创建您的特性分支 (git checkout -b my-new-feature) -+ 提交您的改动 (git commit -am 'Added some feature') -+ 将您的修改记录提交到远程 git 仓库 (git push origin my-new-feature) -+ 然后到 github 网站的该 git 远程仓库的 my-new-feature 分支下发起 Pull Request - - -## 许可证 - -> Copyright (c) 2013 qiniu.com - -基于 MIT 协议发布: - -> [www.opensource.org/licenses/MIT](http://www.opensource.org/licenses/MIT) - - diff --git a/docs/gist/conf.py b/docs/gist/conf.py deleted file mode 100644 index c9190dd7..00000000 --- a/docs/gist/conf.py +++ /dev/null @@ -1,6 +0,0 @@ -# @gist config -import qiniu.conf - -qiniu.conf.ACCESS_KEY = "" -qiniu.conf.SECRET_KEY = "" -# @endgist diff --git a/docs/gist/demo.py b/docs/gist/demo.py deleted file mode 100644 index 78afaf05..00000000 --- a/docs/gist/demo.py +++ /dev/null @@ -1,360 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import sys -import StringIO - -# @gist import_io -import qiniu.io -# @endgist -import qiniu.conf -# @gist import_rs -import qiniu.rs -# @endgist -# @gist import_fop -import qiniu.fop -# @endgist -# @gist import_resumable_io -import qiniu.resumable_io as rio -# @endgist -# @gist import_rsf -import qiniu.rsf -# @endgist - -bucket_name = None -uptoken = None -key = None -key2 = None -key3 = None -domain = None -pic_key = None - -# ---------------------------------------------------------- - -def setup(access_key, secret_key, bucketname, bucket_domain, pickey): - global bucket_name, uptoken, key, key2, domain, key3, pic_key - qiniu.conf.ACCESS_KEY = access_key - qiniu.conf.SECRET_KEY = secret_key - bucket_name = bucketname - domain = bucket_domain - pic_key = pickey - # @gist uptoken - policy = qiniu.rs.PutPolicy(bucket_name) - uptoken = policy.token() - # @endgist - key = "python-demo-put-file" - key2 = "python-demo-put-file-2" - key3 = "python-demo-put-file-3" - -def _setup(): - ''' 根据环境变量配置信息 ''' - access_key = getenv("QINIU_ACCESS_KEY") - if access_key is None: - exit("请配置环境变量 QINIU_ACCESS_KEY") - 
secret_key = getenv("QINIU_SECRET_KEY") - bucket_name = getenv("QINIU_TEST_BUCKET") - domain = getenv("QINIU_TEST_DOMAIN") - pickey = 'QINIU_UNIT_TEST_PIC' - setup(access_key, secret_key, bucket_name, domain, pickey) - -def getenv(name): - env = os.getenv(name) - if env is None: - sys.stderr.write("请配置环境变量 %s\n" % name) - exit(1) - return env - -def get_demo_list(): - return [put_file, put_binary, - resumable_put, resumable_put_file, - stat, copy, move, delete, batch, - image_info, image_exif, image_view, - list_prefix, list_prefix_all, - ] - -def run_demos(demos): - for i, demo in enumerate(demos): - print '%s.%s ' % (i+1, demo.__doc__), - demo() - print - -# ---------------------------------------------------------- -def make_private_url(domain, key): - ''' 生成私有下载链接 ''' - # @gist dntoken - base_url = qiniu.rs.make_base_url(domain, key) - policy = qiniu.rs.GetPolicy() - private_url = policy.make_request(base_url) - # @endgist - return private_url - -def put_file(): - ''' 演示上传文件的过程 ''' - # 尝试删除 - qiniu.rs.Client().delete(bucket_name, key) - - # @gist put_file - localfile = "%s" % __file__ - - ret, err = qiniu.io.put_file(uptoken, key, localfile) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - - -def put_binary(): - ''' 上传二进制数据 ''' - # 尝试删除 - qiniu.rs.Client().delete(bucket_name, key) - - # @gist put - extra = qiniu.io.PutExtra() - extra.mime_type = "text/plain" - - # data 可以是str或read()able对象 - data = StringIO.StringIO("hello!") - ret, err = qiniu.io.put(uptoken, key, data, extra) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - -def resumable_put(): - ''' 断点续上传 ''' - # 尝试删除 - qiniu.rs.Client().delete(bucket_name, key) - - # @gist resumable_put - a = "resumable upload string" - extra = rio.PutExtra(bucket_name) - extra.mime_type = "text/plain" - ret, err = rio.put(uptoken, key, StringIO.StringIO(a), len(a), extra) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print ret, - # @endgist - - -def resumable_put_file(): - ''' 断点续上传文件 ''' - # 尝试删除 - qiniu.rs.Client().delete(bucket_name, key) - - # @gist resumable_put_file - localfile = "%s" % __file__ - extra = rio.PutExtra(bucket_name) - - ret, err = rio.put_file(uptoken, key, localfile, extra) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print ret, - # @endgist - - -def stat(): - ''' 查看上传文件的内容 ''' - # @gist stat - ret, err = qiniu.rs.Client().stat(bucket_name, key) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print ret, - # @endgist - -def copy(): - ''' 复制文件 ''' - # 初始化 - qiniu.rs.Client().delete(bucket_name, key2) - - # @gist copy - ret, err = qiniu.rs.Client().copy(bucket_name, key, bucket_name, key2) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - - stat, err = qiniu.rs.Client().stat(bucket_name, key2) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print 'new file:', stat, - -def move(): - ''' 移动文件 ''' - # 初始化 - qiniu.rs.Client().delete(bucket_name, key3) - - # @gist move - ret, err = qiniu.rs.Client().move(bucket_name, key2, bucket_name, key3) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - - # 查看文件是否移动成功 - ret, err = qiniu.rs.Client().stat(bucket_name, key3) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - - # 查看文件是否被删除 - ret, err = qiniu.rs.Client().stat(bucket_name, key2) - if err is None: - sys.stderr.write('error: %s ' % "删除失败") - return - -def delete(): - ''' 删除文件 
''' - # @gist delete - ret, err = qiniu.rs.Client().delete(bucket_name, key3) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - - ret, err = qiniu.rs.Client().stat(bucket_name, key3) - if err is None: - sys.stderr.write('error: %s ' % "删除失败") - return - -def image_info(): - ''' 查看图片的信息 ''' - - # @gist image_info - # 生成base_url - url = qiniu.rs.make_base_url(domain, pic_key) - - # 生成fop_url - image_info = qiniu.fop.ImageInfo() - url = image_info.make_request(url) - - # 对其签名,生成private_url。如果是公有bucket此步可以省略 - policy = qiniu.rs.GetPolicy() - url = policy.make_request(url) - - print '可以在浏览器浏览: %s' % url - # @endgist - -def image_exif(): - ''' 查看图片的exif信息 ''' - # @gist exif - # 生成base_url - url = qiniu.rs.make_base_url(domain, pic_key) - - # 生成fop_url - image_exif = qiniu.fop.Exif() - url = image_exif.make_request(url) - - # 对其签名,生成private_url。如果是公有bucket此步可以省略 - policy = qiniu.rs.GetPolicy() - url = policy.make_request(url) - - print '可以在浏览器浏览: %s' % url - # @endgist - -def image_view(): - ''' 对图片进行预览处理 ''' - # @gist image_view - iv = qiniu.fop.ImageView() - iv.width = 100 - - # 生成base_url - url = qiniu.rs.make_base_url(domain, pic_key) - # 生成fop_url - url = iv.make_request(url) - # 对其签名,生成private_url。如果是公有bucket此步可以省略 - policy = qiniu.rs.GetPolicy() - url = policy.make_request(url) - print '可以在浏览器浏览: %s' % url - # @endgist - -def batch(): - ''' 文件处理的批量操作 ''' - # @gist batch_path - path_1 = qiniu.rs.EntryPath(bucket_name, key) - path_2 = qiniu.rs.EntryPath(bucket_name, key2) - path_3 = qiniu.rs.EntryPath(bucket_name, key3) - # @endgist - - # 查看状态 - # @gist batch_stat - rets, err = qiniu.rs.Client().batch_stat([path_1, path_2, path_3]) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - # @endgist - if not [ret['code'] for ret in rets] == [200, 612, 612]: - sys.stderr.write('error: %s ' % "批量获取状态与预期不同") - return - - # 复制 - # @gist batch_copy - pair_1 = qiniu.rs.EntryPathPair(path_1, path_3) - rets, err = qiniu.rs.Client().batch_copy([pair_1]) - if not rets[0]['code'] == 200: - sys.stderr.write('error: %s ' % "复制失败") - return - # @endgist - - qiniu.rs.Client().batch_delete([path_2]) - # @gist batch_move - pair_2 = qiniu.rs.EntryPathPair(path_3, path_2) - rets, err = qiniu.rs.Client().batch_move([pair_2]) - if not rets[0]['code'] == 200: - sys.stderr.write('error: %s ' % "移动失败") - return - # @endgist - - # 删除残留文件 - # @gist batch_delete - rets, err = qiniu.rs.Client().batch_delete([path_1, path_2]) - if not [ret['code'] for ret in rets] == [200, 200]: - sys.stderr.write('error: %s ' % "删除失败") - return - # @endgist - -def list_prefix(): - ''' 列出文件操作 ''' - # @gist list_prefix - rets, err = qiniu.rsf.Client().list_prefix(bucket_name, prefix="test", limit=2) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print rets - - # 从上一次list_prefix的位置继续列出文件 - rets2, err = qiniu.rsf.Client().list_prefix(bucket_name, prefix="test", limit=1, marker=rets['marker']) - if err is not None: - sys.stderr.write('error: %s ' % err) - return - print rets2 - # @endgist - -def list_prefix_all(): - ''' 列出所有 ''' - list_all(bucket_name, prefix='test_Z', limit=10) - -# @gist list_all -def list_all(bucket, rs=None, prefix=None, limit=None): - if rs is None: - rs = qiniu.rsf.Client() - marker = None - err = None - while err is None: - ret, err = rs.list_prefix(bucket_name, prefix=prefix, limit=limit, marker=marker) - marker = ret.get('marker', None) - for item in ret['items']: - #do something - pass - if err is not qiniu.rsf.EOF: - # 错误处理 - pass -# 
@endgist - -if __name__ == "__main__": - _setup() - - demos = get_demo_list() - run_demos(demos) diff --git a/examples/.qiniu_pythonsdk_hostscache.json b/examples/.qiniu_pythonsdk_hostscache.json new file mode 100644 index 00000000..912b307d --- /dev/null +++ b/examples/.qiniu_pythonsdk_hostscache.json @@ -0,0 +1 @@ +{"http:wxCLv4yl_5saIuOHbbZbkP-Ef3kFFFeCDYmwTdg3:upload30": {"upHosts": ["http://up.qiniu.com", "http://upload.qiniu.com", "-H up.qiniu.com http://183.131.7.3"], "ioHosts": ["http://iovip.qbox.me"], "deadline": 1598428478}} \ No newline at end of file diff --git a/examples/batch.py b/examples/batch.py new file mode 100755 index 00000000..276e6ea8 --- /dev/null +++ b/examples/batch.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager +from qiniu import build_batch_copy +from qiniu import build_batch_move, build_batch_rename + +access_key = '...' +secret_key = '...' + +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) +keys = {'123.jpg': '123.jpg'} + +# ops = build_batch_copy( 'teest', keys, 'teest',force='true') +# ops = build_batch_move('teest', keys, 'teest', force='true') +ops = build_batch_rename('teest', keys, force='true') + +ret, info = bucket.batch(ops) +print(ret) +print(info) +assert ret == {} diff --git a/examples/batch_copy.py b/examples/batch_copy.py new file mode 100644 index 00000000..6fd15b63 --- /dev/null +++ b/examples/batch_copy.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + + +""" +批量拷贝文件 + +https://developer.qiniu.com/kodo/api/1250/batch +""" + + +from qiniu import build_batch_copy, Auth, BucketManager + +access_key = '' + +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +src_bucket_name = '' + +target_bucket_name = '' + +# force为true时强制同名覆盖, 字典的键为原文件,值为目标文件 +ops = build_batch_copy(src_bucket_name, + {'src_key1': 'target_key1', + 'src_key2': 'target_key2'}, + target_bucket_name, + force='true') +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/batch_delete.py b/examples/batch_delete.py new file mode 100644 index 00000000..c74dd951 --- /dev/null +++ b/examples/batch_delete.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +批量删除文件 + +https://developer.qiniu.com/kodo/api/1250/batch +""" + + +from qiniu import build_batch_delete, Auth, BucketManager + +access_key = '' + +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = '' + +keys = ['1.gif', '2.txt', '3.png', '4.html'] + +ops = build_batch_delete(bucket_name, keys) +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/batch_move.py b/examples/batch_move.py new file mode 100644 index 00000000..3375e2ea --- /dev/null +++ b/examples/batch_move.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +批量移动文件 + +https://developer.qiniu.com/kodo/api/1250/batch +""" + + +from qiniu import build_batch_move, Auth, BucketManager + +access_key = '' + +secret_key = '' + + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +src_bucket_name = '' + +target_bucket_name = '' + +# force为true时强制同名覆盖, 字典的键为原文件,值为目标文件 +ops = build_batch_move(src_bucket_name, + {'src_key1': 'target_key1', + 'src_key2': 'target_key2'}, + target_bucket_name, + force='true') +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/batch_rename.py b/examples/batch_rename.py new file mode 100644 index 00000000..75a48289 --- /dev/null +++ b/examples/batch_rename.py @@ 
-0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +批量重命名文件 + +https://developer.qiniu.com/kodo/api/1250/batch +""" + + +from qiniu import build_batch_rename, Auth, BucketManager + +access_key = '' + +secret_key = '' + + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = '' + + +# force为true时强制同名覆盖, 字典的键为原文件,值为目标文件 +ops = build_batch_rename( + bucket_name, { + 'src_key1': 'target_key1', 'src_key2': 'target_key2'}, force='true') +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/batch_restoreAr.py b/examples/batch_restoreAr.py new file mode 100644 index 00000000..7955aa36 --- /dev/null +++ b/examples/batch_restoreAr.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +批量解冻文件 +https://developer.qiniu.com/kodo/api/1250/batch +""" + +from qiniu import build_batch_restoreAr, Auth, BucketManager + +# 七牛账号的公钥和私钥 +access_key = '' +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +# 存储空间 +bucket_name = "空间名" + +# 字典的键为需要解冻的文件,值为解冻有效期1-7 +ops = build_batch_restoreAr(bucket_name, + {"test00.png": 1, + "test01.jpeg": 2, + "test02.mp4": 3 + } + ) + +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/batch_stat.py b/examples/batch_stat.py new file mode 100644 index 00000000..9ad9d7b0 --- /dev/null +++ b/examples/batch_stat.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +批量查询文件信息 + +https://developer.qiniu.com/kodo/api/1250/batch +""" + +from qiniu import build_batch_stat, Auth, BucketManager + +access_key = '' +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = '' + +# 需要查询的文件名 +keys = ['1.gif', '2.txt', '3.png', '4.html'] + +ops = build_batch_stat(bucket_name, keys) +ret, info = bucket.batch(ops) +print(info) diff --git a/examples/bucket_domain.py b/examples/bucket_domain.py new file mode 100644 index 00000000..20600ccb --- /dev/null +++ b/examples/bucket_domain.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +""" +获取空间绑定的加速域名 +https://developer.qiniu.com/kodo/api/3949/get-the-bucket-space-domain +""" + +# 七牛账号的 公钥和私钥 +access_key = '' +secret_key = '' + +# 空间名 +bucket_name = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +ret, info = bucket.bucket_domain(bucket_name) +print(info) diff --git a/examples/bucket_info.py b/examples/bucket_info.py new file mode 100644 index 00000000..14ea4bbd --- /dev/null +++ b/examples/bucket_info.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +# 需要填写你的 Access Key 和 Secret Key +access_key = '' +secret_key = '' + +# 空间名 +bucket_name = 'bucket_name' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +ret, info = bucket.bucket_info(bucket_name) +print(info) diff --git a/examples/cdn_bandwidth.py b/examples/cdn_bandwidth.py new file mode 100644 index 00000000..32fbb40b --- /dev/null +++ b/examples/cdn_bandwidth.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +查询指定域名指定时间段内的带宽 +""" +import qiniu +from qiniu import CdnManager, DataType + + +# 账户ak,sk +access_key = '' +secret_key = '' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +startDate = '2017-07-20' + +endDate = '2017-08-20' + +granularity = 'day' + +urls = [ + 'a.example.com', + 'b.example.com' +] + +ret, info = cdn_manager.get_bandwidth_data( + urls, startDate, endDate, 
granularity) + +print(ret) +print(info) + +ret, info = cdn_manager.get_bandwidth_data( + urls, startDate, endDate, granularity, data_type=DataType.BANDWIDTH) + +print(ret) +print(info) diff --git a/examples/cdn_flux.py b/examples/cdn_flux.py new file mode 100644 index 00000000..bb42efc1 --- /dev/null +++ b/examples/cdn_flux.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +查询指定域名指定时间段内的流量 +""" +import qiniu +from qiniu import CdnManager + + +# 账户ak,sk +access_key = '' +secret_key = '' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +startDate = '2017-07-20' + +endDate = '2017-08-20' + +granularity = 'day' + +urls = [ + 'a.example.com', + 'b.example.com' +] + +# 获得指定域名流量 +ret, info = cdn_manager.get_flux_data(urls, startDate, endDate, granularity) + +print(ret) +print(info) diff --git a/examples/cdn_log.py b/examples/cdn_log.py new file mode 100644 index 00000000..aee1e5c8 --- /dev/null +++ b/examples/cdn_log.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +获取指定域名指定时间内的日志链接 +""" +import qiniu +from qiniu import CdnManager + + +# 账户ak,sk +access_key = '' +secret_key = '' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +log_date = '2017-07-20' + +urls = [ + 'a.example.com', + 'b.example.com' +] + + +ret, info = cdn_manager.get_log_list_data(urls, log_date) + +print(ret) +print(info) diff --git a/examples/change_bucket_permission.py b/examples/change_bucket_permission.py new file mode 100644 index 00000000..ae8233e2 --- /dev/null +++ b/examples/change_bucket_permission.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +# 需要填写七牛账号的 公钥和私钥 +access_key = '' +secret_key = '' + +# 空间名 +bucket_name = "" + +# private 参数必须是str类型,0表示公有空间,1表示私有空间 +private = "0" + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +ret, info = bucket.change_bucket_permission(bucket_name, private) +print(info) diff --git a/examples/change_mime.py b/examples/change_mime.py new file mode 100644 index 00000000..770405cc --- /dev/null +++ b/examples/change_mime.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +改变文件的mimeType +""" +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = 'Bucket_Name' + +key = '...' 
+ +ret, info = bucket.change_mime(bucket_name, key, 'image/jpg') +print(info) +assert info.status_code == 200 diff --git a/examples/change_status.py b/examples/change_status.py new file mode 100644 index 00000000..5f1acdd5 --- /dev/null +++ b/examples/change_status.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +改变文件状态,可用或不可用 +""" +from qiniu import Auth +from qiniu import BucketManager + +# 需要填写你的 Access Key 和 Secret Key +access_key = '' +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +# 空间名 +bucket_name = 'bernie' + +# 文件名 +key = '233.jpg' + +# 条件匹配,只有匹配上才会执行修改操作 +# cond可以填空,一个或多个 +cond = {"fsize": "186371", + "putTime": "14899798962573916", + "hash": "FiRxWzeeD6ofGTpwTZub5Fx1ozvi", + "mime": "image/png"} + +ret, info = bucket.change_status(bucket_name, key, '1', cond) +print(info) diff --git a/examples/change_type.py b/examples/change_type.py new file mode 100644 index 00000000..549f87ea --- /dev/null +++ b/examples/change_type.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = 'Bucket_Name' + +key = '...' + +# 1表示低频存储,0是标准存储 +ret, info = bucket.change_type(bucket_name, key, 1) + +print(info) diff --git a/examples/copy_to.py b/examples/copy_to.py new file mode 100755 index 00000000..a77e9397 --- /dev/null +++ b/examples/copy_to.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python-logo.png' + +# 将文件从文件key 复制到文件key2。 可以在不同bucket复制 +key2 = 'python-logo2.png' + +ret, info = bucket.copy(bucket_name, key, bucket_name, key2) +print(info) +assert ret == {} diff --git a/examples/create_bucket.py b/examples/create_bucket.py new file mode 100644 index 00000000..4b0b02c4 --- /dev/null +++ b/examples/create_bucket.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +创建存储空间 +""" + +from qiniu import Auth +from qiniu import BucketManager + + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +bucket_name = 'Bucket_Name' + +# "填写存储区域代号 z0:华东, z1:华北, z2:华南, na0:北美" +region = 'z0' + +ret, info = bucket.mkbucketv2(bucket_name, region) +print(info) +print(ret) +assert info.status_code == 200 diff --git a/examples/delete.py b/examples/delete.py new file mode 100755 index 00000000..5466ba3a --- /dev/null +++ b/examples/delete.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python-logo.png' + +# 删除bucket_name 中的文件 key +ret, info = bucket.delete(bucket_name, key) +print(info) +assert ret == {} diff --git a/examples/delete_afte_days.py b/examples/delete_afte_days.py new file mode 100755 index 00000000..00401f65 --- /dev/null +++ b/examples/delete_afte_days.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' 
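# Supplementary sketch: one way to check the (ret, info) pair that the
# BucketManager examples above return. Only the info.status_code attribute and
# the ret dict already used in those examples are assumed; bucket name and key
# are placeholders.
from qiniu import Auth, BucketManager

q = Auth('<access_key>', '<secret_key>')
bucket = BucketManager(q)

ret, info = bucket.stat('<bucket_name>', '<key>')
if info.status_code == 200:
    # ret holds the parsed JSON body, e.g. hash / fsize / mimeType for stat
    print('ok:', ret)
else:
    # any non-200 response: print the whole response object for troubleshooting
    print('request failed:', info)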
+ +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python-test.png' + +# 您要更新的生命周期,单位为天 +days = '5' + +ret, info = bucket.delete_after_days(bucket_name, key, days) +print(info) diff --git a/examples/domain_relevant.py b/examples/domain_relevant.py new file mode 100644 index 00000000..5eb79a15 --- /dev/null +++ b/examples/domain_relevant.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from qiniu import QiniuMacAuth, DomainManager +import json + +"""域名上线""" + +# 七牛账号的 公钥和私钥 +access_key = "" +secret_key = "" + +auth = QiniuMacAuth(access_key, secret_key) + +manager = DomainManager(auth) + +# 域名 +name = "zhuchangzhao2.peterpy.cn" + +ret, res = manager.domain_online(name) + +headers = {"code": res.status_code, "reqid": res.req_id, "xlog": res.x_log} +print(json.dumps(headers, indent=4, ensure_ascii=False)) +print(json.dumps(ret, indent=4, ensure_ascii=False)) + +"""域名下线""" + +# 七牛账号的 公钥和私钥 +access_key = "" +secret_key = "" + +auth = QiniuMacAuth(access_key, secret_key) + +manager = DomainManager(auth) + +# 域名 +name = "" + +ret, res = manager.domain_offline(name) + +headers = {"code": res.status_code, "reqid": res.req_id, "xlog": res.x_log} +print(json.dumps(headers, indent=4, ensure_ascii=False)) +print(json.dumps(ret, indent=4, ensure_ascii=False)) + +"""删除域名""" + +# 七牛账号的 公钥和私钥 +access_key = "" +secret_key = "" + +auth = QiniuMacAuth(access_key, secret_key) + +manager = DomainManager(auth) + +# 域名 +name = "" + +ret, res = manager.delete_domain(name) + +headers = {"code": res.status_code, "reqid": res.req_id, "xlog": res.x_log} +print(json.dumps(headers, indent=4, ensure_ascii=False)) +print(json.dumps(ret, indent=4, ensure_ascii=False)) diff --git a/examples/download.py b/examples/download.py new file mode 100755 index 00000000..f8437959 --- /dev/null +++ b/examples/download.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import requests +from qiniu import Auth + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) +bucket_domain = "..." +key = "..." + +# 有两种方式构造base_url的形式 +base_url = 'http://%s/%s' % (bucket_domain, key) + +# 或者直接输入url的方式下载 +# base_url = 'http://domain/key' + +# 可以设置token过期时间 +private_url = q.private_download_url(base_url, expires=3600) + +print(private_url) +r = requests.get(private_url) +assert r.status_code == 200 diff --git a/examples/fetch.py b/examples/fetch.py new file mode 100755 index 00000000..c66c406e --- /dev/null +++ b/examples/fetch.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +bucket_name = 'Bucket_Name' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +url = 'http://aaa.example.com/test.jpg' + +key = 'test.jpg' + +ret, info = bucket.fetch(url, bucket_name, key) +print(info) +assert ret['key'] == key diff --git a/examples/fops.py b/examples/fops.py new file mode 100755 index 00000000..f61de679 --- /dev/null +++ b/examples/fops.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth, PersistentFop, urlsafe_base64_encode + +# 对已经上传到七牛的视频发起异步转码操作 +access_key = '...' +secret_key = '...' 
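# Supplementary sketch building on examples/download.py: stream a large private
# file to disk instead of reading it fully into memory. Only
# Auth.private_download_url from the SDK plus standard requests streaming are
# used; domain, key and output path are placeholders.
import requests
from qiniu import Auth

q = Auth('<access_key>', '<secret_key>')
base_url = 'http://%s/%s' % ('<bucket_domain>', '<key>')
private_url = q.private_download_url(base_url, expires=3600)

with requests.get(private_url, stream=True) as r:
    r.raise_for_status()
    with open('downloaded.bin', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1 << 20):  # 1 MiB per chunk
            f.write(chunk)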
+q = Auth(access_key, secret_key) + +# 要转码的文件所在的空间和文件名。 +bucket_name = 'Bucket_Name' +key = '1.mp4' + +# 转码是使用的队列名称。 +pipeline = 'your_pipeline' + +# 要进行转码的转码操作,下面是一个例子。 +fops = 'avthumb/mp4/s/640x360/vb/1.25m' + +# 可以对转码后的文件进行使用saveas参数自定义命名,当然也可以不指定文件会默认命名并保存在当前空间 +saveas_key = urlsafe_base64_encode('目标Bucket_Name:自定义文件key') +fops = fops + '|saveas/' + saveas_key +ops = [] +pfop = PersistentFop(q, bucket_name, pipeline) + +ops.append(fops) +ret, info = pfop.execute(key, ops, 1) +print(info) +assert ret['persistentId'] is not None diff --git a/examples/get_domaininfo.py b/examples/get_domaininfo.py new file mode 100644 index 00000000..2e8fc112 --- /dev/null +++ b/examples/get_domaininfo.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +获取指定域名指定时间内的日志链接 +""" +import qiniu +from qiniu import DomainManager + + +# 账户ak,sk +access_key = '' +secret_key = '' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +domain_manager = DomainManager(auth) +domain = '' +ret, info = domain_manager.get_domain(domain) +print(ret) +print(info) \ No newline at end of file diff --git a/examples/kirk/README.md b/examples/kirk/README.md new file mode 100644 index 00000000..23e5a852 --- /dev/null +++ b/examples/kirk/README.md @@ -0,0 +1,5 @@ +# Examples + +``` +$ python list_apps.py +``` diff --git a/examples/kirk/list_apps.py b/examples/kirk/list_apps.py new file mode 100644 index 00000000..2062b8b1 --- /dev/null +++ b/examples/kirk/list_apps.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import sys +from qiniu import QiniuMacAuth +from qiniu import AccountClient + +access_key = sys.argv[1] +secret_key = sys.argv[2] + +acc_client = AccountClient(QiniuMacAuth(access_key, secret_key)) + +ret, info = acc_client.list_apps() + +print(ret) +print(info) + +assert len(ret) is not None diff --git a/examples/kirk/list_services.py b/examples/kirk/list_services.py new file mode 100644 index 00000000..9ec683fb --- /dev/null +++ b/examples/kirk/list_services.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import sys +from qiniu import QiniuMacAuth +from qiniu import AccountClient + +access_key = sys.argv[1] +secret_key = sys.argv[2] + +acc_client = AccountClient(QiniuMacAuth(access_key, secret_key)) +apps, info = acc_client.list_apps() + +for app in apps: + if app.get('runMode') == 'Private': + uri = app.get('uri') + qcos = acc_client.get_qcos_client(uri) + if qcos != None: + stacks, info = qcos.list_stacks() + for stack in stacks: + stack_name = stack.get('name') + services, info = qcos.list_services(stack_name) + print("list_services of '%s : %s':"%(uri, stack_name)) + print(services) + print(info) + assert len(services) is not None diff --git a/examples/kirk/list_stacks.py b/examples/kirk/list_stacks.py new file mode 100644 index 00000000..67815603 --- /dev/null +++ b/examples/kirk/list_stacks.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import sys +from qiniu import QiniuMacAuth +from qiniu import AccountClient + +access_key = sys.argv[1] +secret_key = sys.argv[2] + +acc_client = AccountClient(QiniuMacAuth(access_key, secret_key)) +apps, info = acc_client.list_apps() + +for app in apps: + if app.get('runMode') == 'Private': + uri = app.get('uri') + qcos = acc_client.get_qcos_client(uri) + if qcos != None: + stacks, info = qcos.list_stacks() + print("list_stacks of '%s':"%uri) + print(stacks) + print(info) + assert len(stacks) is not None diff --git a/examples/list.py b/examples/list.py new file mode 100755 index 00000000..b90f870f --- 
/dev/null +++ b/examples/list.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) +bucket = BucketManager(q) + +bucket_name = 'Bucket_Name' +# 前缀 +prefix = None +# 列举条目 +limit = 10 +# 列举出除'/'的所有文件以及以'/'为分隔的所有前缀 +delimiter = None +# 标记 +marker = None + +ret, eof, info = bucket.list(bucket_name, prefix, marker, limit, delimiter) + +print(info) + +assert len(ret.get('items')) is not None diff --git a/examples/list_buckets.py b/examples/list_buckets.py new file mode 100644 index 00000000..e83589f7 --- /dev/null +++ b/examples/list_buckets.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +列举账号下的空间 +""" +from qiniu import Auth +from qiniu import BucketManager + +# 需要填写你的 Access Key 和 Secret Key +access_key = '' +secret_key = '' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +# 指定需要列举的区域,填空字符串返回全部空间,为减少响应时间建议不为空 +# z0:只返回华东区域的空间 +# z1:只返回华北区域的空间 +# z2:只返回华南区域的空间 +# na0:只返回北美区域的空间 +# as0:只返回东南亚区域的空间 +region = "as0" + +ret, info = bucket.list_bucket(region) +print(info) +print(ret) diff --git a/examples/list_domains.py b/examples/list_domains.py new file mode 100755 index 00000000..3bbff58c --- /dev/null +++ b/examples/list_domains.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 要获取域名的空间名 +bucket_name = 'Bucket_Name' + +# 获取空间绑定的域名列表 +ret, info = bucket.list_domains(bucket_name) +print(ret) +print(info) diff --git a/examples/mk_bucket.py b/examples/mk_bucket.py new file mode 100644 index 00000000..09035d79 --- /dev/null +++ b/examples/mk_bucket.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +# 需要填写你的 Access Key 和 Secret Key +access_key = '...' +secret_key = '...' + +bucket_name = 'Bucket_Name' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +region = "z0" + +ret, info = bucket.mkbucketv2(bucket_name, region) +print(info) diff --git a/examples/move_to.py b/examples/move_to.py new file mode 100755 index 00000000..edf9eeeb --- /dev/null +++ b/examples/move_to.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' 
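# Supplementary sketch expanding on examples/list.py: page through every object
# in a bucket by feeding the marker returned by BucketManager.list() back into
# the next call. The 'marker' key is assumed to stay present in ret until eof
# is True, mirroring the old list_prefix example that this diff removes.
from qiniu import Auth, BucketManager

q = Auth('<access_key>', '<secret_key>')
bucket = BucketManager(q)

marker = None
while True:
    ret, eof, info = bucket.list('<bucket_name>', prefix=None, marker=marker, limit=100)
    if info.status_code != 200:
        print('list failed:', info)
        break
    for item in ret.get('items', []):
        print(item['key'], item.get('fsize'))
    if eof:
        break
    marker = ret.get('marker')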
+ +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python-logo.png' + +# 将文件从文件key 移动到文件key2,可以实现文件的重命名 可以在不同bucket移动 +key2 = 'python-logo2.png' + +ret, info = bucket.move(bucket_name, key, bucket_name, key2) +print(info) +assert ret == {} diff --git a/examples/pfop_vframe.py b/examples/pfop_vframe.py new file mode 100755 index 00000000..381e5e98 --- /dev/null +++ b/examples/pfop_vframe.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth, PersistentFop, urlsafe_base64_encode + +# 对已经上传到七牛的视频发起异步转码操作 +access_key = 'Access_Key' +secret_key = 'Secret_Key' +q = Auth(access_key, secret_key) + +# 要转码的文件所在的空间和文件名。 +bucket = 'Bucket_Name' +key = '1.mp4' + +# 转码是使用的队列名称。 +pipeline = 'pipeline_name' + +# 要进行视频截图操作。 +fops = 'vframe/jpg/offset/1/w/480/h/360/rotate/90' + +# 可以对转码后的文件进行使用saveas参数自定义命名,当然也可以不指定文件会默认命名并保存在当前空间 +saveas_key = urlsafe_base64_encode('目标Bucket_Name:自定义文件key') +fops = fops + '|saveas/' + saveas_key + +pfop = PersistentFop(q, bucket, pipeline) +ops = [] +ops.append(fops) +ret, info = pfop.execute(key, ops, 1) +print(info) +assert ret['persistentId'] is not None diff --git a/examples/pfop_watermark.py b/examples/pfop_watermark.py new file mode 100755 index 00000000..327ac839 --- /dev/null +++ b/examples/pfop_watermark.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth, PersistentFop, urlsafe_base64_encode + +# 对已经上传到七牛的视频发起异步转码操作 +access_key = 'Access_Key' +secret_key = 'Secret_Key' +q = Auth(access_key, secret_key) + +# 要转码的文件所在的空间和文件名。 +bucket = 'Bucket_Name' +key = '1.mp4' + +# 转码是使用的队列名称。 +pipeline = 'pipeline_name' + +# 需要添加水印的图片UrlSafeBase64,可以参考 https://developer.qiniu.com/dora/api/video-watermarking +base64URL = urlsafe_base64_encode( + 'http://developer.qiniu.com/resource/logo-2.jpg') + +# 视频水印参数 +fops = 'avthumb/mp4/wmImage/'+base64URL + +# 可以对转码后的文件进行使用saveas参数自定义命名,当然也可以不指定文件会默认命名并保存在当前空间 +saveas_key = urlsafe_base64_encode('目标Bucket_Name:自定义文件key') +fops = fops + '|saveas/' + saveas_key +ops = [] +pfop = PersistentFop(q, bucket, pipeline) +ops.append(fops) +ret, info = pfop.execute(key, ops, 1) +print(info) +assert ret['persistentId'] + diff --git a/examples/prefetch_to_bucket.py b/examples/prefetch_to_bucket.py new file mode 100644 index 00000000..2d447ff4 --- /dev/null +++ b/examples/prefetch_to_bucket.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +拉取镜像源资源到空间 + +https://developer.qiniu.com/kodo/api/1293/prefetch +""" + +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + + +bucket_name = 'Bucket_Name' + +q = Auth(access_key, secret_key) + +bucket = BucketManager(q) + +# 要拉取的文件名 +key = 'test.jpg' + +ret, info = bucket.prefetch(bucket_name, key) +print(info) +assert ret['key'] == key diff --git a/examples/prefetch_to_cdn.py b/examples/prefetch_to_cdn.py new file mode 100644 index 00000000..fbab188c --- /dev/null +++ b/examples/prefetch_to_cdn.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +预取资源到cdn节点 + +https://developer.qiniu.com/fusion/api/1227/file-prefetching +""" + + +import qiniu +from qiniu import CdnManager + + +# 账户ak,sk +access_key = '...' +secret_key = '...' 
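# Supplementary sketch combining the two pfop examples above: several
# processing operations can be submitted in a single PersistentFop.execute()
# call by appending one fop string per operation to the ops list. Bucket, key
# and pipeline names are placeholders.
from qiniu import Auth, PersistentFop, urlsafe_base64_encode

q = Auth('<access_key>', '<secret_key>')
pfop = PersistentFop(q, '<bucket_name>', '<pipeline_name>')

ops = []
# operation 1: grab a video frame and save it under an explicit key
saveas_cover = urlsafe_base64_encode('<bucket_name>:cover.jpg')
ops.append('vframe/jpg/offset/1/w/480/h/360' + '|saveas/' + saveas_cover)
# operation 2: transcode to mp4, keeping the default output name
ops.append('avthumb/mp4/vcodec/libx264')

ret, info = pfop.execute('<source_video_key>', ops, 1)
print(ret)
print(info)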
+ +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +# 需要刷新的文件链接 +urls = [ + 'http://aaa.example.com/doc/img/', + 'http://bbb.example.com/doc/video/' +] + + +# 刷新链接 +refresh_dir_result = cdn_manager.prefetch_urls(urls) diff --git a/examples/refresh_dirs.py b/examples/refresh_dirs.py new file mode 100644 index 00000000..5ce41438 --- /dev/null +++ b/examples/refresh_dirs.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import qiniu +from qiniu import CdnManager + + +# 账户ak,sk +access_key = '...' +secret_key = '...' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +# 需要刷新的目录链接 +dirs = [ + 'http://aaa.example.com/doc/img/', + 'http://bbb.example.com/doc/video/' +] + + +# 刷新链接 +refresh_dir_result = cdn_manager.refresh_dirs(dirs) diff --git a/examples/refresh_urls.py b/examples/refresh_urls.py new file mode 100644 index 00000000..b4194275 --- /dev/null +++ b/examples/refresh_urls.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +import qiniu +from qiniu import CdnManager + +# 账户ak,sk +access_key = '...' +secret_key = '...' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +cdn_manager = CdnManager(auth) + +# 需要刷新的文件链接 +urls = [ + 'http://aaa.example.com/a.gif', + 'http://bbb.example.com/b.jpg' +] + +# 刷新链接 +refresh_url_result = cdn_manager.refresh_urls(urls) +print(refresh_url_result) diff --git a/examples/restorear.py b/examples/restorear.py new file mode 100644 index 00000000..794c41f3 --- /dev/null +++ b/examples/restorear.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth +from qiniu import BucketManager + + +access_key = '' +secret_key = '' + +q = Auth(access_key, secret_key) +bucket = BucketManager(q) +bucket_name = '13' +key = 'fb8539c39f65d74b4e70db9133c1e9d5.mp4' +ret,info = bucket.restoreAr(bucket_name,key,3) +print(ret) +print(info) + diff --git a/examples/rtc_server.py b/examples/rtc_server.py new file mode 100644 index 00000000..94b9ebb9 --- /dev/null +++ b/examples/rtc_server.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import QiniuMacAuth +from qiniu import RtcServer, get_room_token +import time + +# 需要填写你的 Access Key 和 Secret Key +access_key = 'xxx' +secret_key = 'xxx' + +# 构建鉴权对象 +q = QiniuMacAuth(access_key, secret_key) + +# 构建直播连麦管理对象 +rtc = RtcServer(q) + +# 创建一个APP +# 首先需要写好创建APP的各个参数。参数如下 +create_data = { + "hub": 'python_test_hub', # Hub: 绑定的直播 hub,可选,使用此 hub 的资源进行推流等业务功能,hub与app 必须属于同一个七牛账户。 + "title": 'python_test_app', # Title: app 的名称,可选,注意,Title 不是唯一标识,重复 create 动作将生成多个 app。 + # "maxUsers": MaxUsers, # MaxUsers: int 类型,可选,连麦房间支持的最大在线人数。 + # "noAutoKickUser": NoAutoKickUser # NoAutoKickUser: bool 类型,可选,禁止自动踢人(抢流)。默认为 false , + # 即同一个身份的 client (app/room/user) ,新的连麦请求可以成功,旧连接被关闭。 +} +# 然后运行 rtc.CreateApp(<创建APP相关参数的字典变量>) +print(rtc.create_app(create_data)) + +# 查询一个APP +# 查询某一个具体的APP的相关信息的方法为 print( rtc.GetApp() ) ,其中 app_id 是类似 'desls83s2' +# 这样在创建时由七牛自动生成的数字字母乱序组合的字符串 +# 如果不指定具体的app_id,直接运行 print( rtc.GetApp() ) ,那么就会列举出该账号下所有的APP +print(rtc.get_app(':可选填')) + +# 删除一个APP +# 使用方法为:rtc.DeleteApp(),例如: rtc.DeleteApp('desls83s2') +print(rtc.delete_app(':必填')) + +# 更新一个APP的相关参数 +# 首先需要写好更新的APP的各个参数。参数如下: +update_data = { + "hub": "python_new_hub", # Hub: 绑定的直播 hub,可选,用于合流后 rtmp 推流。 + "title": "python_new_app", # Title: app 的名称, 可选。 + # "maxUsers": , # MaxUsers: int 类型,可选,连麦房间支持的最大在线人数。 + # "noAutoKickUser": , # NoAutoKickUser: bool 类型,可选,禁止自动踢人。 + # 
"mergePublishRtmp": { # MergePublishRtmp: 连麦合流转推 RTMP 的配置,可选择。其详细配置包括如下 + # "enable": , # Enable: 布尔类型,用于开启和关闭所有房间的合流功能。 + # "audioOnly": , # AudioOnly: 布尔类型,可选,指定是否只合成音频。 + # "height": , # Height, Width: int64,可选,指定合流输出的高和宽,默认为 640 x 480。 + # "width": , # Height, Width: int64,可选,指定合流输出的高和宽,默认为 640 x 480。 + # "fps": , # OutputFps: int64,可选,指定合流输出的帧率,默认为 25 fps 。 + # "kbps": , # OutputKbps: int64,可选,指定合流输出的码率,默认为 1000 。 + # "url": "", # URL: 合流后转推旁路直播的地址,可选,支持魔法变量配置按照连麦房间号生成不同 + # 的推流地址。如果是转推到七牛直播云,不建议使用该配置。 + + # "streamTitle": "" # StreamTitle: 转推七牛直播云的流名,可选,支持魔法变量配置按照连麦房间号 + # 生成不同的流名。例如,配置 Hub 为 qn-zhibo ,配置 StreamTitle 为 $(roomName) , + # 则房间 meeting-001 的合流将会被转推到 rtmp://pili-publish.qn-zhibo.***.com/qn-zhibo/meeting-001地址。 + # 详细配置细则,请咨询七牛技术支持。 + # } +} +# 使用方法为:rtc.UpdateApp(':必填', update_data),例如:app_id 是形如 desmfnkw5 的字符串 +print(rtc.update_app(':必填', update_data)) + +# 列举一个APP下面,某个房间的所有用户 +print(rtc.list_user(':必填', '<房间名>:必填')) + +# 踢出一个APP下面,某个房间的某个用户 +print(rtc.kick_user(':必填', '<房间名>:必填', '<客户ID>:必填')) + +# 列举一个APP下面,所有的房间 +print(rtc.list_active_rooms(':必填')) + +# 计算房间管理鉴权。连麦用户终端通过房间管理鉴权获取七牛连麦服务 +# 首先需要写好房间鉴权的各个参数。参数如下: +roomAccess = { + "appId": ":必填", # AppID: 房间所属帐号的 app 。 + "roomName": "<房间名>:必填", # RoomName: 房间名称,需满足规格 ^[a-zA-Z0-9_-]{3,64}$ + "userId": "<用户名>:必填", # UserID: 请求加入房间的用户 ID,需满足规格 ^[a-zA-Z0-9_-]{3,50}$ + # ExpireAt: int64 类型,鉴权的有效时间,传入以秒为单位的64位Unix绝对时间, + "expireAt": int(time.time()) + 3600, + # token 将在该时间后失效。 + "permission": "user" # 该用户的房间管理权限,"admin" 或 "user",默认为 "user" 。当权限角色为 "admin" 时, + # 拥有将其他用户移除出房间等特权. +} +# 获得房间管理鉴权的方法:print(RtcRoomToken ( access_key, secret_key, roomAccess ) ) +print(get_room_token(access_key, secret_key, roomAccess)) diff --git a/examples/set_object_lifecycle.py b/examples/set_object_lifecycle.py new file mode 100644 index 00000000..45005458 --- /dev/null +++ b/examples/set_object_lifecycle.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth +from qiniu import BucketManager + +access_key = 'your_ak' +secret_key = 'your_sk' + +# 初始化 Auth +q = Auth(access_key, secret_key) + +# 初始化 BucketManager +bucket = BucketManager(q) + +# 目标空间 +bucket_name = 'your_bucket_name' +# 目标 key +key = 'path/to/key' + +# bucket_name 更新 rule +ret, info = bucket.set_object_lifecycle( + bucket=bucket_name, + key=key, + to_line_after_days=10, + to_archive_after_days=20, + to_deep_archive_after_days=30, + delete_after_days=40, + cond={ + 'hash': 'object_hash' + } +) +print(ret) +print(info) diff --git a/examples/sms_test.py b/examples/sms_test.py new file mode 100644 index 00000000..75f4e51c --- /dev/null +++ b/examples/sms_test.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import QiniuMacAuth +from qiniu import Sms +import os + +access_key = os.getenv('QINIU_ACCESS_KEY') +secret_key = os.getenv('QINIU_SECRET_KEY') + +# 初始化Auth状态 +q = QiniuMacAuth(access_key, secret_key) + +# 初始化Sms +sms = Sms(q) + +""" +#创建签名 +signature = 'abs' +source = 'website' +req, info = sms.createSignature(signature, source) +print(req,info) +""" + +""" +#查询签名 +audit_status = '' +page = 1 +page_size = 20 +req, info = sms.querySignature(audit_status, page, page_size) +print(req, info) +""" + +""" +编辑签名 +id = 1136530250662940672 +signature = 'sssss' +req, info = sms.updateSignature(id, signature) +print(req, info) +""" + +""" +#删除签名 +signature_id= 1136530250662940672 +req, info = sms.deleteSignature(signature_id) +print(req, info) +""" + +""" +#创建模版 +name = '06-062-test' +template = '${test}' +type = 'notification' 
+description = '就测试啊' +signature_id = '1131464448834277376' +req, info = sms.createTemplate(name, template, type, description, signature_id) +print(req, info) +""" + +""" +#查询模版 +audit_status = '' +page = 1 +page_size = 20 +req, info = sms.queryTemplate(audit_status, page, page_size) +print(req, info) +""" + +""" +#编辑模版 +template_id = '1136589777022226432' +name = '06-06-test' +template = 'hi,你好' +description = '就测试啊' +signature_id = '1131464448834277376' +req, info = sms.updateTemplate(template_id, name, template, description, signature_id) +print(info) +""" + +""" +#删除模版 +template_id = '1136589777022226432' +req, info = sms.deleteTemplate(template_id) +print(req, info) +""" + +""" +# 查询短信发送记录 +req, info = sms.get_messages_info() +print(req, info) +""" + +""" +#发送短信 +""" +template_id = '' +mobiles = [] +parameters = {} +req, info = sms.sendMessage(template_id, mobiles, parameters) +print(req, info) diff --git a/examples/stat.py b/examples/stat.py new file mode 100755 index 00000000..6ee62209 --- /dev/null +++ b/examples/stat.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth +from qiniu import BucketManager + +access_key = '...' +secret_key = '...' + +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 初始化BucketManager +bucket = BucketManager(q) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python-logo.png' + +# 获取文件的状态信息 +ret, info = bucket.stat(bucket_name, key) +print(info) +assert 'hash' in ret diff --git a/examples/timestamp_url.py b/examples/timestamp_url.py new file mode 100644 index 00000000..0873a181 --- /dev/null +++ b/examples/timestamp_url.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +获取一个配置时间戳防盗链的url +""" + +from qiniu.services.cdn.manager import create_timestamp_anti_leech_url +import time + +host = 'http://a.example.com' + +# 配置时间戳时指定的key +encrypt_key = '' + +# 资源路径 +file_name = 'a/b/c/example.jpeg' + +# 查询字符串,不需加? +query_string = '' + +# 截止日期的时间戳,秒为单位,3600为当前时间一小时之后过期 +deadline = int(time.time()) + 3600 + + +timestamp_url = create_timestamp_anti_leech_url( + host, file_name, query_string, encrypt_key, deadline) + +print(timestamp_url) diff --git a/examples/update_cdn_sslcert.py b/examples/update_cdn_sslcert.py new file mode 100644 index 00000000..40152f56 --- /dev/null +++ b/examples/update_cdn_sslcert.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +""" +更新cdn证书(可配合let's encrypt 等完成自动证书更新) +""" +import qiniu +from qiniu import DomainManager + +# 账户ak,sk +access_key = '' +secret_key = '' + +auth = qiniu.Auth(access_key=access_key, secret_key=secret_key) +domain_manager = DomainManager(auth) + +privatekey = "ssl/www.qiniu.com/privkey.pem" +ca = "ssl/www.qiniu.com/fullchain.pem" +domain_name = 'www.qiniu.com' + +with open(privatekey, 'r') as f: + privatekey_str = f.read() + +with open(ca, 'r') as f: + ca_str = f.read() + +ret, info = domain_manager.create_sslcert( + domain_name, domain_name, privatekey_str, ca_str) +print(ret['certID']) + +ret, info = domain_manager.put_httpsconf(domain_name, ret['certID'], False) +print(info) diff --git a/examples/upload.py b/examples/upload.py new file mode 100755 index 00000000..25aef7cc --- /dev/null +++ b/examples/upload.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +# import hashlib + +from qiniu import Auth, put_file, urlsafe_base64_encode +import qiniu.config +from qiniu.compat import is_py2, is_py3 + +# 需要填写你的 Access Key 和 Secret Key +access_key = '...' +secret_key = '...' 
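# Supplementary sketch: a filled-in version of the sendMessage call at the end
# of the SMS example above. mobiles is a list of phone numbers, and parameters
# is assumed to map template variables (e.g. ${test}) to their values; every
# value shown here is a placeholder.
from qiniu import QiniuMacAuth, Sms

sms = Sms(QiniuMacAuth('<access_key>', '<secret_key>'))

template_id = '<template_id>'
mobiles = ['138xxxxxxxx']
parameters = {'test': '123456'}
req, info = sms.sendMessage(template_id, mobiles, parameters)
print(req, info)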
+ +# 构建鉴权对象 +q = Auth(access_key, secret_key) + +# 要上传的空间 +bucket_name = '' + +# 上传到七牛后保存的文件名 +key = 'my-python-七牛.png' + +# 生成上传 Token,可以指定过期时间等 +token = q.upload_token(bucket_name, key, 3600) + +# 要上传文件的本地路径 +localfile = '/Users/jemy/Documents/qiniu.png' + +# 上传时,sdk 会自动计算文件 hash 作为参数传递给服务端确保上传完整性 +# (若不一致,服务端会拒绝完成上传) +# 但在访问文件时,服务端可能不会提供 MD5 或者编码格式不是期望的 +# 因此若有需有,请通过元数据功能自定义 MD5 或其他 hash 字段 +# hasher = hashlib.md5() +# with open(localfile, 'rb') as f: +# for d in f: +# hasher.update(d) +# object_metadata = { +# 'x-qn-meta-md5': hasher.hexdigest() +# } + +ret, info = put_file( + token, + key, + localfile + # metadata=object_metadata +) +print(ret) +print(info) + +if is_py2: + assert ret['key'].encode('utf-8') == key +elif is_py3: + assert ret['key'] == key diff --git a/examples/upload_callback.py b/examples/upload_callback.py new file mode 100755 index 00000000..468120a5 --- /dev/null +++ b/examples/upload_callback.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth, put_file + +access_key = '...' +secret_key = '...' + +q = Auth(access_key, secret_key) + +bucket_name = 'Bucket_Name' + +key = 'my-python-logo.png' + +# 上传文件到七牛后, 七牛将文件名和文件大小回调给业务服务器。 +policy = { + 'callbackUrl': 'http://your.domain.com/callback.php', + 'callbackBody': 'filename=$(fname)&filesize=$(fsize)' +} + +token = q.upload_token(bucket_name, key, 3600, policy) + +localfile = './sync/bbb.jpg' + +ret, info = put_file(token, key, localfile) +print(info) +assert ret['key'] == key diff --git a/examples/upload_pfops.py b/examples/upload_pfops.py new file mode 100755 index 00000000..d8546c3f --- /dev/null +++ b/examples/upload_pfops.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +from qiniu import Auth, put_file, urlsafe_base64_encode + +access_key = '...' +secret_key = '...' 
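# Supplementary sketch of the receiving side of examples/upload_callback.py: on
# the business server the callback request can be checked with
# Auth.verify_callback before its body is trusted. The Authorization header,
# URL and body below stand in for values taken from the incoming HTTP request
# of whatever web framework handles the callback.
from qiniu import Auth

q = Auth('<access_key>', '<secret_key>')

origin_authorization = '<Authorization header of the callback request>'
callback_url = 'http://your.domain.com/callback.php'
callback_body = 'filename=a.jpg&filesize=1024'

ok = q.verify_callback(origin_authorization, callback_url, callback_body,
                       content_type='application/x-www-form-urlencoded')
print('callback verified:', ok)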
+ +# 初始化Auth状态 +q = Auth(access_key, secret_key) + +# 你要测试的空间, 并且这个key在你空间中存在 +bucket_name = 'Bucket_Name' +key = 'python_video.flv' + +# 指定转码使用的队列名称 +pipeline = 'your_pipeline' + +# 设置转码参数(以视频转码为例) +fops = 'avthumb/mp4/vcodec/libx264' + +# 通过添加'|saveas'参数,指定处理后的文件保存的bucket和key,不指定默认保存在当前空间,bucket_saved为目标bucket,bucket_saved为目标key +saveas_key = urlsafe_base64_encode('bucket_saved:bucket_saved') + +fops = fops + '|saveas/' + saveas_key + +# 在上传策略中指定fobs和pipeline +policy = { + 'persistentOps': fops, + 'persistentPipeline': pipeline +} + +token = q.upload_token(bucket_name, key, 3600, policy) + +localfile = './python_video.flv' + +ret, info = put_file(token, key, localfile) +print(info) +assert ret['key'] == key diff --git a/examples/upload_token.py b/examples/upload_token.py new file mode 100644 index 00000000..69ba24e3 --- /dev/null +++ b/examples/upload_token.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth + +# 需要填写你的 Access Key 和 Secret Key +access_key = '' +secret_key = '' + +# 构建鉴权对象 +q = Auth(access_key, secret_key) + +# 要上传的空间 +bucket_name = '' + +# 上传到七牛后保存的文件名 +key = '' + +# 生成上传 Token,可以指定过期时间等 + +# 上传策略示例 +# https://developer.qiniu.com/kodo/manual/1206/put-policy +policy = { + # 'callbackUrl':'https://requestb.in/1c7q2d31', + # 'callbackBody':'filename=$(fname)&filesize=$(fsize)' + # 'persistentOps':'imageView2/1/w/200/h/200' +} + +token = q.upload_token(bucket_name, key, 3600, policy) + +print(token) diff --git a/examples/upload_with_qvmzone.py b/examples/upload_with_qvmzone.py new file mode 100644 index 00000000..4d298f59 --- /dev/null +++ b/examples/upload_with_qvmzone.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth, put_file, urlsafe_base64_encode +import qiniu.config +from qiniu import Zone, set_default + +# 需要填写你的 Access Key 和 Secret Key +access_key = '...' +secret_key = '...' 
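# Supplementary sketch following examples/upload_token.py: a token generated
# that way can also be passed to put_data() to upload an in-memory byte string
# instead of a local file. put_data is exported by the qiniu package alongside
# put_file; bucket name and key are placeholders.
from qiniu import Auth, put_data

q = Auth('<access_key>', '<secret_key>')
token = q.upload_token('<bucket_name>', 'hello.txt', 3600)

ret, info = put_data(token, 'hello.txt', b'hello qiniu')
print(ret)
print(info)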
+ +# 构建鉴权对象 +q = Auth(access_key, secret_key) + +# 要上传的空间 +bucket_name = 'Bucket_Name' + +# 上传到七牛后保存的文件名 +key = 'my-python-logo.png' + +# 生成上传 Token,可以指定过期时间等 +token = q.upload_token(bucket_name, key, 3600) + +# 要上传文件的本地路径 +localfile = 'stat.py' + +# up_host, 指定上传域名,注意不同区域的qvm上传域名不同 +# https://developer.qiniu.com/qvm/manual/4269/qvm-kodo + +zone = Zone( + up_host='free-qvm-z1-zz.qiniup.com', + up_host_backup='free-qvm-z1-zz.qiniup.com', + io_host='iovip.qbox.me', + scheme='http') +set_default(default_zone=zone) + +ret, info = put_file(token, key, localfile) +print(info) +assert ret['key'] == key diff --git a/examples/upload_with_zone.py b/examples/upload_with_zone.py new file mode 100644 index 00000000..c8650d39 --- /dev/null +++ b/examples/upload_with_zone.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# flake8: noqa + +from qiniu import Auth, put_file +from qiniu import Zone, set_default + +# 需要填写你的 Access Key 和 Secret Key +access_key = '' +secret_key = '' + +# 构建鉴权对象 +q = Auth(access_key, secret_key) + +# 要上传的空间 +bucket_name = 'bucket_name' + +# 上传到七牛后保存的文件名 +key = 'a.jpg' + +# 生成上传 Token,可以指定过期时间等 +token = q.upload_token(bucket_name, key, 3600) + +# 要上传文件的本地路径 +localfile = '/Users/abc/Documents/a.jpg' + +# 指定固定域名的zone,不同区域uphost域名见下文档 +# https://developer.qiniu.com/kodo/manual/1671/region-endpoint +# 未指定或上传错误,sdk会根据token自动查询对应的上传域名 +# *.qiniup.com 支持https上传 +# 备用*.qiniu.com域名 不支持https上传 +# 要求https上传时,如果客户指定的两个host都错误,且sdk自动查询的第一个*.qiniup.com上传域名因意外不可用导致访问到备用*.qiniu.com会报ssl错误 +# 建议https上传时查看上面文档,指定正确的host + +zone = Zone( + up_host='https://up.qiniup.com', + up_host_backup='https://upload.qiniup.com', + io_host='http://iovip.qbox.me', + scheme='https') +set_default(default_zone=zone) + +ret, info = put_file(token, key, localfile) +print(info) diff --git a/manual_test_kirk.py b/manual_test_kirk.py new file mode 100644 index 00000000..e6791f30 --- /dev/null +++ b/manual_test_kirk.py @@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- +""" +======================= + 注意:必须手动运行 +======================= +""" +import os +import sys +import time +import logging +import pytest +from qiniu import auth +from qiniu.services import compute + + +access_key = os.getenv('QINIU_ACCESS_KEY') +secret_key = os.getenv('QINIU_SECRET_KEY') +qn_auth = auth.QiniuMacAuth(access_key, secret_key) +acc_client = compute.app.AccountClient(qn_auth) +qcos_client = None +user_name = '' +app_uri = '' +app_name = 'appjust4test' +app_region = 'nq' + + +def setup_module(module): + acc_client = compute.app.AccountClient(qn_auth) + user_info = acc_client.get_account_info()[0] + acc_client.create_app({'name': app_name, 'title': 'whatever', 'region': app_region}) + + module.user_name = user_info['name'] + module.app_uri = '{0}.{1}'.format(module.user_name, app_name) + module.qcos_client = acc_client.create_qcos_client(module.app_uri) + + +def teardown_module(module): + module.app_uri + acc_client.delete_app(module.app_uri) + + +class TestApp: + """应用测试用例""" + + def test_create_and_delete_app(self): + _name_create = 'appjust4testcreate' + _uri_create = '' + _args = {'name': _name_create, 'title': 'whatever', 'region': app_region} + + with Call(acc_client, 'create_app', _args) as r: + assert r[0] is not None + _uri_create = r[0]['uri'] + + with Call(acc_client, 'delete_app', _uri_create) as r: + assert r[0] == {} + + def test_get_app_keys(self): + with Call(acc_client, 'get_app_keys', app_uri) as r: + assert len(r[0]) > 0 + + def test_get_account_info(self): + with Call(acc_client, 'get_account_info') as r: + assert r[0] is not None + + 
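# Supplementary sketch of verifying an upload locally: qiniu.etag() computes
# the same hash algorithm the storage service uses, so the 'hash' field
# returned by put_file is expected to match the locally computed value. File
# path, bucket name and key are placeholders.
from qiniu import Auth, put_file, etag

q = Auth('<access_key>', '<secret_key>')
token = q.upload_token('<bucket_name>', '<key>', 3600)

ret, info = put_file(token, '<key>', '<local_file_path>')
if info.status_code == 200 and ret['hash'] == etag('<local_file_path>'):
    print('upload verified')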
+class TestStack: + """服务组测试用例""" + + _name = 'just4test' + _name_del = 'just4del' + _name_create = 'just4create' + + @classmethod + def setup_class(cls): + qcos_client.create_stack({'name': cls._name}) + qcos_client.create_stack({'name': cls._name_del}) + + @classmethod + def teardown_class(cls): + qcos_client.delete_stack(cls._name) + qcos_client.delete_stack(cls._name_create) + qcos_client.delete_stack(cls._name_del) + + def test_create_stack(self): + with Call(qcos_client, 'create_stack', {'name': self._name_create}) as r: + assert r[0] == {} + + def test_delete_stack(self): + with Call(qcos_client, 'delete_stack', self._name_del) as r: + assert r[0] == {} + + def test_list_stacks(self): + with Call(qcos_client, 'list_stacks') as r: + assert len(r) > 0 + assert self._name in [stack['name'] for stack in r[0]] + + def test_get_stack(self): + with Call(qcos_client, 'get_stack', self._name) as r: + assert r[0]['name'] == self._name + + def test_start_stack(self): + with Call(qcos_client, 'start_stack', self._name) as r: + assert r[0] == {} + + def test_stop_stack(self): + with Call(qcos_client, 'stop_stack', self._name) as r: + assert r[0] == {} + + +class TestService: + """服务测试用例""" + + _stack = 'just4test2' + _name = 'spaceship' + _name_del = 'spaceship4del' + _name_create = 'spaceship4create' + _image = 'library/nginx:stable' + _unit = '1U1G' + _spec = {'image': _image, 'unitType': _unit} + + @classmethod + def setup_class(cls): + qcos_client.delete_stack(cls._stack) + qcos_client.create_stack({'name': cls._stack}) + qcos_client.create_service(cls._stack, {'name': cls._name, 'spec': cls._spec}) + qcos_client.create_service(cls._stack, {'name': cls._name_del, 'spec': cls._spec}) + + _debug_info('waiting for services to setup ...') + time.sleep(10) + + @classmethod + def teardown_class(cls): + # 删除stack会清理所有相关服务 + qcos_client.delete_stack(cls._stack) + + def test_create_service(self): + service = {'name': self._name_create, 'spec': self._spec} + with Call(qcos_client, 'create_service', self._stack, service) as r: + assert r[0] == {} + + def test_delete_service(self): + with Call(qcos_client, 'delete_service', self._stack, self._name_del) as r: + assert r[0] == {} + + def test_list_services(self): + with Call(qcos_client, 'list_services', self._stack) as r: + assert len(r) > 0 + assert self._name in [service['name'] for service in r[0]] + + def test_get_service_inspect(self): + with Call(qcos_client, 'get_service_inspect', self._stack, self._name) as r: + assert r[0]['name'] == self._name + assert r[0]['spec']['unitType'] == self._unit + + def test_update_service(self): + data = {'spec': {'autoRestart': 'ON_FAILURE'}} + with Call(qcos_client, 'update_service', self._stack, self._name, data) as r: + assert r[0] == {} + + _debug_info('waiting for update services to ready ...') + time.sleep(10) + + def test_scale_service(self): + data = {'instanceNum': 2} + with Call(qcos_client, 'scale_service', self._stack, self._name, data) as r: + assert r[0] == {} + + _debug_info('waiting for scale services to ready ...') + time.sleep(10) + + +class TestContainer: + """容器测试用例""" + + _stack = 'just4test3' + _service = 'spaceship' + _spec = {'image': 'library/nginx:stable', 'unitType': '1U1G'} + # 为了方便测试,容器数量最少为2 + _instanceNum = 2 + + @classmethod + def setup_class(cls): + qcos_client.delete_stack(cls._stack) + qcos_client.create_stack({'name': cls._stack}) + qcos_client.create_service(cls._stack, {'name': cls._service, 'spec': cls._spec, 'instanceNum': cls._instanceNum}) + + _debug_info('waiting for 
containers to setup ...') + time.sleep(10) + + @classmethod + def teardown_class(cls): + qcos_client.delete_stack(cls._stack) + + def test_list_containers(self): + with Call(qcos_client, 'list_containers', self._stack, self._service) as r: + assert len(r[0]) > 0 + assert len(r[0]) <= self._instanceNum + + def test_get_container_inspect(self): + ips = qcos_client.list_containers(self._stack, self._service)[0] + # 查看第1个容器 + with Call(qcos_client, 'get_container_inspect', ips[0]) as r: + assert r[0]['ip'] == ips[0] + + def test_stop_and_strat_container(self): + ips = qcos_client.list_containers(self._stack, self._service)[0] + # 停止第2个容器 + with Call(qcos_client, 'stop_container', ips[1]) as r: + assert r[0] == {} + + _debug_info('waiting for containers to stop ...') + time.sleep(3) + + # 启动第2个容器 + with Call(qcos_client, 'start_container', ips[1]) as r: + assert r[0] == {} + + def test_restart_container(self): + ips = qcos_client.list_containers(self._stack, self._service)[0] + # 重启第1个容器 + with Call(qcos_client, 'restart_container', ips[0]) as r: + assert r[0] == {} + + +class TestAp: + """接入点测试用例""" + + _stack = 'just4test4' + _service = 'spaceship' + _spec = {'image': 'library/nginx:stable', 'unitType': '1U1G'} + # 为了方便测试,容器数量最少为2 + _instanceNum = 2 + _apid_domain = {} + _apid_ip = {} + _apid_ip_port = 8080 + _user_domain = 'just4test001.example.com' + + @classmethod + def setup_class(cls): + qcos_client.delete_stack(cls._stack) + qcos_client.create_stack({'name': cls._stack}) + qcos_client.create_service(cls._stack, {'name': cls._service, 'spec': cls._spec, 'instanceNum': cls._instanceNum}) + cls._ap_domain = qcos_client.create_ap({'type': 'DOMAIN', 'provider': 'Telecom', 'unitType': 'BW_10M', 'title': 'public1'})[0] + cls._ap_ip = qcos_client.create_ap({'type': 'PUBLIC_IP', 'provider': 'Telecom', 'unitType': 'BW_10M', 'title': 'public2'})[0] + qcos_client.set_ap_port(cls._ap_ip['apid'], cls._apid_ip_port, {'proto': 'http'}) + + @classmethod + def teardown_class(cls): + qcos_client.delete_stack(cls._stack) + qcos_client.delete_ap(cls._ap_domain['apid']) + qcos_client.delete_ap(cls._ap_ip['apid']) + + def test_list_aps(self): + with Call(qcos_client, 'list_aps') as r: + assert len(r[0]) > 0 + assert self._ap_domain['apid'] in [ap['apid'] for ap in r[0]] + assert self._ap_domain['apid'] in [ap['apid'] for ap in r[0]] + + def test_create_and_delete_ap(self): + apid = 0 + ap = {'type': 'DOMAIN', 'provider': 'Telecom', 'unitType': 'BW_10M', 'title': 'public1'} + + with Call(qcos_client, 'create_ap', ap) as r: + assert r[0] is not None and r[0]['apid'] > 0 + apid = r[0]['apid'] + + with Call(qcos_client, 'delete_ap', apid) as r: + assert r[0] == {} + + def test_search_ap(self): + with Call(qcos_client, 'search_ap', 'ip', self._ap_ip['ip']) as r: + assert str(r[0]['apid']) == self._ap_ip['apid'] + + def test_get_ap(self): + with Call(qcos_client, 'get_ap', self._ap_ip['apid']) as r: + assert str(r[0]['apid']) == self._ap_ip['apid'] + + def test_update_ap(self): + with Call(qcos_client, 'update_ap', self._ap_ip['apid'], {}) as r: + assert r[0] == {} + + def test_set_ap_port(self): + with Call(qcos_client, 'set_ap_port', self._ap_ip['apid'], 80, {'proto': 'http'}) as r: + assert r[0] == {} + + def test_publish_ap(self): + domain = {'userDomain': self._user_domain} + with Call(qcos_client, 'publish_ap', self._ap_domain['apid'], domain) as r: + assert r[0] == {} + + def test_unpublish_ap(self): + domain = {'userDomain': self._user_domain} + with Call(qcos_client, 'unpublish_ap', 
self._ap_domain['apid'], domain) as r: + assert r[0] == {} + + def test_get_ap_port_healthcheck(self): + with Call(qcos_client, 'get_ap_port_healthcheck', self._ap_ip['apid'], self._apid_ip_port) as r: + assert r[0] is not None + + def test_disable_ap_port(self): + with Call(qcos_client, 'disable_ap_port', self._ap_ip['apid'], self._apid_ip_port) as r: + assert r[0] == {} + + def test_enable_ap_port(self): + with Call(qcos_client, 'enable_ap_port', self._ap_ip['apid'], self._apid_ip_port) as r: + assert r[0] == {} + + def test_get_ap_providers(self): + with Call(qcos_client, 'get_ap_providers') as r: + assert len(r[0]) > 0 + + +class Call(object): + def __init__(self, obj, method, *args): + self.context = (obj, method, args) + self.result = None + + def __enter__(self): + self.result = getattr(self.context[0], self.context[1])(*self.context[2]) + assert self.result is not None + return self.result + + def __exit__(self, type, value, traceback): + _debug_info('\033[94m%s.%s\x1b[0m: %s', self.context[0].__class__, self.context[1], self.result) + + +def _debug_info(*args): + logger = logging.getLogger(__name__) + logger.debug(*args) + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) + pytest.main() diff --git a/qiniu/__init__.py b/qiniu/__init__.py index f461c30e..d097fdb8 100644 --- a/qiniu/__init__.py +++ b/qiniu/__init__.py @@ -1,10 +1,31 @@ +# -*- coding: utf-8 -*- ''' Qiniu Resource Storage SDK for Python ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For detailed document, please see: - + ''' -# -*- coding: utf-8 -*- -__version__ = '6.1.3' +# flake8: noqa + +__version__ = '7.16.0' + +from .auth import Auth, QiniuMacAuth + +from .config import set_default +from .zone import Zone +from .region import LegacyRegion as Region + +from .services.storage.bucket import BucketManager, build_batch_copy, build_batch_rename, build_batch_move, \ + build_batch_stat, build_batch_delete, build_batch_restoreAr, build_batch_restore_ar +from .services.storage.uploader import put_data, put_file, put_stream +from .services.storage.upload_progress_recorder import UploadProgressRecorder +from .services.cdn.manager import CdnManager, DataType, create_timestamp_anti_leech_url, DomainManager +from .services.processing.pfop import PersistentFop +from .services.processing.cmd import build_op, pipe_cmd, op_save +from .services.compute.app import AccountClient +from .services.compute.qcos_api import QcosClient +from .services.sms.sms import Sms +from .services.pili.rtc_server_manager import RtcServer, get_room_token +from .utils import urlsafe_base64_encode, urlsafe_base64_decode, etag, entry, decode_entry, canonical_mime_header_key diff --git a/qiniu/auth.py b/qiniu/auth.py new file mode 100644 index 00000000..1647199e --- /dev/null +++ b/qiniu/auth.py @@ -0,0 +1,445 @@ +# -*- coding: utf-8 -*- +import base64 +from datetime import datetime +import hmac +import os +import time +from hashlib import sha1 +from requests.auth import AuthBase +from .compat import urlparse, json, b +from .utils import urlsafe_base64_encode, canonical_mime_header_key + +# 上传策略,参数规格详见 +# https://developer.qiniu.com/kodo/manual/1206/put-policy +# the `str()` prevent implicit concatenation of string. DON'T remove it. +# for example, avoid you lost comma at the end of line in middle. 
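# Supplementary sketch of how a few of the put-policy fields listed below reach
# the server: they are passed as the policy dict of Auth.upload_token(), and
# with the default strict_policy=True only keys present in _policy_fields are
# kept when the token is signed. The field values here are illustrative
# placeholders.
from qiniu import Auth

q = Auth('<access_key>', '<secret_key>')
policy = {
    'returnBody': '{"key":"$(key)","hash":"$(etag)","fsize":$(fsize)}',
    'fsizeLimit': 10 * 1024 * 1024,   # reject uploads larger than 10 MB
    'deleteAfterDays': 7,             # remove the object automatically after a week
}
token = q.upload_token('<bucket_name>', '<key>', 3600, policy)
print(token)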
+_policy_fields = { + str('callbackUrl'), # 回调URL + str('callbackBody'), # 回调Body + str('callbackHost'), # 回调URL指定的Host + str('callbackBodyType'), # 回调Body的Content-Type + str('callbackFetchKey'), # 回调FetchKey模式开关 + + str('returnUrl'), # 上传端的303跳转URL + str('returnBody'), # 上传端简单反馈获取的Body + + str('endUser'), # 回调时上传端标识 + str('saveKey'), # 自定义资源名 + str('forceSaveKey'), # saveKey的优先级设置。为 true 时,saveKey不能为空,会忽略客户端指定的key,强制使用saveKey进行文件命名。参数不设置时,默认值为false + str('insertOnly'), # 插入模式开关 + + str('detectMime'), # MimeType侦测开关 + str('mimeLimit'), # MimeType限制 + str('fsizeLimit'), # 上传文件大小限制 + str('fsizeMin'), # 上传文件最少字节数 + str('keylimit'), # 设置允许上传的key列表,字符串数组类型,数组长度不可超过20个,如果设置了这个字段,上传时必须提供key + + str('persistentOps'), # 持久化处理操作,与 persistentWorkflowTemplateID 二选一 + str('persistentNotifyUrl'), # 持久化处理结果通知URL + str('persistentPipeline'), # 持久化处理独享队列 + str('persistentType'), # 为 `1` 时,开启闲时任务,必须是 int 类型 + str('persistentWorkflowTemplateID'), # 工作流模板 ID,与 persistentOps 二选一 + + str('deleteAfterDays'), # 文件多少天后自动删除 + str('fileType'), # 文件的存储类型,0为标准存储,1为低频存储,2为归档存储,3为深度归档存储,4为归档直读存储 + str('isPrefixalScope'), # 指定上传文件必须使用的前缀 + + str('transform'), # deprecated + str('transformFallbackKey'), # deprecated + str('transformFallbackMode'), # deprecated +} + + +class Auth(object): + """七牛安全机制类 + + 该类主要内容是七牛上传凭证、下载凭证、管理凭证三种凭证的签名接口的实现,以及回调验证。 + + Attributes: + __access_key: 账号密钥对中的accessKey,详见 https://portal.qiniu.com/user/key + __secret_key: 账号密钥对重的secretKey,详见 https://portal.qiniu.com/user/key + """ + + def __init__(self, access_key, secret_key, disable_qiniu_timestamp_signature=None): + """初始化Auth类""" + self.__checkKey(access_key, secret_key) + self.__access_key = access_key + self.__secret_key = b(secret_key) + self.disable_qiniu_timestamp_signature = disable_qiniu_timestamp_signature + + def get_access_key(self): + return self.__access_key + + def get_secret_key(self): + return self.__secret_key + + def __token(self, data): + data = b(data) + hashed = hmac.new(self.__secret_key, data, sha1) + return urlsafe_base64_encode(hashed.digest()) + + def token(self, data): + return '{0}:{1}'.format(self.__access_key, self.__token(data)) + + def token_with_data(self, data): + data = urlsafe_base64_encode(data) + return '{0}:{1}:{2}'.format( + self.__access_key, self.__token(data), data) + + def token_of_request(self, url, body=None, content_type=None): + """带请求体的签名(本质上是管理凭证的签名) + + Args: + url: 待签名请求的url + body: 待签名请求的body + content_type: 待签名请求的body的Content-Type + + Returns: + 管理凭证 + """ + parsed_url = urlparse(url) + query = parsed_url.query + path = parsed_url.path + data = path + if query != '': + data = ''.join([data, '?', query]) + data = ''.join([data, "\n"]) + + if body: + mimes = [ + 'application/x-www-form-urlencoded' + ] + if content_type in mimes: + data += body + + return '{0}:{1}'.format(self.__access_key, self.__token(data)) + + @staticmethod + def __checkKey(access_key, secret_key): + if not (access_key and secret_key): + raise ValueError('invalid key') + + def private_download_url(self, url, expires=3600): + """生成私有资源下载链接 + + Args: + url: 私有空间资源的原始URL + expires: 下载凭证有效期,默认为3600s + + Returns: + 私有资源的下载链接 + """ + deadline = int(time.time()) + expires + if '?' in url: + url += '&' + else: + url += '?' 
+ url = '{0}e={1}'.format(url, str(deadline)) + + token = self.token(url) + return '{0}&token={1}'.format(url, token) + + def upload_token( + self, + bucket, + key=None, + expires=3600, + policy=None, + strict_policy=True): + """生成上传凭证 + + Args: + bucket: 上传的空间名 + key: 上传的文件名,默认为空 + expires: 上传凭证的过期时间,默认为3600s + policy: 上传策略,默认为空 + strict_policy: 严格模式,将校验 policy 字段,默认为 True + + Returns: + 上传凭证 + """ + if bucket is None or bucket == '': + raise ValueError('invalid bucket name') + + scope = bucket + if key is not None: + scope = '{0}:{1}'.format(bucket, key) + + args = dict( + scope=scope, + deadline=int(time.time()) + expires, + ) + + if policy is not None: + self.__copy_policy(policy, args, strict_policy) + + return self.__upload_token(args) + + @staticmethod + def up_token_decode(up_token): + up_token_list = up_token.split(':') + ak = up_token_list[0] + sign = base64.urlsafe_b64decode(up_token_list[1]) + decode_policy = base64.urlsafe_b64decode(up_token_list[2]) + decode_policy = decode_policy.decode('utf-8') + dict_policy = json.loads(decode_policy) + return ak, sign, dict_policy + + @staticmethod + def get_bucket_name(up_token): + _, _, policy = Auth.up_token_decode(up_token) + if not policy or not policy['scope']: + return None + return policy['scope'].split(':', 1)[0] + + def __upload_token(self, policy): + data = json.dumps(policy, separators=(',', ':')) + return self.token_with_data(data) + + def verify_callback( + self, + origin_authorization, + url, + body, + content_type='application/x-www-form-urlencoded', + method='GET', + headers=None + ): + """ + Qbox 回调验证 + + Parameters + ---------- + origin_authorization: str + 回调时请求 Header 中的 Authorization 字段 + url: str + 回调请求的 url + body: str + 回调请求的 body + content_type: str + 回调请求的 Content-Type + method: str + 回调请求的 method,Qiniu 签名必须传入,默认 GET + headers: dict + 回调请求的 headers,Qiniu 签名必须传入,默认为空字典 + + Returns + ------- + bool + 返回 True 表示验证成功,返回 False 表示验证失败 + """ + if headers is None: + headers = {} + + # 兼容 Qiniu 签名 + if origin_authorization.startswith("Qiniu"): + qn_auth = QiniuMacAuth( + access_key=self.__access_key, + secret_key=self.__secret_key, + disable_qiniu_timestamp_signature=True + ) + return qn_auth.verify_callback( + origin_authorization, + url=url, + body=body, + content_type=content_type, + method=method, + headers=headers + ) + + token = self.token_of_request(url, body, content_type) + authorization = 'QBox {0}'.format(token) + return origin_authorization == authorization + + @staticmethod + def __copy_policy(policy, to, strict_policy): + for k, v in policy.items(): + if (not strict_policy) or k in _policy_fields: + to[k] = v + + +class RequestsAuth(AuthBase): + def __init__(self, auth): + self.auth = auth + + def __call__(self, r): + if r.body is not None and r.headers['Content-Type'] == 'application/x-www-form-urlencoded': + token = self.auth.token_of_request( + r.url, r.body, 'application/x-www-form-urlencoded') + else: + token = self.auth.token_of_request(r.url) + r.headers['Authorization'] = 'QBox {0}'.format(token) + return r + + +class QiniuMacAuth(object): + """ + Sign Requests + + Attributes: + __access_key + __secret_key + + https://developer.qiniu.com/kodo/1201/access-token + """ + + def __init__(self, access_key, secret_key, disable_qiniu_timestamp_signature=None): + self.qiniu_header_prefix = "X-Qiniu-" + self.__checkKey(access_key, secret_key) + self.__access_key = access_key + self.__secret_key = b(secret_key) + self.disable_qiniu_timestamp_signature = disable_qiniu_timestamp_signature + + def __token(self, 
data): + data = b(data) + hashed = hmac.new(self.__secret_key, data, sha1) + return urlsafe_base64_encode(hashed.digest()) + + @property + def should_sign_with_timestamp(self): + if self.disable_qiniu_timestamp_signature is not None: + return not self.disable_qiniu_timestamp_signature + if os.getenv('DISABLE_QINIU_TIMESTAMP_SIGNATURE', '').lower() == 'true': + return False + return True + + def token_of_request( + self, + method, + host, + url, + qheaders, + content_type=None, + body=None): + """ + + Host: + Content-Type: + [ Headers] + + [] #这里的 只有在 存在且不为 application/octet-stream 时才签进去。 + + """ + parsed_url = urlparse(url) + netloc = parsed_url.netloc + path = parsed_url.path + query = parsed_url.query + + if not host: + host = netloc + + path_with_query = path + if query != '': + path_with_query = ''.join([path_with_query, '?', query]) + data = ''.join([ + "%s %s" % (method, path_with_query), + "\n", + "Host: %s" % host + ]) + + if content_type: + data += "\n" + data += "Content-Type: %s" % content_type + + if qheaders: + data += "\n" + data += qheaders + + data += "\n\n" + + if content_type and content_type != "application/octet-stream" and body: + if isinstance(body, bytes): + data += body.decode(encoding='UTF-8') + else: + data += body + return '{0}:{1}'.format(self.__access_key, self.__token(data)) + + def qiniu_headers(self, headers): + qiniu_fields = [ + key for key in headers + if key.startswith(self.qiniu_header_prefix) and len(key) > len(self.qiniu_header_prefix) + ] + return '\n'.join([ + '%s: %s' % (canonical_mime_header_key(key), headers.get(key)) for key in sorted(qiniu_fields) + ]) + + def verify_callback( + self, + origin_authorization, + url, + body, + content_type='application/x-www-form-urlencoded', + method='GET', + headers=None + ): + """ + Qiniu 回调验证 + + Parameters + ---------- + origin_authorization: str + 回调时请求 Header 中的 Authorization 字段 + url: str + 回调请求的 url + body: str + 回调请求的 body + content_type: str + 回调请求的 Content-Type + method: str + 回调请求的 Method + headers: dict + 回调请求的 headers + + Returns + ------- + + """ + if headers is None: + headers = {} + token = self.token_of_request( + method=method, + host=headers.get('Host', None), + url=url, + qheaders=self.qiniu_headers(headers), + content_type=content_type, + body=body + ) + authorization = 'Qiniu {0}'.format(token) + return origin_authorization == authorization + + @staticmethod + def __checkKey(access_key, secret_key): + if not (access_key and secret_key): + raise ValueError('QiniuMacAuthSign : Invalid key') + + +class QiniuMacRequestsAuth(AuthBase): + """ + Attributes: + auth (QiniuMacAuth): + """ + def __init__(self, auth): + """ + Args: + auth (QiniuMacAuth): + """ + self.auth = auth + + def __call__(self, r): + if r.headers.get('Content-Type', None) is None: + r.headers['Content-Type'] = 'application/x-www-form-urlencoded' + + if self.auth.should_sign_with_timestamp: + x_qiniu_date = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ') + r.headers['X-Qiniu-Date'] = x_qiniu_date + + token = self.auth.token_of_request( + r.method, + r.headers.get('Host', None), + r.url, + self.auth.qiniu_headers(r.headers), + r.headers.get('Content-Type', None), + r.body + ) + r.headers['Authorization'] = 'Qiniu {0}'.format(token) + return r diff --git a/qiniu/auth/digest.py b/qiniu/auth/digest.py deleted file mode 100644 index 7ef3a542..00000000 --- a/qiniu/auth/digest.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -from urlparse import urlparse -import hmac -from hashlib import sha1 -from base64 import 
urlsafe_b64encode - -from .. import rpc -from .. import conf - -class Mac(object): - access = None - secret = None - def __init__(self, access=None, secret=None): - if access is None and secret is None: - access, secret = conf.ACCESS_KEY, conf.SECRET_KEY - self.access, self.secret = access, secret - - def __sign(self, data): - hashed = hmac.new(self.secret, data, sha1) - return urlsafe_b64encode(hashed.digest()) - - def sign(self, data): - return '%s:%s' % (self.access, self.__sign(data)) - - def sign_with_data(self, b): - data = urlsafe_b64encode(b) - return '%s:%s:%s' % (self.access, self.__sign(data), data) - - def sign_request(self, path, body, content_type): - parsedurl = urlparse(path) - p_query = parsedurl.query - p_path = parsedurl.path - data = p_path - if p_query != "": - data = ''.join([data, '?', p_query]) - data = ''.join([data, "\n"]) - - if body: - incBody = [ - "application/x-www-form-urlencoded", - ] - if content_type in incBody: - data += body - - return '%s:%s' % (self.access, self.__sign(data)) - - -class Client(rpc.Client): - def __init__(self, host, mac=None): - if mac is None: - mac = Mac() - super(Client, self).__init__(host) - self.mac = mac - - def round_tripper(self, method, path, body): - token = self.mac.sign_request(path, body, self._header.get("Content-Type")) - self.set_header("Authorization", "QBox %s" % token) - return super(Client, self).round_tripper(method, path, body) diff --git a/qiniu/auth/up.py b/qiniu/auth/up.py deleted file mode 100644 index b13aa629..00000000 --- a/qiniu/auth/up.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -from .. import conf -from .. import rpc - - -class Client(rpc.Client): - up_token = None - - def __init__(self, up_token, host=None): - if host is None: - host = conf.UP_HOST - if host.startswith("http://"): - host = host[7:] - self.up_token = up_token - super(Client, self).__init__(host) - - def round_tripper(self, method, path, body): - self.set_header("Authorization", "UpToken %s" % self.up_token) - return super(Client, self).round_tripper(method, path, body) diff --git a/qiniu/compat.py b/qiniu/compat.py new file mode 100644 index 00000000..079aef70 --- /dev/null +++ b/qiniu/compat.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- + +""" +pythoncompat +""" + +import os +import sys + +try: + import simplejson as json +except (ImportError, SyntaxError): + # simplejson does not support Python 3.2, it thows a SyntaxError + # because of u'...' Unicode literals. + import json # noqa + +# ------- +# Platform +# ------- + +is_windows = sys.platform == 'win32' +is_linux = sys.platform == 'linux' +is_macos = sys.platform == 'darwin' + +# ------- +# Pythons +# ------- + +_ver = sys.version_info + +#: Python 2.x? +is_py2 = (_ver[0] == 2) + +#: Python 3.x? 
+is_py3 = (_ver[0] == 3) + + +# --------- +# Specifics +# --------- + +if is_py2: + from urllib import urlencode # noqa + from urlparse import urlparse # noqa + import StringIO + StringIO = BytesIO = StringIO.StringIO + + builtin_str = str + bytes = str + str = unicode # noqa + basestring = basestring # noqa + numeric_types = (int, long, float) # noqa + + def b(data): + return bytes(data) + + def s(data): + return bytes(data) + + def u(data): + return unicode(data, 'unicode_escape') # noqa + + def is_seekable(data): + try: + data.seek(0, os.SEEK_CUR) + return True + except (AttributeError, IOError): + return False + +elif is_py3: + from urllib.parse import urlparse, urlencode # noqa + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + + builtin_str = str + str = str + bytes = bytes + basestring = (str, bytes) + numeric_types = (int, float) + + def b(data): + if isinstance(data, str): + return data.encode('utf-8') + return data + + def s(data): + if isinstance(data, bytes): + data = data.decode('utf-8') + return data + + def u(data): + return data + + def is_seekable(data): + return data.seekable() diff --git a/qiniu/conf.py b/qiniu/conf.py deleted file mode 100644 index d43b3144..00000000 --- a/qiniu/conf.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- - -ACCESS_KEY = "" -SECRET_KEY = "" - -RS_HOST = "rs.qbox.me" -RSF_HOST = "rsf.qbox.me" -UP_HOST = "up.qiniu.com" - -from . import __version__ -USER_AGENT = "qiniu python-sdk v%s" % __version__ diff --git a/qiniu/config.py b/qiniu/config.py new file mode 100644 index 00000000..338c1399 --- /dev/null +++ b/qiniu/config.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +RS_HOST = 'http://rs.qiniu.com' # 管理操作Host +RSF_HOST = 'http://rsf.qbox.me' # 列举操作Host +API_HOST = 'http://api.qiniuapi.com' # 数据处理操作Host +QUERY_REGION_HOST = 'https://uc.qiniuapi.com' +QUERY_REGION_BACKUP_HOSTS = [ + 'kodo-config.qiniuapi.com', + 'uc.qbox.me' +] +UC_HOST = QUERY_REGION_HOST # 获取空间信息Host +UC_BACKUP_HOSTS = QUERY_REGION_BACKUP_HOSTS + +_BLOCK_SIZE = 1024 * 1024 * 4 # 断点续传分块大小,该参数为接口规格,暂不支持修改 + +_config = { + 'default_zone': None, + 'default_rs_host': RS_HOST, + 'default_rsf_host': RSF_HOST, + 'default_api_host': API_HOST, + 'default_uc_host': UC_HOST, + 'default_uc_backup_hosts': UC_BACKUP_HOSTS, + 'default_query_region_host': QUERY_REGION_HOST, + 'default_query_region_backup_hosts': QUERY_REGION_BACKUP_HOSTS, + 'default_backup_hosts_retry_times': 3, # 仅控制旧区域 LegacyRegion 查询 Hosts 的重试次数 + 'connection_timeout': 30, # 链接超时为时间为30s + 'connection_retries': 3, # 链接重试次数为3次 + 'connection_pool': 10, # 链接池个数为10 + 'default_upload_threshold': 2 * _BLOCK_SIZE # put_file上传方式的临界默认值 +} + +_is_customized_default = { + k: False + for k in _config.keys() +} + + +def is_customized_default(key): + return _is_customized_default[key] + + +def get_default(key): + if key == 'default_zone' and not _is_customized_default[key]: + # prevent circle import + from .region import LegacyRegion + return LegacyRegion() + return _config[key] + + +def set_default( + default_zone=None, connection_retries=None, connection_pool=None, + connection_timeout=None, default_rs_host=None, default_uc_host=None, + default_rsf_host=None, default_api_host=None, default_upload_threshold=None, + default_query_region_host=None, default_query_region_backup_hosts=None, + default_backup_hosts_retry_times=None, default_uc_backup_hosts=None): + if default_zone: + _config['default_zone'] = default_zone + _is_customized_default['default_zone'] = True + if default_rs_host: + _config['default_rs_host'] = 
default_rs_host + _is_customized_default['default_rs_host'] = True + if default_rsf_host: + _config['default_rsf_host'] = default_rsf_host + _is_customized_default['default_rsf_host'] = True + if default_api_host: + _config['default_api_host'] = default_api_host + _is_customized_default['default_api_host'] = True + if default_uc_host: + _config['default_uc_host'] = default_uc_host + _is_customized_default['default_uc_host'] = True + _config['default_uc_backup_hosts'] = [] + _is_customized_default['default_uc_backup_hosts'] = True + _config['default_query_region_host'] = default_uc_host + _is_customized_default['default_query_region_host'] = True + _config['default_query_region_backup_hosts'] = [] + _is_customized_default['default_query_region_backup_hosts'] = True + if default_uc_backup_hosts is not None: + _config['default_uc_backup_hosts'] = default_uc_backup_hosts + _is_customized_default['default_uc_backup_hosts'] = True + _config['default_query_region_backup_hosts'] = default_uc_backup_hosts + _is_customized_default['default_query_region_backup_hosts'] = True + if default_query_region_host: + _config['default_query_region_host'] = default_query_region_host + _is_customized_default['default_query_region_host'] = True + _config['default_query_region_backup_hosts'] = [] + _is_customized_default['default_query_region_backup_hosts'] = True + if default_query_region_backup_hosts is not None: + _config['default_query_region_backup_hosts'] = default_query_region_backup_hosts + _is_customized_default['default_query_region_backup_hosts'] = True + if default_backup_hosts_retry_times: + _config['default_backup_hosts_retry_times'] = default_backup_hosts_retry_times + _is_customized_default['default_backup_hosts_retry_times'] = True + if connection_retries: + _config['connection_retries'] = connection_retries + _is_customized_default['connection_retries'] = True + if connection_pool: + _config['connection_pool'] = connection_pool + _is_customized_default['connection_pool'] = True + if connection_timeout: + _config['connection_timeout'] = connection_timeout + _is_customized_default['connection_timeout'] = True + if default_upload_threshold: + _config['default_upload_threshold'] = default_upload_threshold + _is_customized_default['default_upload_threshold'] = True diff --git a/qiniu/fop.py b/qiniu/fop.py deleted file mode 100644 index be9a7c19..00000000 --- a/qiniu/fop.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding:utf-8 -*- -import json - -class Exif(object): - def make_request(self, url): - return '%s?exif' % url - - -class ImageView(object): - mode = 1 # 1或2 - width = None # width 默认为0,表示不限定宽度 - height = None - quality = None # 图片质量, 1-100 - format = None # 输出格式, jpg, gif, png, tif 等图片格式 - - def make_request(self, url): - target = [] - target.append('%s' % self.mode) - - if self.width is not None: - target.append("w/%s" % self.width) - - if self.height is not None: - target.append("h/%s" % self.height) - - if self.quality is not None: - target.append("q/%s" % self.quality) - - if self.format is not None: - target.append("format/%s" % self.format) - - return "%s?imageView/%s" % (url, '/'.join(target)) - - -class ImageInfo(object): - def make_request(self, url): - return '%s?imageInfo' % url diff --git a/qiniu/http/__init__.py b/qiniu/http/__init__.py new file mode 100644 index 00000000..83a837a4 --- /dev/null +++ b/qiniu/http/__init__.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +import logging +import platform + +import requests +from requests.auth import AuthBase + +from qiniu import config, 
__version__ +import qiniu.auth + +from .response import ResponseInfo +from .default_client import qn_http_client, _init_http_adapter + +_sys_info = '{0}; {1}'.format(platform.system(), platform.machine()) +_python_ver = platform.python_version() + +USER_AGENT = 'QiniuPython/{0} ({1}; ) Python/{2}'.format( + __version__, _sys_info, _python_ver) + +_session = None +_headers = {'User-Agent': USER_AGENT} + + +def __return_wrapper(resp): + if resp.status_code != 200 or resp.headers.get('X-Reqid') is None: + return None, ResponseInfo(resp) + resp.encoding = 'utf-8' + try: + ret = resp.json() + except ValueError: + logging.debug("response body decode error: %s" % resp.text) + ret = {} + return ret, ResponseInfo(resp) + + +def _init(): + global _session + if _session is None: + _session = qn_http_client.session + _init_http_adapter() + + +def _post(url, data, files, auth, headers=None): + if _session is None: + _init() + try: + post_headers = _headers.copy() + if headers is not None: + for k, v in headers.items(): + post_headers.update({k: v}) + r = _session.post( + url, data=data, files=files, auth=auth, headers=post_headers, + timeout=config.get_default('connection_timeout')) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) + + +def _put(url, data, files, auth, headers=None): + if _session is None: + _init() + try: + post_headers = _headers.copy() + if headers is not None: + for k, v in headers.items(): + post_headers.update({k: v}) + r = _session.put( + url, data=data, files=files, auth=auth, headers=post_headers, + timeout=config.get_default('connection_timeout')) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) + + +def _get(url, params, auth, headers=None): + if _session is None: + _init() + try: + get_headers = _headers.copy() + if headers is not None: + for k, v in headers.items(): + get_headers.update({k: v}) + r = _session.get( + url, + params=params, + auth=auth, + timeout=config.get_default('connection_timeout'), + headers=get_headers) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) + + +class _TokenAuth(AuthBase): + def __init__(self, token): + self.token = token + + def __call__(self, r): + r.headers['Authorization'] = 'UpToken {0}'.format(self.token) + return r + + +def _post_with_token(url, data, token): + return _post(url, data, None, _TokenAuth(token)) + + +def _post_with_token_and_headers(url, data, token, headers): + return _post(url, data, None, _TokenAuth(token), headers) + + +def _post_file(url, data, files): + return _post(url, data, files, None) + + +def _post_with_auth(url, data, auth): + return _post(url, data, None, qiniu.auth.RequestsAuth(auth)) + + +def _get_with_auth(url, data, auth): + return _get(url, data, qiniu.auth.RequestsAuth(auth)) + + +def _post_with_auth_and_headers(url, data, auth, headers): + return _post(url, data, None, qiniu.auth.RequestsAuth(auth), headers) + + +def _get_with_auth_and_headers(url, data, auth, headers): + return _get(url, data, qiniu.auth.RequestsAuth(auth), headers) + + +def _post_with_qiniu_mac_and_headers(url, data, auth, headers): + return _post(url, data, None, qiniu.auth.QiniuMacRequestsAuth(auth), headers) + + +def _put_with_auth(url, data, auth): + return _put(url, data, None, qiniu.auth.RequestsAuth(auth)) + + +def _put_with_token_and_headers(url, data, auth, headers): + return _put(url, data, None, _TokenAuth(auth), headers) + + +def _put_with_auth_and_headers(url, data, auth, headers): + return 
_put(url, data, None, qiniu.auth.RequestsAuth(auth), headers) + + +def _put_with_qiniu_mac_and_headers(url, data, auth, headers): + return _put(url, data, None, qiniu.auth.QiniuMacRequestsAuth(auth), headers) + + +def _post_with_qiniu_mac(url, data, auth): + qn_auth = qiniu.auth.QiniuMacRequestsAuth( + auth + ) if auth is not None else None + + return _post(url, data, None, qn_auth) + + +def _get_with_qiniu_mac(url, params, auth): + qn_auth = qiniu.auth.QiniuMacRequestsAuth( + auth + ) if auth is not None else None + + return _get(url, params, qn_auth) + + +def _get_with_qiniu_mac_and_headers(url, params, auth, headers): + try: + post_headers = _headers.copy() + if headers is not None: + for k, v in headers.items(): + post_headers.update({k: v}) + r = requests.get( + url, + params=params, + auth=qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None, + timeout=config.get_default('connection_timeout'), + headers=post_headers) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) + + +def _delete_with_qiniu_mac(url, params, auth): + try: + r = requests.delete( + url, + params=params, + auth=qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None, + timeout=config.get_default('connection_timeout'), + headers=_headers) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) + + +def _delete_with_qiniu_mac_and_headers(url, params, auth, headers): + try: + post_headers = _headers.copy() + if headers is not None: + for k, v in headers.items(): + post_headers.update({k: v}) + r = requests.delete( + url, + params=params, + auth=qiniu.auth.QiniuMacRequestsAuth(auth) if auth is not None else None, + timeout=config.get_default('connection_timeout'), + headers=post_headers) + except Exception as e: + return None, ResponseInfo(None, e) + return __return_wrapper(r) diff --git a/qiniu/http/client.py b/qiniu/http/client.py new file mode 100644 index 00000000..6b850a38 --- /dev/null +++ b/qiniu/http/client.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +import logging + +import requests + +from qiniu.config import get_default +from .response import ResponseInfo +from .middleware import compose_middleware + + +class HTTPClient: + def __init__(self, middlewares=None, send_opts=None): + self.session = requests.Session() + self.middlewares = [] if middlewares is None else middlewares + self.send_opts = {} if send_opts is None else send_opts + + def _wrap_send(self, req, **kwargs): + # compatibility with setting timeout by qiniu.config.set_default + kwargs.setdefault('timeout', get_default('connection_timeout')) + + resp = self.session.send(req.prepare(), **kwargs) + return ResponseInfo(resp, None) + + def send_request(self, request, middlewares=None, **kwargs): + """ + + Args: + request (requests.Request): + requests.Request 对象 + + middlewares (list[qiniu.http.middleware.Middleware] or (list[qiniu.http.middleware.Middleware]) -> list[qiniu.http.middleware.Middleware]): + 仅对本次请求生效的中间件。 + + 如果传入的是列表,那么会作为追加的中间件拼接到 Client 中间件的后面。 + + 也可传入函数,获得 Client 中间件的一个副本来做更细的控制。 + 例如拼接到 Client 中间件的前面,可以这样使用: + + c.send_request(my_req, middlewares=lambda mws: my_mws + mws) + + kwargs: + 将作为其他参数直接透传给 session.send 方法 + + + Returns: + (dict, ResponseInfo): 可拆包的一个元组。 + 第一个元素为响应体的 dict,若响应体为 json 的话。 + 第二个元素为包装过的响应内容,包括了更多的响应内容。 + + """ + + # set default values + middlewares = [] if middlewares is None else middlewares + + # join middlewares and client middlewares + mw_ls = [] + if callable(middlewares): + mw_ls = 
middlewares(self.middlewares.copy()) + elif isinstance(middlewares, list): + mw_ls = self.middlewares + middlewares + + # send request + try: + handle = compose_middleware( + mw_ls, + lambda req: self._wrap_send(req, **kwargs) + ) + resp_info = handle(request) + except Exception as e: + return None, ResponseInfo(None, e) + + # if ok try dump response info to dict from json + if not resp_info.ok(): + return None, resp_info + + try: + ret = resp_info.json() + except ValueError: + logging.debug("response body decode error: %s" % resp_info.text_body) + ret = {} + return ret, resp_info + + def get( + self, + url, + params=None, + auth=None, + headers=None, + middlewares=None, + **kwargs + ): + req = requests.Request( + method='get', + url=url, + params=params, + auth=auth, + headers=headers + ) + send_opts = self.send_opts.copy() + send_opts.update(kwargs) + send_opts.setdefault("allow_redirects", True) + return self.send_request( + req, + middlewares=middlewares, + **send_opts + ) + + def post( + self, + url, + data, + files, + auth=None, + headers=None, + middlewares=None, + **kwargs + ): + req = requests.Request( + method='post', + url=url, + data=data, + files=files, + auth=auth, + headers=headers + ) + send_opts = self.send_opts.copy() + send_opts.update(kwargs) + return self.send_request( + req, + middlewares=middlewares, + **send_opts + ) + + def put( + self, + url, + data, + files, + auth=None, + headers=None, + middlewares=None, + **kwargs + ): + req = requests.Request( + method='put', + url=url, + data=data, + files=files, + auth=auth, + headers=headers + ) + send_opts = self.send_opts.copy() + send_opts.update(kwargs) + return self.send_request( + req, + middlewares=middlewares, + **send_opts + ) + + def delete( + self, + url, + params, + auth=None, + headers=None, + middlewares=None, + **kwargs + ): + req = requests.Request( + method='delete', + url=url, + params=params, + auth=auth, + headers=headers + ) + send_opts = self.send_opts.copy() + send_opts.update(kwargs) + return self.send_request( + req, + middlewares=middlewares, + **send_opts + ) diff --git a/qiniu/http/default_client.py b/qiniu/http/default_client.py new file mode 100644 index 00000000..7d7ccc60 --- /dev/null +++ b/qiniu/http/default_client.py @@ -0,0 +1,37 @@ +import functools + +from requests.adapters import HTTPAdapter + +from qiniu import config, __version__ + +from .client import HTTPClient +from .middleware import UserAgentMiddleware + +qn_http_client = HTTPClient( + middlewares=[ + UserAgentMiddleware(__version__) + ] +) + + +# compatibility with some config from qiniu.config +def _before_send(func): + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + _init_http_adapter() + return func(self, *args, **kwargs) + + return wrapper + + +qn_http_client.send_request = _before_send(qn_http_client.send_request) + + +def _init_http_adapter(): + # may be optimized: + # only called when config changed, not every time before send request + adapter = HTTPAdapter( + pool_connections=config.get_default('connection_pool'), + pool_maxsize=config.get_default('connection_pool'), + max_retries=config.get_default('connection_retries')) + qn_http_client.session.mount('http://', adapter) diff --git a/qiniu/http/endpoint.py b/qiniu/http/endpoint.py new file mode 100644 index 00000000..307542b9 --- /dev/null +++ b/qiniu/http/endpoint.py @@ -0,0 +1,68 @@ +class Endpoint: + @staticmethod + def from_host(host): + """ + Autodetect scheme from host string + + Parameters + ---------- + host: str + + Returns + ------- + Endpoint 
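Since the per-request middleware hook above accepts either a list (appended after the client's own middlewares) or a callable (which receives a copy of the client's middlewares and returns the list to use), a short usage sketch may help; the URL, key and bucket below are placeholders and the request is only illustrative:

```python
from qiniu.http.default_client import qn_http_client
from qiniu.http.middleware import RetryDomainsMiddleware

# list form: these run after the client's built-in User-Agent middleware;
# the callable form (e.g. lambda mws: my_mws + mws) would let you prepend instead
ret, resp_info = qn_http_client.get(
    'https://uc.qiniuapi.com/v4/query',                          # illustrative URL
    params={'ak': 'your-access-key', 'bucket': 'your-bucket'},   # placeholders
    middlewares=[
        RetryDomainsMiddleware(backup_domains=['kodo-config.qiniuapi.com', 'uc.qbox.me'])
    ],
)
print(resp_info.status_code, ret)
```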
+ """ + if '://' in host: + scheme, host = host.split('://') + return Endpoint(host=host, default_scheme=scheme) + else: + return Endpoint(host=host) + + def __init__(self, host, default_scheme='https'): + """ + Parameters + ---------- + host: str + default_scheme: str + """ + self.host = host + self.default_scheme = default_scheme + + def __str__(self): + return 'Endpoint(host:\'{0}\',default_scheme:\'{1}\')'.format( + self.host, + self.default_scheme + ) + + def __repr__(self): + return self.__str__() + + def __eq__(self, other): + if not isinstance(other, Endpoint): + raise TypeError('Cannot compare Endpoint with {0}'.format(type(other))) + + return self.host == other.host and self.default_scheme == other.default_scheme + + def get_value(self, scheme=None): + """ + Parameters + ---------- + scheme: str + + Returns + ------- + str + """ + scheme = scheme if scheme is not None else self.default_scheme + return ''.join([scheme, '://', self.host]) + + def clone(self): + """ + Returns + ------- + Endpoint + """ + return Endpoint( + host=self.host, + default_scheme=self.default_scheme + ) diff --git a/qiniu/http/endpoints_provider.py b/qiniu/http/endpoints_provider.py new file mode 100644 index 00000000..ccfb3b43 --- /dev/null +++ b/qiniu/http/endpoints_provider.py @@ -0,0 +1,13 @@ +import abc + + +class EndpointsProvider: + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __iter__(self): + """ + Returns + ------- + list[Endpoint] + """ diff --git a/qiniu/http/endpoints_retry_policy.py b/qiniu/http/endpoints_retry_policy.py new file mode 100644 index 00000000..f648a29e --- /dev/null +++ b/qiniu/http/endpoints_retry_policy.py @@ -0,0 +1,56 @@ +from qiniu.retry.abc import RetryPolicy + + +class EndpointsRetryPolicy(RetryPolicy): + def __init__(self, endpoints_provider=None, skip_init_context=False): + """ + Parameters + ---------- + endpoints_provider: Iterable[Endpoint] + skip_init_context: bool + """ + self.endpoints_provider = endpoints_provider if endpoints_provider else [] + self.skip_init_context = skip_init_context + + def init_context(self, context): + """ + Parameters + ---------- + context: dict + + Returns + ------- + None + """ + if self.skip_init_context: + return + context['alternative_endpoints'] = list(self.endpoints_provider) + if not context['alternative_endpoints']: + raise ValueError('There isn\'t available endpoint') + context['endpoint'] = context['alternative_endpoints'].pop(0) + + def should_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + + Returns + ------- + bool + """ + return len(attempt.context['alternative_endpoints']) > 0 + + def prepare_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + + Returns + ------- + None + """ + if not attempt.context['alternative_endpoints']: + raise Exception('There isn\'t available endpoint for next try') + attempt.context['endpoint'] = attempt.context['alternative_endpoints'].pop(0) diff --git a/qiniu/http/middleware/__init__.py b/qiniu/http/middleware/__init__.py new file mode 100644 index 00000000..1177dcb4 --- /dev/null +++ b/qiniu/http/middleware/__init__.py @@ -0,0 +1,9 @@ +from .base import Middleware, compose_middleware +from .ua import UserAgentMiddleware +from .retry_domains import RetryDomainsMiddleware + +__all__ = [ + 'Middleware', 'compose_middleware', + 'UserAgentMiddleware', + 'RetryDomainsMiddleware' +] diff --git a/qiniu/http/middleware/base.py b/qiniu/http/middleware/base.py new file mode 100644 index 00000000..aaaee330 --- /dev/null 
+++ b/qiniu/http/middleware/base.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +from functools import reduce + + +def compose_middleware(middlewares, handle): + """ + Args: + middlewares (list[Middleware]): Middlewares + handle ((requests.Request) -> qiniu.http.response.ResponseInfo): The send request handle + + Returns: + (requests.Request) -> qiniu.http.response.ResponseInfo: Composed handle + + """ + middlewares.reverse() + + return reduce( + lambda h, mw: + lambda req: mw(req, h), + middlewares, + handle + ) + + +class Middleware: + def __call__(self, request, nxt): + """ + Args: + request (requests.Request): + nxt ((requests.Request) -> qiniu.http.response.ResponseInfo): + + Returns: + requests.Response: + + """ + raise NotImplementedError('{0}.__call__ method is not implemented yet'.format(type(self))) diff --git a/qiniu/http/middleware/retry_domains.py b/qiniu/http/middleware/retry_domains.py new file mode 100644 index 00000000..4f6845a0 --- /dev/null +++ b/qiniu/http/middleware/retry_domains.py @@ -0,0 +1,81 @@ +from qiniu.compat import urlparse + +from .base import Middleware + + +class RetryDomainsMiddleware(Middleware): + def __init__(self, backup_domains, max_retry_times=2, retry_condition=None): + """ + Args: + backup_domains (list[str]): + max_retry_times (int): + retry_condition ((requests.Response or None, requests.Request)->bool): + """ + self.backup_domains = backup_domains + self.max_retry_times = max_retry_times + self.retry_condition = retry_condition + + self.retried_times = 0 + + @staticmethod + def _get_changed_url(url, domain): + url_parse_result = urlparse(url) + + backup_netloc = '' + has_user = False + if url_parse_result.username is not None: + backup_netloc += url_parse_result.username + has_user = True + if url_parse_result.password is not None: + backup_netloc += url_parse_result.password + has_user = True + if has_user: + backup_netloc += '@' + backup_netloc += domain + if url_parse_result.port is not None: + backup_netloc += ':' + str(url_parse_result.port) + + # the _replace is a public method. 
start with `_` just to prevent conflicts with field names + # see namedtuple docs + url_parse_result = url_parse_result._replace( + netloc=backup_netloc + ) + + return url_parse_result.geturl() + + @staticmethod + def _try_nxt(request, nxt): + resp = None + err = None + try: + resp = nxt(request) + except Exception as e: + err = e + return resp, err + + def _should_retry(self, resp, req): + if callable(self.retry_condition): + return self.retry_condition(resp, req) + + return resp is None or resp.need_retry() + + def __call__(self, request, nxt): + resp_info, err = None, None + url_parse_result = urlparse(request.url) + + for backup_domain in [str(url_parse_result.hostname)] + self.backup_domains: + request.url = RetryDomainsMiddleware._get_changed_url(request.url, backup_domain) + self.retried_times = 0 + + while self.retried_times < self.max_retry_times: + resp_info, err = RetryDomainsMiddleware._try_nxt(request, nxt) + self.retried_times += 1 + if not self._should_retry(resp_info, request): + if err is not None: + raise err + return resp_info + + if err is not None: + raise err + + return resp_info diff --git a/qiniu/http/middleware/ua.py b/qiniu/http/middleware/ua.py new file mode 100644 index 00000000..3661d5cf --- /dev/null +++ b/qiniu/http/middleware/ua.py @@ -0,0 +1,20 @@ +import platform as _platform + +from .base import Middleware + + +class UserAgentMiddleware(Middleware): + def __init__(self, sdk_version): + sys_info = '{0}; {1}'.format(_platform.system(), _platform.machine()) + python_ver = _platform.python_version() + + user_agent = 'QiniuPython/{0} ({1}; ) Python/{2}'.format( + sdk_version, sys_info, python_ver) + + self.user_agent = user_agent + + def __call__(self, request, nxt): + if not request.headers: + request.headers = {} + request.headers['User-Agent'] = self.user_agent + return nxt(request) diff --git a/qiniu/http/region.py b/qiniu/http/region.py new file mode 100644 index 00000000..09a9917c --- /dev/null +++ b/qiniu/http/region.py @@ -0,0 +1,184 @@ +from datetime import datetime, timedelta + +from enum import Enum + +from .endpoint import Endpoint + + +# Use StrEnum when min version of python update to >= 3.11 +# to make the json stringify more readable, +# or find another way to simple the json stringify +class ServiceName(Enum): + UC = 'uc' + UP = 'up' + UP_ACC = 'up_acc' + IO = 'io' + # IO_SRC = 'io_src' + RS = 'rs' + RSF = 'rsf' + API = 'api' + + +class Region: + @staticmethod + def merge(*args): + """ + Parameters + ---------- + args: list[list[Region]] + + Returns + ------- + + """ + if not args: + raise TypeError('There aren\'ta any regions to merge') + source, rest = args[0], args[1:] + target = source.clone() + for r in rest: + for sn, el in r.services.items(): + if sn not in target.services: + target.services[sn] = [e.clone() for e in el] + else: + target_values = [e.get_value() for e in target.services[sn]] + target.services[sn] += [ + e.clone() + for e in el + if e.get_value() not in target_values + ] + + return target + + @staticmethod + def from_region_id(region_id, **kwargs): + """ + Parameters + ---------- + region_id: str + kwargs: dict + s3_region_id: str + ttl: int + create_time: datetime + extended_services: dict[str, list[Region]] + preferred_scheme: str + + Returns + ------- + Region + """ + # create services endpoints + endpoint_kwargs = { + } + if 'preferred_scheme' in kwargs: + endpoint_kwargs['default_scheme'] = kwargs.get('preferred_scheme') + + is_z0 = region_id == 'z0' + services_hosts = { + ServiceName.UC: ['uc.qiniuapi.com'], + 
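The retry loop above tries the original hostname and then each backup domain, up to max_retry_times attempts per domain, falling back to ResponseInfo.need_retry() when no condition is supplied. A custom condition can be plugged in; the function below is a hypothetical example, not part of the SDK:

```python
from qiniu.http.middleware import RetryDomainsMiddleware

def retry_on_5xx_or_error(resp_info, request):
    # resp_info is whatever the next handler returned (a ResponseInfo for the
    # built-in client), or None if the attempt raised an exception
    return resp_info is None or resp_info.status_code >= 500

mw = RetryDomainsMiddleware(
    backup_domains=['kodo-config.qiniuapi.com', 'uc.qbox.me'],
    max_retry_times=2,
    retry_condition=retry_on_5xx_or_error,
)
```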
ServiceName.UP: [ + 'upload-{0}.qiniup.com'.format(region_id), + 'up-{0}.qiniup.com'.format(region_id) + ] if not is_z0 else [ + 'upload.qiniup.com', + 'up.qiniup.com' + ], + ServiceName.IO: [ + 'iovip-{0}.qiniuio.com'.format(region_id), + ] if not is_z0 else [ + 'iovip.qiniuio.com', + ], + ServiceName.RS: [ + 'rs-{0}.qiniuapi.com'.format(region_id), + ], + ServiceName.RSF: [ + 'rsf-{0}.qiniuapi.com'.format(region_id), + ], + ServiceName.API: [ + 'api-{0}.qiniuapi.com'.format(region_id), + ] + } + services = { + k: [ + Endpoint(h, **endpoint_kwargs) for h in v + ] + for k, v in services_hosts.items() + } + services.update(kwargs.get('extended_services', {})) + + # create region + region_kwargs = { + k: kwargs.get(k) + for k in [ + 's3_region_id', + 'ttl', + 'create_time' + ] if k in kwargs + } + region_kwargs['region_id'] = region_id + region_kwargs.setdefault('s3_region_id', region_id) + region_kwargs['services'] = services + + return Region(**region_kwargs) + + def __init__( + self, + region_id=None, + s3_region_id=None, + services=None, + ttl=86400, + create_time=None + ): + """ + Parameters + ---------- + region_id: str + s3_region_id: str + services: dict[ServiceName or str, list[Endpoint]] + ttl: int, default 86400 + create_time: datetime, default datetime.now() + """ + self.region_id = region_id + self.s3_region_id = s3_region_id if s3_region_id else region_id + + self.services = services if services else {} + self.services.update( + { + k: [] + for k in ServiceName + if + k not in self.services or + not isinstance(self.services[k], list) + } + ) + + self.ttl = ttl + self.create_time = create_time if create_time else datetime.now() + + @property + def is_live(self): + """ + Returns + ------- + bool + """ + if self.ttl < 0: + return True + live_time = datetime.now() - self.create_time + return live_time < timedelta(seconds=self.ttl) + + def clone(self): + """ + Returns + ------- + Region + """ + return Region( + region_id=self.region_id, + s3_region_id=self.s3_region_id, + services={ + k: [endpoint.clone() for endpoint in self.services[k]] + for k in self.services + }, + ttl=self.ttl, + create_time=self.create_time + ) diff --git a/qiniu/http/regions_provider.py b/qiniu/http/regions_provider.py new file mode 100644 index 00000000..c451d166 --- /dev/null +++ b/qiniu/http/regions_provider.py @@ -0,0 +1,917 @@ +import abc +import datetime +import errno +import itertools +from collections import namedtuple +import logging +import tempfile +import os +import shutil +import threading + +from qiniu.compat import json, b as to_bytes, is_windows, is_linux, is_macos +from qiniu.utils import io_md5, dt2ts + +from .endpoint import Endpoint +from .region import Region, ServiceName +from .default_client import qn_http_client +from .middleware import RetryDomainsMiddleware +from .single_flight import SingleFlight + + +class RegionsProvider: + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __iter__(self): + """ + Returns + ------- + Generator[Region, None, None] + """ + + +class MutableRegionsProvider(RegionsProvider): + @abc.abstractmethod + def set_regions(self, regions): + """ + Parameters + ---------- + regions: list[Region] + """ + + +# --- serializers for QueryRegionsProvider --- + +def _get_region_from_query(data, **kwargs): + preferred_scheme = kwargs.get('preferred_scheme') + if not preferred_scheme: + preferred_scheme = 'http' + + domain_path_map = { + k: (k.value, 'domains') + for k in ServiceName + if k not in [ServiceName.UP_ACC] + } + domain_path_map[ServiceName.UP_ACC] = 
('up', 'acc_domains') + + services = { + # sn service name, dsn data service name + sn: [ + Endpoint(h, default_scheme=preferred_scheme) + for h in data.get(dsn, {}).get(k, []) + ] + for sn, (dsn, k) in domain_path_map.items() + } + + return Region( + region_id=data.get('region'), + s3_region_id=data.get('s3', {}).get('region_alias', None), + services=services, + ttl=data.get('ttl', None) + ) + + +_query_regions_single_flight = SingleFlight() + + +class QueryRegionsProvider(RegionsProvider): + def __init__( + self, + access_key, + bucket_name, + endpoints_provider, + preferred_scheme='http', + max_retry_times_per_endpoint=1, + ): + """ + Parameters + ---------- + access_key: str + bucket_name: str + endpoints_provider: Iterable[Endpoint] + preferred_scheme: str + max_retry_times_per_endpoint: int + """ + self.access_key = access_key + self.bucket_name = bucket_name + self.endpoints_provider = endpoints_provider + self.preferred_scheme = preferred_scheme + self.max_retry_times_per_endpoint = max_retry_times_per_endpoint + + def __iter__(self): + endpoints_md5 = io_md5([ + to_bytes(e.host) for e in self.endpoints_provider + ]) + flight_key = ':'.join([ + endpoints_md5, + self.access_key, + self.bucket_name + ]) + regions = _query_regions_single_flight.do(flight_key, self.__fetch_regions) + # change to `yield from` when min version of python update to >= 3.3 + for r in regions: + yield r + + def __fetch_regions(self): + endpoints = list(self.endpoints_provider) + if not endpoints: + raise ValueError('There aren\'t any available endpoints to query regions') + endpoint, alternative_endpoints = endpoints[0], endpoints[1:] + + url = '{0}/v4/query?ak={1}&bucket={2}'.format(endpoint.get_value(), self.access_key, self.bucket_name) + ret, resp = qn_http_client.get( + url, + middlewares=[ + RetryDomainsMiddleware( + backup_domains=[e.host for e in alternative_endpoints], + max_retry_times=self.max_retry_times_per_endpoint + ) + ] + ) + + if not resp.ok(): + raise RuntimeError( + ( + 'Query regions failed with ' + 'HTTP Status Code {0}, ' + 'Body {1}' + ).format(resp.status_code, resp.text_body) + ) + + return [ + _get_region_from_query(d, preferred_scheme=self.preferred_scheme) + for d in ret.get('hosts', []) + ] + + +# --- helpers for CachedRegionsProvider --- +class FileAlreadyLocked(RuntimeError): + def __init__(self, message): + super(FileAlreadyLocked, self).__init__(message) + + +_file_threading_lockers_lock = threading.Lock() +_file_threading_lockers = {} + + +class _FileThreadingLocker: + def __init__(self, fd): + self._fd = fd + + def __enter__(self): + with _file_threading_lockers_lock: + global _file_threading_lockers + threading_lock = _file_threading_lockers.get(self._file_path, threading.Lock()) + # Could use keyword style `acquire(blocking=False)` when min version of python update to >= 3 + if not threading_lock.acquire(False): + raise FileAlreadyLocked('File {0} already locked'.format(self._file_path)) + _file_threading_lockers[self._file_path] = threading_lock + + def __exit__(self, exc_type, exc_val, exc_tb): + with _file_threading_lockers_lock: + global _file_threading_lockers + threading_lock = _file_threading_lockers.get(self._file_path) + if threading_lock and threading_lock.locked(): + threading_lock.release() + del _file_threading_lockers[self._file_path] + + @property + def _file_path(self): + return self._fd.name + + +if is_linux or is_macos: + import fcntl + + # Use subclass of _FileThreadingLocker when min version of python update to >= 3 + class _FileLocker: + def 
__init__(self, fd): + self._fd = fd + + def __enter__(self): + try: + fcntl.lockf(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + # Use `raise ... from ...` when min version of python update to >= 3 + raise FileAlreadyLocked('File {0} already locked'.format(self._file_path)) + + def __exit__(self, exc_type, exc_val, exc_tb): + fcntl.lockf(self._fd, fcntl.LOCK_UN) + + @property + def _file_path(self): + return self._fd.name + +elif is_windows: + import msvcrt + + class _FileLocker: + def __init__(self, fd): + self._fd = fd + self._lock_fd = None + self._already_locked = False + + def __enter__(self): + try: + self._lock_fd = open(self._lock_file_path, 'w') + msvcrt.locking(self._lock_fd.fileno(), msvcrt.LK_LOCK | msvcrt.LK_NBLCK, 1) + except OSError: + self._already_locked = True + raise FileAlreadyLocked('File {0} already locked'.format(self._file_path)) + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._already_locked: + if self._lock_fd: + self._lock_fd.close() + return + + try: + msvcrt.locking(self._lock_fd.fileno(), msvcrt.LK_UNLCK, 1) + finally: + self._lock_fd.close() + os.remove(self._lock_file_path) + + @property + def _file_path(self): + return self._fd.name + + @property + def _lock_file_path(self): + """ + Returns + ------- + str + """ + return self._file_path + '.lock' + +else: + class _FileLocker: + def __init__(self, fd): + self._fd = fd + self._already_locked = False + + def __enter__(self): + try: + # Atomic file creation + open_flags = os.O_EXCL | os.O_RDWR | os.O_CREAT + fd = os.open(self._lock_file_path, open_flags) + os.close(fd) + except OSError: + self._already_locked = True + raise FileAlreadyLocked('File {0} already locked'.format(self._file_path)) + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._already_locked: + return + try: + os.remove(self._lock_file_path) + except OSError: + pass + + @property + def _file_path(self): + return self._fd.name + + @property + def _lock_file_path(self): + """ + Returns + ------- + str + """ + return self._file_path + '.lock' + + +# use dataclass instead namedtuple if min version of python update to 3.7 +CacheScope = namedtuple( + 'CacheScope', + [ + 'memo_cache', + 'persist_path', + 'last_shrink_at', + 'shrink_interval', + 'should_shrink_expired_regions', + 'memo_cache_lock' + ] +) + + +_global_cache_scope = CacheScope( + memo_cache={}, + persist_path=os.path.join( + tempfile.gettempdir(), + 'qn-py-sdk', + 'regions-cache.jsonl' + ), + last_shrink_at=datetime.datetime.fromtimestamp(0), + shrink_interval=datetime.timedelta(days=1), + should_shrink_expired_regions=False, + memo_cache_lock=threading.Lock() +) + + +# --- serializers for CachedRegionsProvider --- + +_PersistedEndpoint = namedtuple( + 'PersistedEndpoint', + [ + 'host', + 'defaultScheme' + ] +) + + +def _persist_endpoint(endpoint): + """ + Parameters + ---------- + endpoint: Endpoint + + Returns + ------- + dict + """ + return _PersistedEndpoint( + defaultScheme=endpoint.default_scheme, + host=endpoint.host + )._asdict() + + +def _get_endpoint_from_persisted(data): + """ + Parameters + ---------- + data: dict + + Returns + ------- + Endpoint + """ + persisted_endpoint = _PersistedEndpoint(**data) + return Endpoint( + persisted_endpoint.host, + default_scheme=persisted_endpoint.defaultScheme + ) + + +_PersistedRegion = namedtuple( + 'PersistedRegion', + [ + 'regionId', + 's3RegionId', + 'services', + 'ttl', + 'createTime' + ] +) + + +def _persist_region(region): + """ + Parameters + ---------- + region: Region + + Returns + ------- + 
dict + """ + return _PersistedRegion( + regionId=region.region_id, + s3RegionId=region.s3_region_id, + services={ + # The StrEnum not available in python < 3.11 + # so need stringify the key manually + k.value if isinstance(k, ServiceName) else k: [ + _persist_endpoint(e) + for e in v + ] + for k, v in region.services.items() + }, + ttl=region.ttl, + # use datetime.datetime.timestamp() when min version of python >= 3 + createTime=dt2ts(region.create_time) + )._asdict() + + +def _get_region_from_persisted(data): + """ + Parameters + ---------- + data: dict + + Returns + ------- + Region + """ + def _get_service_name(k): + try: + return ServiceName(k) + except ValueError: + return k + + persisted_region = _PersistedRegion(**data) + + return Region( + region_id=persisted_region.regionId, + s3_region_id=persisted_region.s3RegionId, + services={ + # The StrEnum not available in python < 3.11 + # so need parse the key manually + _get_service_name(k): [ + _get_endpoint_from_persisted(d) + for d in v + ] + for k, v in persisted_region.services.items() + }, + ttl=persisted_region.ttl, + create_time=datetime.datetime.fromtimestamp(persisted_region.createTime / 1000) + ) + + +def _parse_persisted_regions(persisted_data): + """ + Parameters + ---------- + persisted_data: str + + Returns + ------- + cache_key: str + regions: list[Region] + """ + parsed_data = json.loads(persisted_data) + regions = [ + _get_region_from_persisted(d) + for d in parsed_data.get('regions', []) + ] + return parsed_data.get('cacheKey'), regions + + +def _walk_persist_cache_file(persist_path, ignore_parse_error=True): + """ + Parameters + ---------- + persist_path: str + ignore_parse_error: bool + + Returns + ------- + Iterable[(str, list[Region])] + """ + if not os.access(persist_path, os.R_OK): + return + + with open(persist_path, 'r') as f: + for line in f: + if not line.strip(): + continue + try: + cache_key, regions = _parse_persisted_regions(line.strip()) + yield cache_key, regions + except Exception as err: + if not ignore_parse_error: + raise err + + +def _merge_regions(*args): + """ + merge two regions by region id. + if the same region id, the last create region will be keep. + Parameters + ---------- + args: list[Region] + + Returns + ------- + list[Region] + """ + regions_dict = {} + + for r in itertools.chain(*args): + if r.region_id not in regions_dict: + regions_dict[r.region_id] = r + else: + if r.create_time > regions_dict[r.region_id].create_time: + regions_dict[r.region_id] = r + + return regions_dict.values() + + +class CachedRegionsProvider(MutableRegionsProvider): + def __init__( + self, + cache_key, + base_regions_provider, + **kwargs + ): + """ + Parameters + ---------- + cache_key: str + base_regions_provider: Iterable[Region] + kwargs + persist_path: str + shrink_interval: datetime.timedelta + should_shrink_expired_regions: bool + """ + self.cache_key = cache_key + self.base_regions_provider = base_regions_provider + + persist_path = kwargs.get('persist_path', None) + last_shrink_at = datetime.datetime.fromtimestamp(0) + if persist_path is None: + cache_dir = os.path.dirname(_global_cache_scope.persist_path) + try: + # make sure the cache dir is available for all users. 
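The merge rule above (the newest create_time wins per region_id) can be seen directly; _merge_regions is a module-private helper, so this is only an illustration of the documented behaviour, not a supported API:

```python
from datetime import datetime, timedelta

from qiniu.http.region import Region
from qiniu.http.regions_provider import _merge_regions  # private helper, shown for illustration only

old_z0 = Region.from_region_id('z0', create_time=datetime.now() - timedelta(days=1))
new_z0 = Region.from_region_id('z0')
z1 = Region.from_region_id('z1')

merged = list(_merge_regions([old_z0, z1], [new_z0]))
assert {r.region_id for r in merged} == {'z0', 'z1'}
assert next(r for r in merged if r.region_id == 'z0') is new_z0  # the newer z0 record is kept
```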
+ # we can not use the '/tmp' dir directly on linux, + # because the permission is 0o1777 + if not os.path.exists(cache_dir): + # os.makedirs have no exists_ok parameter in python 2.7 + os.makedirs(cache_dir) + os.chmod(cache_dir, 0o777) + persist_path = _global_cache_scope.persist_path + last_shrink_at = _global_cache_scope.last_shrink_at + except Exception as err: + if isinstance(err, OSError) and err.errno == errno.EEXIST: + persist_path = _global_cache_scope.persist_path + last_shrink_at = _global_cache_scope.last_shrink_at + else: + logging.warning( + 'failed to create cache dir %s. error: %s', cache_dir, err) + + shrink_interval = kwargs.get('shrink_interval', None) + if shrink_interval is None: + shrink_interval = _global_cache_scope.shrink_interval + + should_shrink_expired_regions = kwargs.get('should_shrink_expired_regions', None) + if should_shrink_expired_regions is None: + should_shrink_expired_regions = _global_cache_scope.should_shrink_expired_regions + + self._cache_scope = _global_cache_scope._replace( + persist_path=persist_path, + last_shrink_at=last_shrink_at, + shrink_interval=shrink_interval, + should_shrink_expired_regions=should_shrink_expired_regions + ) + + def __iter__(self): + if self.__should_shrink: + try: + self.__shrink_cache() + except Exception as err: + logging.warning('failed to shrink cache. error: %s', err) + + get_regions_fns = [ + self.__get_regions_from_memo, + self.__get_regions_from_file, + self.__get_regions_from_base_provider + ] + + # set the fallback to None for raise errors when failed + regions = None + for get_regions in get_regions_fns: + regions = get_regions(fallback=regions) + if regions and all(r.is_live for r in regions): + break + + # change to `yield from` when min version of python update to >= 3.3 + for r in regions: + yield r + + def set_regions(self, regions): + """ + Parameters + ---------- + regions: list[Region] + """ + with self._cache_scope.memo_cache_lock: + self._cache_scope.memo_cache[self.cache_key] = regions + + if not self._cache_scope.persist_path: + return + + try: + with open(self._cache_scope.persist_path, 'a') as f: + f.write(json.dumps({ + 'cacheKey': self.cache_key, + 'regions': [_persist_region(r) for r in regions] + }) + os.linesep) + except Exception as err: + logging.warning('failed to cache regions result to file. 
error: %s', err) + + @property + def persist_path(self): + """ + Returns + ------- + str + """ + return self._cache_scope.persist_path + + @persist_path.setter + def persist_path(self, value): + """ + Parameters + ---------- + value: str + """ + if value == self._cache_scope.persist_path: + return + self._cache_scope = self._cache_scope._replace( + persist_path=value, + last_shrink_at=datetime.datetime.fromtimestamp(0) + ) + + @property + def last_shrink_at(self): + """ + Returns + ------- + datetime.datetime + """ + # copy the datetime make sure it is read-only + return self._cache_scope.last_shrink_at.replace() + + @property + def shrink_interval(self): + """ + Returns + ------- + datetime.timedelta + """ + return self._cache_scope.shrink_interval + + @shrink_interval.setter + def shrink_interval(self, value): + """ + Parameters + ---------- + value: datetime.timedelta + """ + self._cache_scope = self._cache_scope._replace( + shrink_interval=value + ) + + @property + def should_shrink_expired_regions(self): + """ + Returns + ------- + bool + """ + return self._cache_scope.should_shrink_expired_regions + + @should_shrink_expired_regions.setter + def should_shrink_expired_regions(self, value): + """ + Parameters + ---------- + value: bool + """ + self._cache_scope = self._cache_scope._replace( + should_shrink_expired_regions=value + ) + + def __get_regions_from_memo(self, fallback=None): + """ + Parameters + ---------- + fallback: list[Region] + + Returns + ------- + list[Region] + """ + regions = self._cache_scope.memo_cache.get(self.cache_key) + + if regions: + return regions + + return fallback + + def __get_regions_from_file(self, fallback=None): + """ + Parameters + ---------- + fallback: list[Region] + + Returns + ------- + list[Region] + """ + if not self._cache_scope.persist_path: + return fallback + + try: + self.__flush_file_cache_to_memo() + except Exception as err: + if fallback is not None: + return fallback + else: + raise err + + return self.__get_regions_from_memo(fallback) + + def __get_regions_from_base_provider(self, fallback=None): + """ + Parameters + ---------- + fallback: list[Region] + + Returns + ------- + list[Region] + """ + try: + regions = list(self.base_regions_provider) + except Exception as err: + if fallback is not None: + return fallback + else: + raise err + self.set_regions(regions) + return regions + + def __flush_file_cache_to_memo(self): + for cache_key, regions in _walk_persist_cache_file( + persist_path=self._cache_scope.persist_path + ): + if cache_key not in self._cache_scope.memo_cache: + self._cache_scope.memo_cache[cache_key] = regions + return + memo_regions = self._cache_scope.memo_cache[cache_key] + self._cache_scope.memo_cache[cache_key] = _merge_regions( + memo_regions, + regions + ) + + @property + def __should_shrink(self): + """ + Returns + ------- + bool + """ + return datetime.datetime.now() - self._cache_scope.last_shrink_at > self._cache_scope.shrink_interval + + def __shrink_cache(self): + # shrink memory cache + if self._cache_scope.should_shrink_expired_regions: + memo_cache_old = self._cache_scope.memo_cache.copy() + # Could use keyword style `acquire(blocking=False)` when min version of python update to >= 3 + if self._cache_scope.memo_cache_lock.acquire(False): + try: + for k, regions in memo_cache_old.items(): + live_regions = [r for r in regions if r.is_live] + if live_regions: + self._cache_scope.memo_cache[k] = live_regions + else: + del self._cache_scope.memo_cache[k] + finally: + 
self._cache_scope.memo_cache_lock.release() + + # shrink file cache + if not self._cache_scope.persist_path: + self._cache_scope = self._cache_scope._replace( + last_shrink_at=datetime.datetime.now() + ) + return + + shrink_file_path = self._cache_scope.persist_path + '.shrink' + try: + with open(shrink_file_path, 'a') as f, _FileThreadingLocker(f), _FileLocker(f): + # filter data + shrunk_cache = {} + for cache_key, regions in _walk_persist_cache_file( + persist_path=self._cache_scope.persist_path + ): + kept_regions = regions + if self._cache_scope.should_shrink_expired_regions: + kept_regions = [ + r for r in kept_regions if r.is_live + ] + + if cache_key not in shrunk_cache: + shrunk_cache[cache_key] = kept_regions + else: + shrunk_cache[cache_key] = _merge_regions( + shrunk_cache[cache_key], + kept_regions + ) + + # write data + for cache_key, regions in shrunk_cache.items(): + f.write( + json.dumps( + { + 'cacheKey': cache_key, + 'regions': [_persist_region(r) for r in regions] + } + ) + os.linesep + ) + + # make the cache file available for all users + if is_linux or is_macos: + os.chmod(shrink_file_path, 0o666) + + # rename file + if is_windows: + # windows must close first, or will raise permission error + # be careful to do something with the file after this + f.close() + shutil.move(shrink_file_path, self._cache_scope.persist_path) + + # update last shrink time + self._cache_scope = self._cache_scope._replace( + last_shrink_at=datetime.datetime.now() + ) + global _global_cache_scope + if _global_cache_scope.persist_path == self._cache_scope.persist_path: + _global_cache_scope = _global_cache_scope._replace( + last_shrink_at=self._cache_scope.last_shrink_at + ) + + except FileAlreadyLocked: + # skip file shrink by another running + pass + + +def get_default_regions_provider( + query_endpoints_provider, + access_key, + bucket_name, + accelerate_uploading=False, + force_query=False, + **kwargs +): + """ + Parameters + ---------- + query_endpoints_provider: Iterable[Endpoint] + access_key: str + bucket_name: str + accelerate_uploading: bool + force_query: bool + kwargs + preferred_scheme: str + option of QueryRegionsProvider + max_retry_times_per_endpoint: int + option of QueryRegionsProvider + persist_path: str + option of CachedRegionsProvider + shrink_interval: datetime.timedelta + option of CachedRegionsProvider + should_shrink_expired_regions: bool + option of CachedRegionsProvider + + Returns + ------- + Iterable[Region] + """ + query_regions_provider_opts = { + 'access_key': access_key, + 'bucket_name': bucket_name, + 'endpoints_provider': query_endpoints_provider, + } + query_regions_provider_opts.update({ + k: v + for k, v in kwargs.items() + if k in ['preferred_scheme', 'max_retry_times_per_endpoint'] + }) + + query_regions_provider = QueryRegionsProvider(**query_regions_provider_opts) + + if force_query: + return query_regions_provider + + query_endpoints = list(query_endpoints_provider) + + endpoints_md5 = io_md5([ + to_bytes(e.host) for e in query_endpoints + ]) + cache_key = ':'.join([ + endpoints_md5, + access_key, + bucket_name, + 'true' if accelerate_uploading else 'false' + ]) + + cached_regions_provider_opts = { + 'cache_key': cache_key, + 'base_regions_provider': query_regions_provider, + } + cached_regions_provider_opts.update({ + k: v + for k, v in kwargs.items() + if k in [ + 'persist_path', + 'shrink_interval', + 'should_shrink_expired_regions' + ] + }) + + return CachedRegionsProvider( + **cached_regions_provider_opts + ) diff --git 
a/qiniu/http/regions_retry_policy.py b/qiniu/http/regions_retry_policy.py new file mode 100644 index 00000000..29b2f9a9 --- /dev/null +++ b/qiniu/http/regions_retry_policy.py @@ -0,0 +1,162 @@ +from qiniu.retry.abc import RetryPolicy + +from .region import Region, ServiceName + + +class RegionsRetryPolicy(RetryPolicy): + def __init__( + self, + regions_provider, + service_names, + preferred_endpoints_provider=None, + on_change_region=None + ): + """ + Parameters + ---------- + regions_provider: Iterable[Region] + service_names: list[ServiceName or str] + preferred_endpoints_provider: Iterable[Endpoint] + on_change_region: Callable + `(context: dict) -> None` + """ + self.regions_provider = regions_provider + self.service_names = service_names + if not service_names: + raise ValueError('Must provide at least one service name') + if preferred_endpoints_provider is None: + preferred_endpoints_provider = [] + self.preferred_endpoints_provider = preferred_endpoints_provider + self.on_change_region = on_change_region + + def init_context(self, context): + """ + Parameters + ---------- + context: dict + """ + self._init_regions(context) + self._prepare_endpoints(context) + + def should_retry(self, attempt): + """ + Parameters + ---------- + attempt: Attempt + """ + return ( + len(attempt.context.get('alternative_regions', [])) > 0 or + len(attempt.context.get('alternative_service_names', [])) > 0 + ) + + def prepare_retry(self, attempt): + """ + Parameters + ---------- + attempt: Attempt + """ + if attempt.context.get('alternative_service_names'): + # change service for next try + attempt.context['service_name'] = attempt.context.get('alternative_service_names').pop(0) + elif attempt.context.get('alternative_regions'): + # change region for next try + attempt.context['region'] = attempt.context.get('alternative_regions').pop(0) + if callable(self.on_change_region): + self.on_change_region(attempt.context) + else: + raise RuntimeError('There isn\'t available region or service for next try') + self._prepare_endpoints(attempt.context) + + def _init_regions(self, context): + """ + Parameters + ---------- + context: dict + """ + regions = list(self.regions_provider) + preferred_endpoints = list(self.preferred_endpoints_provider) + if not regions and not preferred_endpoints: + raise ValueError('There isn\'t available region or preferred endpoint') + + if not preferred_endpoints: + # regions are not empty implicitly by above if condition + context['alternative_regions'] = regions + context['region'] = context['alternative_regions'].pop(0) + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + context['alternative_service_names'] = self.service_names[:] + context['service_name'] = context['alternative_service_names'].pop(0) + return + + # find preferred service name and region by preferred endpoints + preferred_region_index = -1 + preferred_service_index = -1 + for ri, region in enumerate(regions): + for si, service_name in enumerate(self.service_names): + if any( + pe.host in [ + e.host for e in region.services.get(service_name, []) + ] + for pe in preferred_endpoints + ): + preferred_region_index = ri + preferred_service_index = si + break + + # initialize the order of service_names and regions + if preferred_region_index < 0: + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + context['alternative_service_names'] = self.service_names[:] + context['service_name'] = 
context['alternative_service_names'].pop(0) + + context['region'] = Region( + region_id='preferred_region', + services={ + context['service_name']: preferred_endpoints + } + ) + context['alternative_regions'] = regions + else: + # regions are not empty implicitly by above if condition + # preferred endpoints are in a known region, then reorder the regions and services + context['alternative_regions'] = regions + context['region'] = context['alternative_regions'].pop(preferred_region_index) + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + context['alternative_service_names'] = self.service_names[:] + context['service_name'] = context['alternative_service_names'].pop(preferred_service_index) + + def _prepare_endpoints(self, context): + """ + Parameters + ---------- + context: dict + """ + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + endpoints = context['region'].services.get(context['service_name'], [])[:] + while not endpoints: + if context['alternative_service_names']: + context['service_name'] = context['alternative_service_names'].pop(0) + endpoints = context['region'].services.get(context['service_name'], [])[:] + elif context['alternative_regions']: + context['region'] = context['alternative_regions'].pop(0) + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + context['alternative_service_names'] = self.service_names[:] + context['service_name'] = context['alternative_service_names'].pop(0) + endpoints = context['region'].services.get(context['service_name'], [])[:] + if callable(self.on_change_region): + self.on_change_region(context) + else: + raise RuntimeError( + 'There isn\'t available endpoint for {0} service(s) in any available regions'.format( + ', '.join( + sn.value if isinstance(sn, ServiceName) else sn + for sn in self.service_names + ) + ) + ) + context['alternative_endpoints'] = endpoints + context['endpoint'] = context['alternative_endpoints'].pop(0) diff --git a/qiniu/http/response.py b/qiniu/http/response.py new file mode 100644 index 00000000..cbfcf034 --- /dev/null +++ b/qiniu/http/response.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +from qiniu.compat import is_py2, is_py3 + + +class ResponseInfo(object): + """七牛HTTP请求返回信息类 + + 该类主要是用于获取和解析对七牛发起各种请求后的响应包的header和body。 + + Attributes: + status_code (int): 整数变量,响应状态码 + text_body (str): 字符串变量,响应的body + req_id (str): 字符串变量,七牛HTTP扩展字段,参考 https://developer.qiniu.com/kodo/3924/common-request-headers + x_log (str): 字符串变量,七牛HTTP扩展字段,参考 https://developer.qiniu.com/kodo/3924/common-request-headers + error (str): 字符串变量,响应的错误内容 + """ + + def __init__(self, response, exception=None): + """用响应包和异常信息初始化ResponseInfo类""" + self.__response = response + self.exception = exception + if response is None: + self.url = None + self.status_code = -1 + self.text_body = None + self.req_id = None + self.x_log = None + self.error = str(exception) + else: + self.url = response.url + self.status_code = response.status_code + self.text_body = response.text + self.req_id = response.headers.get('X-Reqid') + self.x_log = response.headers.get('X-Log') + if self.status_code >= 400: + if self.__check_json(response): + ret = response.json() if response.text != '' else None + if ret is None: + self.error = 'unknown' + else: + self.error = response.text + else: + self.error = response.text + if self.req_id is None and self.status_code == 200: + self.error = 'server is not qiniu' + 
+ def ok(self): + return self.status_code // 100 == 2 + + def need_retry(self): + if 100 <= self.status_code < 500: + return False + if all([ + self.status_code < 0, + self.exception is not None, + 'BadStatusLine' in str(self.exception) + ]): + return False + # https://developer.qiniu.com/fusion/kb/1352/the-http-request-return-a-status-code + # https://developer.qiniu.com/kodo/3928/error-responses + if self.status_code in [ + 501, 509, 573, 579, 608, 612, 614, 616, 618, 630, 631, 632, 640, 701 + ]: + return False + return True + + def connect_failed(self): + return self.__response is None or self.req_id is None + + def json(self): + try: + self.__response.encoding = "utf-8" + return self.__response.json() + except Exception: + return {} + + def __str__(self): + if is_py2: + return ', '.join( + ['%s:%s' % item for item in self.__dict__.items()]).encode('utf-8') + elif is_py3: + return ', '.join( + ['%s:%s' % item for item in self.__dict__.items()]) + + def __repr__(self): + return self.__str__() + + def __check_json(self, response): + try: + response.json() + return True + except Exception: + return False diff --git a/qiniu/http/single_flight.py b/qiniu/http/single_flight.py new file mode 100644 index 00000000..28536de0 --- /dev/null +++ b/qiniu/http/single_flight.py @@ -0,0 +1,50 @@ +import threading + + +class _FlightLock: + """ + Do not use dataclass which caused the event created only once + """ + def __init__(self): + self.event = threading.Event() + self.result = None + self.error = None + + +class SingleFlight: + def __init__(self): + self._locks = {} + self._lock = threading.Lock() + + def do(self, key, fn, *args, **kwargs): + # here does not use `with` statement + # because need to wait by another object if it exists, + # and reduce the `acquire` times if it not exists + self._lock.acquire() + if key in self._locks: + flight_lock = self._locks[key] + + self._lock.release() + flight_lock.event.wait() + + if flight_lock.error: + raise flight_lock.error + return flight_lock.result + + flight_lock = _FlightLock() + self._locks[key] = flight_lock + self._lock.release() + + try: + flight_lock.result = fn(*args, **kwargs) + except Exception as e: + flight_lock.error = e + finally: + flight_lock.event.set() + + with self._lock: + del self._locks[key] + + if flight_lock.error: + raise flight_lock.error + return flight_lock.result diff --git a/qiniu/httplib_chunk.py b/qiniu/httplib_chunk.py deleted file mode 100644 index d08b0ebb..00000000 --- a/qiniu/httplib_chunk.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -Modified from standard httplib - -1. HTTPConnection can send trunked data. -2. Remove httplib's automatic Content-Length insertion when data is a file-like object. 
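For clarity, a minimal usage sketch of the SingleFlight helper added in qiniu/http/single_flight.py above; the cache key, the fetch function, and the thread count are illustrative placeholders, not part of this change.

import threading

from qiniu.http.single_flight import SingleFlight

single_flight = SingleFlight()

def fetch_region_hosts():
    # stand-in for an expensive call, e.g. querying upload hosts for a bucket
    return ['up.example-host.com']

def worker():
    # concurrent callers sharing the same key piggyback on one in-flight call
    hosts = single_flight.do('query:<bucket>', fetch_region_hosts)
    assert hosts == ['up.example-host.com']

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()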
-""" - -# -*- coding: utf-8 -*- - -import httplib -from httplib import _CS_REQ_STARTED, _CS_REQ_SENT, CannotSendHeader, NotConnected -import string -import os -from array import array - -class HTTPConnection(httplib.HTTPConnection): - - def send(self, data, is_chunked=False): - """Send `data' to the server.""" - if self.sock is None: - if self.auto_open: - self.connect() - else: - raise NotConnected() - - if self.debuglevel > 0: - print "send:", repr(data) - blocksize = 8192 - if hasattr(data,'read') and not isinstance(data, array): - if self.debuglevel > 0: print "sendIng a read()able" - datablock = data.read(blocksize) - while datablock: - if self.debuglevel > 0: - print 'chunked:', is_chunked - if is_chunked: - if self.debuglevel > 0: print 'send: with trunked data' - lenstr = string.upper(hex(len(datablock))[2:]) - self.sock.sendall('%s\r\n%s\r\n' % (lenstr, datablock)) - else: - self.sock.sendall(datablock) - datablock = data.read(blocksize) - if is_chunked: - self.sock.sendall('0\r\n\r\n') - else: - self.sock.sendall(data) - - - def _set_content_length(self, body): - # Set the content-length based on the body. - thelen = None - try: - thelen = str(len(body)) - except (TypeError, AttributeError), te: - # Don't send a length if this failed - if self.debuglevel > 0: print "Cannot stat!!" - - if thelen is not None: - self.putheader('Content-Length', thelen) - return True - return False - - - def _send_request(self, method, url, body, headers): - # Honor explicitly requested Host: and Accept-Encoding: headers. - header_names = dict.fromkeys([k.lower() for k in headers]) - skips = {} - if 'host' in header_names: - skips['skip_host'] = 1 - if 'accept-encoding' in header_names: - skips['skip_accept_encoding'] = 1 - - self.putrequest(method, url, **skips) - - is_chunked = False - if body and header_names.get('Transfer-Encoding') == 'chunked': - is_chunked = True - elif body and ('content-length' not in header_names): - is_chunked = not self._set_content_length(body) - if is_chunked: - self.putheader('Transfer-Encoding', 'chunked') - for hdr, value in headers.iteritems(): - self.putheader(hdr, value) - - self.endheaders(body, is_chunked=is_chunked) - - - def endheaders(self, message_body=None, is_chunked=False): - """Indicate that the last header line has been sent to the server. - - This method sends the request to the server. The optional - message_body argument can be used to pass a message body - associated with the request. The message body will be sent in - the same packet as the message headers if it is string, otherwise it is - sent as a separate packet. - """ - if self.__state == _CS_REQ_STARTED: - self.__state = _CS_REQ_SENT - else: - raise CannotSendHeader() - self._send_output(message_body, is_chunked=is_chunked) - - - def _send_output(self, message_body=None, is_chunked=False): - """Send the currently buffered request and clear the buffer. - - Appends an extra \\r\\n to the buffer. - A message_body may be specified, to be appended to the request. - """ - self._buffer.extend(("", "")) - msg = "\r\n".join(self._buffer) - del self._buffer[:] - # If msg and message_body are sent in a single send() call, - # it will avoid performance problems caused by the interaction - # between delayed ack and the Nagle algorithm. - if isinstance(message_body, str): - msg += message_body - message_body = None - self.send(msg) - if message_body is not None: - #message_body was not a string (i.e. 
it is a file) and - #we must run the risk of Nagle - self.send(message_body, is_chunked=is_chunked) - diff --git a/qiniu/io.py b/qiniu/io.py deleted file mode 100644 index 2576fc0b..00000000 --- a/qiniu/io.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -from base64 import urlsafe_b64encode -import rpc -import conf -import random -import string -try: - import zlib as binascii -except ImportError: - import binascii - - -# @gist PutExtra -class PutExtra(object): - params = {} - mime_type = 'application/octet-stream' - crc32 = "" - check_crc = 0 -# @endgist - - -def put(uptoken, key, data, extra=None): - """ put your data to Qiniu - - If key is None, the server will generate one. - data may be str or read()able object. - """ - fields = { - } - - if not extra: - extra = PutExtra() - - if extra.params: - for k in extra.params: - fields[k] = str(extra.params[k]) - - if extra.check_crc: - fields["crc32"] = str(extra.crc32) - - if key is not None: - fields['key'] = key - - fields["token"] = uptoken - - fname = key - if fname is None: - fname = _random_str(9) - elif fname is '': - fname = 'index.html' - files = [ - {'filename': fname, 'data': data, 'mime_type': extra.mime_type}, - ] - return rpc.Client(conf.UP_HOST).call_with_multipart("/", fields, files) - - -def put_file(uptoken, key, localfile, extra=None): - """ put a file to Qiniu - - If key is None, the server will generate one. - """ - if extra is not None and extra.check_crc == 1: - extra.crc32 = _get_file_crc32(localfile) - with open(localfile, 'rb') as f: - return put(uptoken, key, f, extra) - - -_BLOCK_SIZE = 1024 * 1024 * 4 - -def _get_file_crc32(filepath): - with open(filepath, 'rb') as f: - block = f.read(_BLOCK_SIZE) - crc = 0 - while len(block) != 0: - crc = binascii.crc32(block, crc) & 0xFFFFFFFF - block = f.read(_BLOCK_SIZE) - return crc - - -def _random_str(length): - lib = string.ascii_lowercase - return ''.join([random.choice(lib) for i in range(0, length)]) diff --git a/qiniu/main.py b/qiniu/main.py new file mode 100755 index 00000000..6f0b81a0 --- /dev/null +++ b/qiniu/main.py @@ -0,0 +1,40 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +import argparse + +from qiniu import etag + + +def main(): + parser = argparse.ArgumentParser(prog='qiniu') + sub_parsers = parser.add_subparsers() + + parser_etag = sub_parsers.add_parser( + 'etag', + description='calculate the etag of the file', + help='etag [file...]') + parser_etag.add_argument( + 'etag_files', + metavar='N', + nargs='+', + help='the file list for calculate') + + args = parser.parse_args() + + try: + etag_files = args.etag_files + + except AttributeError: + etag_files = None + + if etag_files: + r = [etag(file) for file in etag_files] + if len(r) == 1: + print(r[0]) + else: + print(' '.join(r)) + + +if __name__ == '__main__': + main() diff --git a/qiniu/region.py b/qiniu/region.py new file mode 100644 index 00000000..a59d488e --- /dev/null +++ b/qiniu/region.py @@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*- +import functools +import logging +import os +import time + + +from .compat import json, s as str_from_bytes +from .utils import urlsafe_base64_decode, dt2ts +from .config import UC_HOST, is_customized_default, get_default +from .http.endpoint import Endpoint as _HTTPEndpoint +from .http.regions_provider import Region as _HTTPRegion, ServiceName, get_default_regions_provider + + +def _legacy_default_get(key): + def decorator(func): + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + if hasattr(self, key) and getattr(self, key): + return getattr(self, key) + if is_customized_default('default_' + key): + return get_default('default_' + key) + return func(self, *args, **kwargs) + + return wrapper + + return decorator + + +class LegacyRegion(_HTTPRegion, object): + """七牛上传区域类 + 该类主要内容上传区域地址。 + """ + + def __init__( + self, + up_host=None, + up_host_backup=None, + io_host=None, + host_cache=None, + home_dir=None, + scheme="http", + rs_host=None, + rsf_host=None, + api_host=None, + accelerate_uploading=False + ): + """初始化Zone类""" + super(LegacyRegion, self).__init__() + if host_cache is None: + host_cache = {} + self.up_host = up_host + self.up_host_backup = up_host_backup + self.io_host = io_host + self.rs_host = rs_host + self.rsf_host = rsf_host + self.api_host = api_host + self.home_dir = home_dir + self.host_cache = host_cache + self.scheme = scheme + self.services.update({ + k: [ + _HTTPEndpoint.from_host(h) + for h in v if h + ] + for k, v in { + ServiceName.UP: [up_host, up_host_backup], + ServiceName.IO: [io_host], + ServiceName.RS: [rs_host], + ServiceName.RSF: [rsf_host], + ServiceName.API: [api_host] + }.items() + }) + self.accelerate_uploading = accelerate_uploading + + def get_up_host_by_token(self, up_token, home_dir): + ak, bucket = self.unmarshal_up_token(up_token) + if home_dir is None: + home_dir = os.getcwd() + up_hosts = self.get_up_host(ak, bucket, home_dir) + return up_hosts[0] + + def get_up_host_backup_by_token(self, up_token, home_dir): + ak, bucket = self.unmarshal_up_token(up_token) + if home_dir is None: + home_dir = os.getcwd() + up_hosts = self.get_up_host(ak, bucket, home_dir) + if len(up_hosts) <= 1: + up_host = up_hosts[0] + else: + up_host = up_hosts[1] + return up_host + + def get_io_host(self, ak, bucket, home_dir=None): + if self.io_host: + return self.io_host + if home_dir is None: + home_dir = os.getcwd() + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir) + if 'ioHosts' not in bucket_hosts: + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir, force=True) + io_hosts = bucket_hosts['ioHosts'] + return io_hosts[0] + + @_legacy_default_get('rs_host') + def 
get_rs_host(self, ak, bucket, home_dir=None): + if home_dir is None: + home_dir = os.getcwd() + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir) + if 'rsHosts' not in bucket_hosts: + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir, force=True) + rs_hosts = bucket_hosts['rsHosts'] + return rs_hosts[0] + + @_legacy_default_get('rsf_host') + def get_rsf_host(self, ak, bucket, home_dir=None): + if home_dir is None: + home_dir = os.getcwd() + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir) + if 'rsfHosts' not in bucket_hosts: + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir, force=True) + rsf_hosts = bucket_hosts['rsfHosts'] + return rsf_hosts[0] + + @_legacy_default_get('api_host') + def get_api_host(self, ak, bucket, home_dir=None): + if home_dir is None: + home_dir = os.getcwd() + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir) + if 'apiHosts' not in bucket_hosts: + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir, force=True) + api_hosts = bucket_hosts['apiHosts'] + return api_hosts[0] + + def get_up_host(self, ak, bucket, home_dir): + if home_dir is None: + home_dir = os.getcwd() + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir) + if 'upHosts' not in bucket_hosts: + bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir, force=True) + up_hosts = bucket_hosts['upHosts'] + return up_hosts + + def unmarshal_up_token(self, up_token): + token = up_token.split(':') + if len(token) != 3: + raise ValueError('invalid up_token') + + ak = token[0] + policy = json.loads( + str_from_bytes( + urlsafe_base64_decode( + token[2]))) + + scope = policy["scope"] + bucket = scope + if ':' in scope: + bucket = scope.split(':')[0] + + return ak, bucket + + def get_bucket_hosts(self, ak, bucket, home_dir=None, force=False): + cache_persist_path = os.path.join(home_dir, 'qn-regions-cache.jsonl') if home_dir else None + regions = self.__get_bucket_regions( + ak, + bucket, + force_query=force, + cache_persist_path=cache_persist_path + ) + + if not regions: + raise KeyError("Please check your BUCKET_NAME! Server hosts not correct! The hosts is empty") + + region = regions[0] + + bucket_hosts = { + k: [ + e.get_value(scheme=self.scheme) + for e in region.services[sn] + if e + ] + for k, sn in { + 'upHosts': ServiceName.UP, + 'ioHosts': ServiceName.IO, + 'rsHosts': ServiceName.RS, + 'rsfHosts': ServiceName.RSF, + 'apiHosts': ServiceName.API + }.items() + } + + ttl = region.ttl if region.ttl > 0 else 24 * 3600 # 1 day + # use datetime.datetime.timestamp() when min version of python >= 3 + create_time = dt2ts(region.create_time) + bucket_hosts['deadline'] = create_time + ttl + + return bucket_hosts + + def get_bucket_hosts_to_cache(self, key, home_dir): + """ + .. deprecated:: + The cache has been replaced by CachedRegionsProvider + + Parameters + ---------- + key: str + home_dir: str + + Returns + ------- + dict + """ + ret = {} + if len(self.host_cache) == 0: + self.host_cache_from_file(home_dir) + + if key not in self.host_cache: + return ret + + if self.host_cache[key]['deadline'] > time.time(): + ret = self.host_cache[key] + + return ret + + def set_bucket_hosts_to_cache(self, key, val, home_dir): + """ + .. deprecated:: + The cache has been replaced by CachedRegionsProvider + + Parameters + ---------- + key: str + val: dict + home_dir: str + """ + self.host_cache[key] = val + self.host_cache_to_file(home_dir) + return + + def host_cache_from_file(self, home_dir): + """ + .. 
deprecated:: + The cache has been replaced by CachedRegionsProvider + + Parameters + ---------- + home_dir: str + """ + if home_dir is not None: + self.home_dir = home_dir + path = self.host_cache_file_path() + if not os.path.isfile(path): + return None + with open(path, 'r') as f: + try: + bucket_hosts = json.load(f) + self.host_cache = bucket_hosts + except Exception as e: + logging.error(e) + f.close() + return + + def host_cache_file_path(self): + """ + .. deprecated:: + The cache has been replaced by CachedRegionsProvider + + Returns + ------- + str + """ + return os.path.join(self.home_dir, ".qiniu_pythonsdk_hostscache.json") + + def host_cache_to_file(self, home_dir): + """ + .. deprecated:: + The cache has been replaced by CachedRegionsProvider + + Parameters + ---------- + home_dir: str + + """ + path = self.host_cache_file_path() + with open(path, 'w') as f: + json.dump(self.host_cache, f) + f.close() + + def bucket_hosts(self, ak, bucket): + regions = self.__get_bucket_regions(ak, bucket) + + data_dict = { + 'hosts': [ + { + k.value if isinstance(k, ServiceName) else k: { + 'domains': [ + e.host for e in v + ] + } + for k, v in r.services.items() + } + for r in regions + ] + } + for r in data_dict['hosts']: + if 'up_acc' in r: + r.setdefault('up', {}) + r['up'].update(acc_domains=r['up_acc'].get('domains', [])) + del r['up_acc'] + + data = json.dumps(data_dict) + + return data + + def __get_bucket_regions( + self, + access_key, + bucket_name, + force_query=False, + cache_persist_path=None + ): + query_region_host = UC_HOST + if is_customized_default('default_query_region_host'): + query_region_host = get_default('default_query_region_host') + query_region_backup_hosts = get_default('default_query_region_backup_hosts') + query_region_backup_retry_times = get_default('default_backup_hosts_retry_times') + + regions_provider = get_default_regions_provider( + query_endpoints_provider=[ + _HTTPEndpoint.from_host(h) + for h in [query_region_host] + query_region_backup_hosts + if h + ], + access_key=access_key, + bucket_name=bucket_name, + accelerate_uploading=self.accelerate_uploading, + force_query=force_query, + preferred_scheme=self.scheme, + persist_path=cache_persist_path, + max_retry_times_per_endpoint=query_region_backup_retry_times + ) + + return list(regions_provider) + + +Region = LegacyRegion diff --git a/qiniu/resumable_io.py b/qiniu/resumable_io.py deleted file mode 100644 index 85be9378..00000000 --- a/qiniu/resumable_io.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -import os -try: - import zlib as binascii -except ImportError: - import binascii -from base64 import urlsafe_b64encode - -import auth.up -import conf - -_workers = 1 -_task_queue_size = _workers * 4 -_chunk_size = 256 * 1024 -_try_times = 3 -_block_size = 4 * 1024 * 1024 - -class Error(Exception): - value = None - def __init__(self, value): - self.value = value - def __str__(self): - return self.value - -err_invalid_put_progress = Error("invalid put progress") -err_put_failed = Error("resumable put failed") -err_unmatched_checksum = Error("unmatched checksum") - -def setup(chunk_size=0, try_times=0): - """ - * chunk_size => 默认的Chunk大小,不设定则为256k - * try_times => 默认的尝试次数,不设定则为3 - """ - global _chunk_size, _try_times - - if chunk_size == 0: - chunk_size = 1 << 18 - - if try_times == 0: - try_times = 3 - - _chunk_size, _try_times = chunk_size, try_times - -# ---------------------------------------------------------- -def gen_crc32(data): - return binascii.crc32(data) & 0xffffffff - -class 
PutExtra(object): - params = None # 自定义用户变量, key需要x: 开头 - mimetype = None # 可选。在 uptoken 没有指定 DetectMime 时,用户客户端可自己指定 MimeType - chunk_size = None # 可选。每次上传的Chunk大小 - try_times = None # 可选。尝试次数 - progresses = None # 可选。上传进度 - notify = lambda self, idx, size, ret: None # 可选。进度提示 - notify_err = lambda self, idx, size, err: None - - def __init__(self, bucket): - self.bucket = bucket - -def put_file(uptoken, key, localfile, extra): - """ 上传文件 """ - f = open(localfile, "rb") - statinfo = os.stat(localfile) - ret = put(uptoken, key, f, statinfo.st_size, extra) - f.close() - return ret - -def put(uptoken, key, f, fsize, extra): - """ 上传二进制流, 通过将data "切片" 分段上传 """ - if not isinstance(extra, PutExtra): - print("extra must the instance of PutExtra") - return - - block_cnt = block_count(fsize) - if extra.progresses is None: - extra.progresses = [None for i in xrange(0, block_cnt)] - else: - if not len(extra.progresses) == block_cnt: - return None, err_invalid_put_progress - - if extra.try_times is None: - extra.try_times = _try_times - - if extra.chunk_size is None: - extra.chunk_size = _chunk_size - - for i in xrange(0, block_cnt): - try_time = extra.try_times - read_length = _block_size - if (i+1)*_block_size > fsize: - read_length = fsize - i*_block_size - data_slice = f.read(read_length) - while True: - err = resumable_block_put(data_slice, i, extra, uptoken) - if err is None: - break - - try_time -= 1 - if try_time <= 0: - return None, err_put_failed - print err, ".. retry" - - mkfile_client = auth.up.Client(uptoken, extra.progresses[-1]["host"]) - return mkfile(mkfile_client, key, fsize, extra) - -# ---------------------------------------------------------- - -def resumable_block_put(block, index, extra, uptoken): - block_size = len(block) - - mkblk_client = auth.up.Client(uptoken, conf.UP_HOST) - if extra.progresses[index] is None or "ctx" not in extra.progresses[index]: - end_pos = extra.chunk_size-1 - if block_size < extra.chunk_size: - end_pos = block_size-1 - chunk = block[: end_pos] - crc32 = gen_crc32(chunk) - chunk = bytearray(chunk) - extra.progresses[index], err = mkblock(mkblk_client, block_size, chunk) - if not extra.progresses[index]["crc32"] == crc32: - return err_unmatched_checksum - if err is not None: - extra.notify_err(index, end_pos + 1, err) - return err - extra.notify(index, end_pos + 1, extra.progresses[index]) - - bput_client = auth.up.Client(uptoken, extra.progresses[index]["host"]) - while extra.progresses[index]["offset"] < block_size: - offset = extra.progresses[index]["offset"] - chunk = block[offset: offset+extra.chunk_size-1] - crc32 = gen_crc32(chunk) - chunk = bytearray(chunk) - - extra.progresses[index], err = putblock(bput_client, extra.progresses[index], chunk) - if not extra.progresses[index]["crc32"] == crc32: - return err_unmatched_checksum - if err is not None: - extra.notify_err(index, len(chunk), err) - return err - extra.notify(index, len(chunk), extra.progresses[index]) - -def block_count(size): - global _block_size - return size / _block_size + 1 - -def mkblock(client, block_size, first_chunk): - url = "http://%s/mkblk/%s" % (conf.UP_HOST, block_size) - content_type = "application/octet-stream" - return client.call_with(url, first_chunk, content_type, len(first_chunk)) - -def putblock(client, block_ret, chunk): - url = "%s/bput/%s/%s" % (block_ret["host"], block_ret["ctx"], block_ret["offset"]) - content_type = "application/octet-stream" - return client.call_with(url, chunk, content_type, len(chunk)) - -def mkfile(client, key, fsize, extra): - url = 
["http://%s/mkfile/%s" % (conf.UP_HOST, fsize)] - - if extra.mimetype: - url.append("mimeType/%s" % urlsafe_b64encode(extra.mimetype)) - - if key is not None: - url.append("key/%s" % urlsafe_b64encode(key)) - - if extra.params: - for k, v in extra.params.iteritems(): - url.append("%s/%s" % (k, urlsafe_b64encode(v))) - - url = "/".join(url) - body = ",".join([i["ctx"] for i in extra.progresses]) - return client.call_with(url, body, "text/plain", len(body)) diff --git a/qiniu/retry/__init__.py b/qiniu/retry/__init__.py new file mode 100644 index 00000000..e726010f --- /dev/null +++ b/qiniu/retry/__init__.py @@ -0,0 +1,7 @@ +from .attempt import Attempt +from .retrier import Retrier + +__all__ = [ + 'Attempt', + 'Retrier' +] diff --git a/qiniu/retry/abc/__init__.py b/qiniu/retry/abc/__init__.py new file mode 100644 index 00000000..4f458a73 --- /dev/null +++ b/qiniu/retry/abc/__init__.py @@ -0,0 +1,5 @@ +from .policy import RetryPolicy + +__all__ = [ + 'RetryPolicy' +] diff --git a/qiniu/retry/abc/policy.py b/qiniu/retry/abc/policy.py new file mode 100644 index 00000000..b5b792bf --- /dev/null +++ b/qiniu/retry/abc/policy.py @@ -0,0 +1,61 @@ +import abc + + +class RetryPolicy(object): + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def init_context(self, context): + """ + initial context values the policy required + + Parameters + ---------- + context: dict + """ + + @abc.abstractmethod + def should_retry(self, attempt): + """ + if returns True, this policy will be applied + + Parameters + ---------- + attempt: qiniu.retry.attempt.Attempt + + Returns + ------- + bool + """ + + @abc.abstractmethod + def prepare_retry(self, attempt): + """ + apply this policy to change the context values for next attempt + + Parameters + ---------- + attempt: qiniu.retry.attempt.Attempt + """ + + def is_important(self, attempt): + """ + if returns True, this policy will be applied, whether it should retry or not. + this is useful when want to stop retry. + + Parameters + ---------- + attempt: qiniu.retry.attempt.Attempt + + Returns + ------- + bool + """ + + def after_retry(self, attempt, policy): + """ + Parameters + ---------- + attempt: qiniu.retry.attempt.Attempt + policy: RetryPolicy + """ diff --git a/qiniu/retry/attempt.py b/qiniu/retry/attempt.py new file mode 100644 index 00000000..460145c6 --- /dev/null +++ b/qiniu/retry/attempt.py @@ -0,0 +1,18 @@ +class Attempt: + def __init__(self, custom_context=None): + """ + Parameters + ---------- + custom_context: dict or None + """ + self.context = custom_context if custom_context is not None else {} + self.exception = None + self.result = None + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None and exc_val is not None: + self.exception = exc_val + return True # Swallow exception. 
diff --git a/qiniu/retry/retrier.py b/qiniu/retry/retrier.py new file mode 100644 index 00000000..23ff23b6 --- /dev/null +++ b/qiniu/retry/retrier.py @@ -0,0 +1,183 @@ +import functools + +from .attempt import Attempt + + +def before_retry_nothing(attempt, policy): + return True + + +class Retrier: + def __init__(self, policies=None, before_retry=None): + """ + Parameters + ---------- + policies: list[qiniu.retry.abc.RetryPolicy] + before_retry: callable + `(attempt: Attempt, policy: qiniu.retry.abc.RetryPolicy) -> bool` + """ + self.policies = policies if policies is not None else [] + self.before_retry = before_retry if before_retry is not None else before_retry_nothing + + def __iter__(self): + retrying = Retrying( + # change to `list.copy` for more readable when min version of python update to >= 3 + policies=self.policies[:], + before_retry=self.before_retry + ) + retrying.init_context() + while True: + attempt = Attempt(retrying.context) + yield attempt + if ( + hasattr(attempt.exception, 'no_need_retry') and + attempt.exception.no_need_retry + ): + break + policy = retrying.get_retry_policy(attempt) + if not policy: + break + if not self.before_retry(attempt, policy): + break + policy.prepare_retry(attempt) + retrying.after_retried(attempt, policy) + if attempt.exception: + raise attempt.exception + + def try_do( + self, + func, + *args, + **kwargs + ): + attempt = None + for attempt in self: + with attempt: + if kwargs.get('with_retry_context', False): + # inject retry_context + kwargs['retry_context'] = attempt.context + if 'with_retry_context' in kwargs: + del kwargs['with_retry_context'] + + # store result + attempt.result = func(*args, **kwargs) + + if attempt is None: + raise RuntimeError('attempt is none') + + return attempt.result + + def _wrap(self, with_retry_context=False): + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + return self.try_do( + func, + with_retry_context=with_retry_context, + *args, + **kwargs + ) + + return wrapper + + return decorator + + def retry(self, *args, **kwargs): + """ + decorator to retry + """ + if len(args) == 1 and callable(args[0]): + return self.retry()(args[0]) + else: + return self._wrap(**kwargs) + + +class Retrying: + def __init__(self, policies, before_retry): + """ + Parameters + ---------- + policies: list[qiniu.retry.abc.RetryPolicy] + before_retry: callable + `(attempt: Attempt, policy: qiniu.retry.abc.RetryPolicy) -> bool` + """ + self.policies = policies + self.before_retry = before_retry + self.context = {} + + def init_context(self): + for policy in self.policies: + policy.init_context(self.context) + + def get_retry_policy(self, attempt): + """ + + Parameters + ---------- + attempt: Attempt + + Returns + ------- + qiniu.retry.abc.RetryPolicy + + """ + policy = None + + # find important policy + for p in self.policies: + if p.is_important(attempt): + policy = p + break + if policy and policy.should_retry(attempt): + return policy + else: + policy = None + + # find retry policy + for p in self.policies: + if p.should_retry(attempt): + policy = p + break + + return policy + + def after_retried(self, attempt, policy): + for p in self.policies: + p.after_retry(attempt, policy) + + +""" +Examples +-------- +retrier = Retrier() +result = None +for attempt in retrier: + with attempt: + endpoint = attempt.context.get('endpoint') + result = upload(endpoint) + attempt.result = result +return result +""" + +""" +Examples +-------- +def foo(): + print('hi') + +retrier = Retrier() +retrier.try_do(foo) +""" 
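Continuing that sketch, one hypothetical way to wire a custom policy into the Retrier defined in this file; it assumes the MaxAttemptsPolicy sketched above is in scope, and flaky_call is a placeholder.

from qiniu.retry import Retrier

# assumes the illustrative MaxAttemptsPolicy from the earlier sketch is importable
retrier = Retrier(policies=[MaxAttemptsPolicy(max_attempts=3)])

@retrier.retry
def flaky_call():
    # placeholder for a request that may raise and be retried by the policy
    return 'ok'

print(flaky_call())  # at most 3 attempts before the last exception is re-raised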
+ +""" +Examples +-------- +retrier = Retrier() + + +@retrier.retry +def foo(): + print('hi') + +foo() +""" diff --git a/qiniu/rpc.py b/qiniu/rpc.py deleted file mode 100644 index 4a224449..00000000 --- a/qiniu/rpc.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -import httplib_chunk as httplib -import json -import cStringIO -import conf - - -class Client(object): - _conn = None - _header = None - - def __init__(self, host): - self._conn = httplib.HTTPConnection(host) - self._header = {} - - def round_tripper(self, method, path, body): - self._conn.request(method, path, body, self._header) - resp = self._conn.getresponse() - return resp - - def call(self, path): - return self.call_with(path, None) - - def call_with(self, path, body, content_type=None, content_length=None): - ret = None - - self.set_header("User-Agent", conf.USER_AGENT) - if content_type is not None: - self.set_header("Content-Type", content_type) - - if content_length is not None: - self.set_header("Content-Length", content_length) - - resp = self.round_tripper("POST", path, body) - try: - ret = resp.read() - ret = json.loads(ret) - except IOError, e: - return None, e - except ValueError: - pass - - if resp.status / 100 != 2: - err_msg = ret if "error" not in ret else ret["error"] - detail = resp.getheader("x-log", None) - if detail is not None: - err_msg += ", detail:%s" % detail - - return None, err_msg - - return ret, None - - def call_with_multipart(self, path, fields=None, files=None): - """ - * fields => {key} - * files => [{filename, data, content_type}] - """ - content_type, mr = self.encode_multipart_formdata(fields, files) - return self.call_with(path, mr, content_type, mr.length()) - - def call_with_form(self, path, ops): - """ - * ops => {"key": value/list()} - """ - - body = [] - for i in ops: - if isinstance(ops[i], (list, tuple)): - data = ('&%s=' % i).join(ops[i]) - else: - data = ops[i] - - body.append('%s=%s' % (i, data)) - body = '&'.join(body) - - content_type = "application/x-www-form-urlencoded" - return self.call_with(path, body, content_type, len(body)) - - def set_header(self, field, value): - self._header[field] = value - - def set_headers(self, headers): - self._header.update(headers) - - def encode_multipart_formdata(self, fields, files): - """ - * fields => {key} - * files => [{filename, data, content_type}] - * return content_type, content_length, body - """ - if files is None: - files = [] - if fields is None: - fields = {} - - readers = [] - BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' - CRLF = '\r\n' - L1 = [] - for key in fields: - L1.append('--' + BOUNDARY) - L1.append('Content-Disposition: form-data; name="%s"' % key) - L1.append('') - L1.append(fields[key]) - b1 = CRLF.join(L1) - readers.append(b1) - - for file_info in files: - L = [] - L.append('') - L.append('--' + BOUNDARY) - disposition = "Content-Disposition: form-data;" - filename = _qiniu_escape(file_info.get('filename')) - L.append('%s name="file"; filename="%s"' % (disposition, filename)) - L.append('Content-Type: %s' % file_info.get('content_type', 'application/octet-stream')) - L.append('') - L.append('') - b2 = CRLF.join(L) - readers.append(b2) - - data = file_info.get('data') - readers.append(data) - - L3 = ['', '--' + BOUNDARY + '--', ''] - b3 = CRLF.join(L3) - readers.append(b3) - - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, MultiReader(readers) - -def _qiniu_escape(s): - edits = [('\\', '\\\\'), ('\"', '\\\"')] - for (search, replace) in edits: - s = s.replace(search, 
replace) - return s - - -class MultiReader(object): - """ class MultiReader([readers...]) - - MultiReader returns a read()able object that's the logical concatenation of - the provided input readers. They're read sequentially. - """ - - def __init__(self, readers): - self.readers = [] - self.content_length = 0 - self.valid_content_length = True - for r in readers: - if hasattr(r, 'read'): - if self.valid_content_length: - length = self._get_content_length(r) - if length is not None: - self.content_length += length - else: - self.valid_content_length = False - else: - buf = r - if not isinstance(buf, basestring): - buf = str(buf) - buf = encode_unicode(buf) - r = cStringIO.StringIO(buf) - self.content_length += len(buf) - self.readers.append(r) - - - # don't name it __len__, because the length of MultiReader is not alway valid. - def length(self): - return self.content_length if self.valid_content_length else None - - - def _get_content_length(self, reader): - data_len = None - if hasattr(reader, 'seek') and hasattr(reader, 'tell'): - try: - reader.seek(0, 2) - data_len= reader.tell() - reader.seek(0, 0) - except OSError: - # Don't send a length if this failed - data_len = None - return data_len - - def read(self, n=-1): - if n is None or n == -1: - return ''.join([encode_unicode(r.read()) for r in self.readers]) - else: - L = [] - while len(self.readers) > 0 and n > 0: - b = self.readers[0].read(n) - if len(b) == 0: - self.readers = self.readers[1:] - else: - L.append(encode_unicode(b)) - n -= len(b) - return ''.join(L) - - -def encode_unicode(u): - if isinstance(u, unicode): - u = u.encode('utf8') - return u diff --git a/qiniu/rs/__init__.py b/qiniu/rs/__init__.py deleted file mode 100644 index 5eed5702..00000000 --- a/qiniu/rs/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- - -__all__ = [ - "Client", "EntryPath", "EntryPathPair", "uri_stat", "uri_delete", "uri_move", "uri_copy", - "PutPolicy", "GetPolicy", "make_base_url", -] - -from .rs import * -from .rs_token import * diff --git a/qiniu/rs/rs.py b/qiniu/rs/rs.py deleted file mode 100644 index 38a86bd5..00000000 --- a/qiniu/rs/rs.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -from base64 import urlsafe_b64encode - -from ..auth import digest -from .. 
import conf - -class Client(object): - conn = None - def __init__(self, mac=None): - if mac is None: - mac = digest.Mac() - self.conn = digest.Client(host=conf.RS_HOST, mac=mac) - - def stat(self, bucket, key): - return self.conn.call(uri_stat(bucket, key)) - - def delete(self, bucket, key): - return self.conn.call(uri_delete(bucket, key)) - - def move(self, bucket_src, key_src, bucket_dest, key_dest): - return self.conn.call(uri_move(bucket_src, key_src, bucket_dest, key_dest)) - - def copy(self, bucket_src, key_src, bucket_dest, key_dest): - return self.conn.call(uri_copy(bucket_src, key_src, bucket_dest, key_dest)) - - def batch(self, ops): - return self.conn.call_with_form("/batch", dict(op=ops)) - - def batch_stat(self, entries): - ops = [] - for entry in entries: - ops.append(uri_stat(entry.bucket, entry.key)) - return self.batch(ops) - - def batch_delete(self, entries): - ops = [] - for entry in entries: - ops.append(uri_delete(entry.bucket, entry.key)) - return self.batch(ops) - - def batch_move(self, entries): - ops = [] - for entry in entries: - ops.append(uri_move(entry.src.bucket, entry.src.key, - entry.dest.bucket, entry.dest.key)) - return self.batch(ops) - - def batch_copy(self, entries): - ops = [] - for entry in entries: - ops.append(uri_copy(entry.src.bucket, entry.src.key, - entry.dest.bucket, entry.dest.key)) - return self.batch(ops) - -class EntryPath(object): - bucket = None - key = None - def __init__(self, bucket, key): - self.bucket = bucket - self.key = key - -class EntryPathPair: - src = None - dest = None - def __init__(self, src, dest): - self.src = src - self.dest = dest - -def uri_stat(bucket, key): - return "/stat/%s" % urlsafe_b64encode("%s:%s" % (bucket, key)) - -def uri_delete(bucket, key): - return "/delete/%s" % urlsafe_b64encode("%s:%s" % (bucket, key)) - -def uri_move(bucket_src, key_src, bucket_dest, key_dest): - src = urlsafe_b64encode("%s:%s" % (bucket_src, key_src)) - dest = urlsafe_b64encode("%s:%s" % (bucket_dest, key_dest)) - return "/move/%s/%s" % (src, dest) - -def uri_copy(bucket_src, key_src, bucket_dest, key_dest): - src = urlsafe_b64encode("%s:%s" % (bucket_src, key_src)) - dest = urlsafe_b64encode("%s:%s" % (bucket_dest, key_dest)) - return "/copy/%s/%s" % (src, dest) diff --git a/qiniu/rs/rs_token.py b/qiniu/rs/rs_token.py deleted file mode 100644 index fad90198..00000000 --- a/qiniu/rs/rs_token.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -import json -import time -import urllib - -from ..auth import digest -from ..import rpc - -# @gist PutPolicy -class PutPolicy(object): - scope = None # 可以是 bucketName 或者 bucketName:key - expires = 3600 # 默认是 3600 秒 - callbackUrl = None - callbackBody = None - returnUrl = None - returnBody = None - endUser = None - asyncOps = None - - saveKey = None - insertOnly = None - detectMime = None - fsizeLimit = None - persistentNotifyUrl = None - persistentOps = None - - def __init__(self, scope): - self.scope = scope -# @endgist - - def token(self, mac=None): - if mac is None: - mac = digest.Mac() - token = dict( - scope = self.scope, - deadline = int(time.time()) + self.expires, - ) - - if self.callbackUrl is not None: - token["callbackUrl"] = self.callbackUrl - - if self.callbackBody is not None: - token["callbackBody"] = self.callbackBody - - if self.returnUrl is not None: - token["returnUrl"] = self.returnUrl - - if self.returnBody is not None: - token["returnBody"] = self.returnBody - - if self.endUser is not None: - token["endUser"] = self.endUser - - if self.asyncOps is not None: - 
token["asyncOps"] = self.asyncOps - - if self.saveKey is not None: - token["saveKey"] = self.saveKey - - if self.insertOnly is not None: - token["exclusive"] = self.insertOnly - - if self.detectMime is not None: - token["detectMime"] = self.detectMime - - if self.fsizeLimit is not None: - token["fsizeLimit"] = self.fsizeLimit - - if self.persistentOps is not None: - token["persistentOps"] = self.persistentOps - - if self.persistentNotifyUrl is not None: - token["persistentNotifyUrl"] = self.persistentNotifyUrl - - b = json.dumps(token, separators=(',',':')) - return mac.sign_with_data(b) - -class GetPolicy(object): - expires = 3600 - def __init__(self): - pass - - def make_request(self, base_url, mac=None): - ''' - * return private_url - ''' - if mac is None: - mac = digest.Mac() - - deadline = int(time.time()) + self.expires - if '?' in base_url: - base_url += '&' - else: - base_url += '?' - base_url = '%se=%s' % (base_url, str(deadline)) - - token = mac.sign(base_url) - return '%s&token=%s' % (base_url, token) - - -def make_base_url(domain, key): - ''' - * domain => str - * key => str - * return base_url - ''' - key = rpc.encode_unicode(key) - return 'http://%s/%s' % (domain, urllib.quote(key)) diff --git a/qiniu/rs/test/__init__.py b/qiniu/rs/test/__init__.py deleted file mode 100644 index f290dd77..00000000 --- a/qiniu/rs/test/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import urllib - -import qiniu.io -import qiniu.rs -import qiniu.conf - -pic = "http://cheneya.qiniudn.com/hello_jpg" -key = 'QINIU_UNIT_TEST_PIC' - -def setUp(): - qiniu.conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") - qiniu.conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") - bucket_name = os.getenv("QINIU_TEST_BUCKET") - - policy = qiniu.rs.PutPolicy(bucket_name) - uptoken = policy.token() - - f = urllib.urlopen(pic) - _, err = qiniu.io.put(uptoken, key, f) - f.close() - if err is None or err.startswith('file exists'): - print err - assert err is None or err.startswith('file exists') diff --git a/qiniu/rs/test/rs_test.py b/qiniu/rs/test/rs_test.py deleted file mode 100644 index a18cec66..00000000 --- a/qiniu/rs/test/rs_test.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -import unittest -import os -import random -import string - -from qiniu import rs -from qiniu import conf - -def r(length): - lib = string.ascii_uppercase - return ''.join([random.choice(lib) for i in range(0, length)]) - -conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") -conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") -key = 'QINIU_UNIT_TEST_PIC' -bucket_name = os.getenv("QINIU_TEST_BUCKET") -noexist_key = 'QINIU_UNIT_TEST_NOEXIST' + r(30) -key2 = "rs_demo_test_key_1_" + r(5) -key3 = "rs_demo_test_key_2_" + r(5) -key4 = "rs_demo_test_key_3_" + r(5) - -class TestRs(unittest.TestCase): - def test_stat(self): - r = rs.Client() - ret, err = r.stat(bucket_name, key) - assert err is None - assert ret is not None - - # error - _, err = r.stat(bucket_name, noexist_key) - assert err is not None - - def test_delete_move_copy(self): - r = rs.Client() - r.delete(bucket_name, key2) - r.delete(bucket_name, key3) - - ret, err = r.copy(bucket_name, key, bucket_name, key2) - assert err is None, err - - ret, err = r.move(bucket_name, key2, bucket_name, key3) - assert err is None, err - - ret, err = r.delete(bucket_name, key3) - assert err is None, err - - # error - _, err = r.delete(bucket_name, key2) - assert err is not None - - _, err = r.delete(bucket_name, key3) - assert err is not None - - def test_batch_stat(self): - r 
= rs.Client() - entries = [ - rs.EntryPath(bucket_name, key), - rs.EntryPath(bucket_name, key2), - ] - ret, err = r.batch_stat(entries) - assert err is None - self.assertEqual(ret[0]["code"], 200) - self.assertEqual(ret[1]["code"], 612) - - def test_batch_delete_move_copy(self): - r = rs.Client() - e1 = rs.EntryPath(bucket_name, key) - e2 = rs.EntryPath(bucket_name, key2) - e3 = rs.EntryPath(bucket_name, key3) - e4 = rs.EntryPath(bucket_name, key4) - r.batch_delete([e2, e3, e4]) - - # copy - entries = [ - rs.EntryPathPair(e1, e2), - rs.EntryPathPair(e1, e3), - ] - ret, err = r.batch_copy(entries) - assert err is None - self.assertEqual(ret[0]["code"], 200) - self.assertEqual(ret[1]["code"], 200) - - ret, err = r.batch_move([rs.EntryPathPair(e2, e4)]) - assert err is None - self.assertEqual(ret[0]["code"], 200) - - ret, err = r.batch_delete([e3, e4]) - assert err is None - self.assertEqual(ret[0]["code"], 200) - - r.batch_delete([e2, e3, e4]) - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/rs/test/rs_token_test.py b/qiniu/rs/test/rs_token_test.py deleted file mode 100644 index 66af6dfc..00000000 --- a/qiniu/rs/test/rs_token_test.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -import unittest -import os -import json -from base64 import urlsafe_b64decode as decode -from base64 import urlsafe_b64encode as encode -from hashlib import sha1 -import hmac -import urllib - -from qiniu import conf -from qiniu import rpc -from qiniu import rs - -conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") -conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") -bucket_name = os.getenv("QINIU_TEST_BUCKET") -domain = os.getenv("QINIU_TEST_DOMAIN") -key = 'QINIU_UNIT_TEST_PIC' - -class TestToken(unittest.TestCase): - def test_put_policy(self): - policy = rs.PutPolicy(bucket_name) - policy.endUser = "hello!" 
- policy.returnUrl = "http://localhost:1234/path?query=hello" - policy.returnBody = "$(sha1)" - # Do not specify the returnUrl and callbackUrl at the same time - policy.callbackUrl = "http://1.2.3.4/callback" - policy.callbackBody = "$(bucket)" - - policy.saveKey = "$(sha1)" - policy.insertOnly = 1 - policy.detectMime = 1 - policy.fsizeLimit = 1024 - policy.persistentNotifyUrl = "http://4.3.2.1/persistentNotifyUrl" - policy.persistentOps = "avthumb/flash" - - tokens = policy.token().split(':') - - # chcek first part of token - self.assertEqual(conf.ACCESS_KEY, tokens[0]) - data = json.loads(decode(tokens[2])) - - # check if same - self.assertEqual(data["scope"], bucket_name) - self.assertEqual(data["endUser"], policy.endUser) - self.assertEqual(data["returnUrl"], policy.returnUrl) - self.assertEqual(data["returnBody"], policy.returnBody) - self.assertEqual(data["callbackUrl"], policy.callbackUrl) - self.assertEqual(data["callbackBody"], policy.callbackBody) - self.assertEqual(data["saveKey"], policy.saveKey) - self.assertEqual(data["exclusive"], policy.insertOnly) - self.assertEqual(data["detectMime"], policy.detectMime) - self.assertEqual(data["fsizeLimit"], policy.fsizeLimit) - self.assertEqual(data["persistentNotifyUrl"], policy.persistentNotifyUrl) - self.assertEqual(data["persistentOps"], policy.persistentOps) - - new_hmac = encode(hmac.new(conf.SECRET_KEY, tokens[2], sha1).digest()) - self.assertEqual(new_hmac, tokens[1]) - - def test_get_policy(self): - base_url = rs.make_base_url(domain, key) - policy = rs.GetPolicy() - private_url = policy.make_request(base_url) - - f = urllib.urlopen(private_url) - body = f.read() - f.close() - self.assertEqual(len(body)>100, True) - - -class Test_make_base_url(unittest.TestCase): - def test_unicode(self): - url1 = rs.make_base_url('1.com', '你好') - url2 = rs.make_base_url('1.com', u'你好') - assert url1 == url2 - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/rsf.py b/qiniu/rsf.py deleted file mode 100644 index 0bc51192..00000000 --- a/qiniu/rsf.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -import auth.digest -import conf -import urllib - -EOF = 'EOF' - - -class Client(object): - conn = None - def __init__(self, mac=None): - if mac is None: - mac = auth.digest.Mac() - self.conn = auth.digest.Client(host=conf.RSF_HOST, mac=mac) - - def list_prefix(self, bucket, prefix=None, marker=None, limit=None): - '''前缀查询: - * bucket => str - * prefix => str - * marker => str - * limit => int - * return ret => {'items': items, 'marker': markerOut}, err => str - - 1. 首次请求 marker = None - 2. 无论 err 值如何,均应该先看 ret.get('items') 是否有内容 - 3. 
如果后续没有更多数据,err 返回 EOF,markerOut 返回 None(但不通过该特征来判断是否结束) - ''' - ops = { - 'bucket': bucket, - } - if marker is not None: - ops['marker'] = marker - if limit is not None: - ops['limit'] = limit - if prefix is not None: - ops['prefix'] = prefix - url = '%s?%s' % ('/list', urllib.urlencode(ops)) - ret, err = self.conn.call_with(url, body=None, content_type='application/x-www-form-urlencoded') - if not ret.get('marker'): - err = EOF - return ret, err diff --git a/qiniu/auth/__init__.py b/qiniu/services/__init__.py similarity index 100% rename from qiniu/auth/__init__.py rename to qiniu/services/__init__.py diff --git a/qiniu/test/__init__.py b/qiniu/services/cdn/__init__.py similarity index 100% rename from qiniu/test/__init__.py rename to qiniu/services/cdn/__init__.py diff --git a/qiniu/services/cdn/manager.py b/qiniu/services/cdn/manager.py new file mode 100644 index 00000000..6700ecaf --- /dev/null +++ b/qiniu/services/cdn/manager.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +from qiniu import http +import json + +from qiniu.compat import is_py2 +from qiniu.compat import is_py3 +from enum import Enum + +import hashlib + + +class DataType(Enum): + BANDWIDTH = 'bandwidth' + X302BANDWIDTH = '302bandwidth' + X302MBANDWIDTH = '302mbandwidth' + FLOW = 'flow' + X302FLOW = '302flow' + X302MFLOW = '302mflow' + + +def urlencode(str): + if is_py2: + import urllib2 + return urllib2.quote(str) + elif is_py3: + import urllib.parse + return urllib.parse.quote(str) + + +class CdnManager(object): + def __init__(self, auth): + self.auth = auth + self.server = 'http://fusion.qiniuapi.com' + + def refresh_urls(self, urls): + """ + 刷新文件列表,文档 https://developer.qiniu.com/fusion/api/cache-refresh + + Args: + urls: 待刷新的文件外链列表 + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + return self.refresh_urls_and_dirs(urls, None) + + def refresh_dirs(self, dirs): + """ + 刷新目录,文档 https://developer.qiniu.com/fusion/api/cache-refresh + + Args: + dirs: 待刷新的目录列表 + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + return self.refresh_urls_and_dirs(None, dirs) + + def refresh_urls_and_dirs(self, urls, dirs): + """ + 刷新文件目录,文档 https://developer.qiniu.com/fusion/api/cache-refresh + + Args: + urls: 待刷新的文件外链列表 + dirs: 待刷新的目录列表 + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + req = {} + if urls is not None and len(urls) > 0: + req.update({"urls": urls}) + if dirs is not None and len(dirs) > 0: + req.update({"dirs": dirs}) + + body = json.dumps(req) + url = '{0}/v2/tune/refresh'.format(self.server) + return self.__post(url, body) + + def prefetch_urls(self, urls): + """ + 预取文件列表,文档 https://developer.qiniu.com/fusion/api/file-prefetching + + Args: + urls: 待预取的文件外链列表 + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + req = {} + req.update({"urls": urls}) + + body = json.dumps(req) + url = '{0}/v2/tune/prefetch'.format(self.server) + return self.__post(url, body) + + def get_bandwidth_data(self, domains, start_date, end_date, granularity, data_type=None): + """ + 查询带宽数据,文档 https://developer.qiniu.com/fusion/api/traffic-bandwidth + + Args: + domains: 域名列表 + start_date: 起始日期 + end_date: 结束日期 + granularity: 数据间隔 + data_type: 计量数据类型, see class DataType.XXXBANDWIDTH + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + req = {} + req.update({"domains": ';'.join(domains)}) + req.update({"startDate": start_date}) + req.update({"endDate": end_date}) + req.update({"granularity": granularity}) + if
data_type is not None: + req.update({'type': data_type.value}) # should be one of 'bandwidth', '302bandwidth', '302mbandwidth' + + body = json.dumps(req) + url = '{0}/v2/tune/bandwidth'.format(self.server) + return self.__post(url, body) + + def get_flux_data(self, domains, start_date, end_date, granularity, data_type=None): + """ + 查询流量数据,文档 https://developer.qiniu.com/fusion/api/traffic-bandwidth + + Args: + domains: 域名列表 + start_date: 起始日期 + end_date: 结束日期 + granularity: 数据间隔 + data_type: 计量数据类型, see class DataType.XXXFLOW + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + req = {} + req.update({"domains": ';'.join(domains)}) + req.update({"startDate": start_date}) + req.update({"endDate": end_date}) + req.update({"granularity": granularity}) + if data_type is not None: + req.update({'type': data_type.value}) # should be one of 'flow', '302flow', '302mflow' + + body = json.dumps(req) + url = '{0}/v2/tune/flux'.format(self.server) + return self.__post(url, body) + + def get_log_list_data(self, domains, log_date): + """ + 获取日志下载链接,文档 https://developer.qiniu.com/fusion/api/download-the-log + + Args: + domains: 域名列表 + log_date: 日志日期 + + Returns: + 一个dict变量和一个ResponseInfo对象 + 参考代码 examples/cdn_manager.py + """ + req = {} + req.update({"domains": ';'.join(domains)}) + req.update({"day": log_date}) + + body = json.dumps(req) + url = '{0}/v2/tune/log/list'.format(self.server) + return self.__post(url, body) + + def put_httpsconf(self, name, certid, forceHttps=False): + """ + 修改证书,文档 https://developer.qiniu.com/fusion/4246/the-domain-name#11 + + Args: + name: 域名 + certid: 证书id,从上传或者获取证书列表里拿到证书id + forceHttps: 是否强制https跳转 + + Returns: + {} + """ + req = {} + req.update({"certid": certid}) + req.update({"forceHttps": forceHttps}) + + body = json.dumps(req) + url = '{0}/domain/{1}/httpsconf'.format(self.server, name) + return self.__post(url, body) + + def __post(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._post_with_auth_and_headers(url, data, self.auth, headers) + + +class DomainManager(object): + def __init__(self, auth): + self.auth = auth + self.server = 'http://api.qiniu.com' + + def create_domain(self, name, body): + """ + 创建域名,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name + + Args: + name: 域名, 如果是泛域名,必须以点号 . 开头 + body: 创建域名参数 + Returns: + {} + """ + url = '{0}/domain/{1}'.format(self.server, name) + return self.__post(url, body) + + def domain_online(self, name): + """ + 上线域名,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#6 + + Args: + name: 域名, 如果是泛域名,必须以点号 . 开头 + Returns: + {} + """ + url = '{0}/domain/{1}/online'.format(self.server, name) + return http._post_with_qiniu_mac(url, None, self.auth) + + def domain_offline(self, name): + """ + 下线域名,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#5 + + Args: + name: 域名, 如果是泛域名,必须以点号 . 开头 + Returns: + {} + """ + url = '{0}/domain/{1}/offline'.format(self.server, name) + return http._post_with_qiniu_mac(url, None, self.auth) + + def delete_domain(self, name): + """ + 删除域名,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#8 + + Args: + name: 域名, 如果是泛域名,必须以点号 .
开头 + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/domain/{1}'.format(self.server, name) + return self.__del(url) + + def get_domain(self, name): + """ + 获取域名信息,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name + + Args: + name: 域名, 如果是泛域名,必须以点号 . 开头 + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/domain/{1}'.format(self.server, name) + return self.__get(url) + + def put_httpsconf(self, name, certid, forceHttps): + """ + 修改证书,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#11 + + Args: + domains: 域名name + CertID: 证书id,从上传或者获取证书列表里拿到证书id + ForceHttps: 是否强制https跳转 + + Returns: + {} + """ + req = {} + req.update({"certid": certid}) + req.update({"forceHttps": forceHttps}) + + body = json.dumps(req) + url = '{0}/domain/{1}/httpsconf'.format(self.server, name) + return self.__put(url, body) + + def create_sslcert(self, name, common_name, pri, ca): + """ + 修改证书,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#11 + + Args: + name: 证书名称 + common_name: 相关域名 + pri: 证书私钥 + ca: 证书内容 + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回dict{certID: },失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + + + """ + req = {} + req.update({"name": name}) + req.update({"common_name": common_name}) + req.update({"pri": pri}) + req.update({"ca": ca}) + + body = json.dumps(req) + url = '{0}/sslcert'.format(self.server) + return self.__post(url, body) + + def __post(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._post_with_auth_and_headers(url, data, self.auth, headers) + + def __put(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._put_with_auth_and_headers(url, data, self.auth, headers) + + def __get(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._get_with_auth_and_headers(url, data, self.auth, headers) + + def __del(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._delete_with_qiniu_mac_and_headers(url, data, self.auth, headers) + + +def create_timestamp_anti_leech_url(host, file_name, query_string, encrypt_key, deadline): + """ + 创建时间戳防盗链 + + Args: + host: 带访问协议的域名 + file_name: 原始文件名,不需要urlencode + query_string: 查询参数,不需要urlencode + encrypt_key: 时间戳防盗链密钥 + deadline: 链接有效期时间戳(以秒为单位) + + Returns: + 带时间戳防盗链鉴权访问链接 + """ + if query_string: + url_to_sign = '{0}/{1}?{2}'.format(host, urlencode(file_name), query_string) + else: + url_to_sign = '{0}/{1}'.format(host, urlencode(file_name)) + + path = '/{0}'.format(urlencode(file_name)) + expire_hex = str(hex(deadline))[2:] + str_to_sign = '{0}{1}{2}'.format(encrypt_key, path, expire_hex).encode() + sign_str = hashlib.md5(str_to_sign).hexdigest() + + if query_string: + signed_url = '{0}&sign={1}&t={2}'.format(url_to_sign, sign_str, expire_hex) + else: + signed_url = '{0}?sign={1}&t={2}'.format(url_to_sign, sign_str, expire_hex) + + return signed_url diff --git a/qiniu/services/compute/__init__.py b/qiniu/services/compute/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qiniu/services/compute/app.py b/qiniu/services/compute/app.py new file mode 100644 index 00000000..614ff668 --- /dev/null +++ b/qiniu/services/compute/app.py @@ -0,0 +1,224 @@ +# -*- coding: utf-8 -*- +from qiniu import http, QiniuMacAuth +from .config import KIRK_HOST +from .qcos_api import QcosClient + + +class AccountClient(object): + """客户端入口 + + 
使用账号密钥生成账号客户端,可以进一步: + 1、获取和操作账号数据 + 2、获得部署的应用的客户端 + + 属性: + auth: 账号管理密钥对,QiniuMacAuth对象 + host: API host,在『内网模式』下使用时,auth=None,会自动使用 apiproxy 服务 + + 接口: + get_qcos_client(app_uri) + create_qcos_client(app_uri) + get_app_keys(app_uri) + get_valid_app_auth(app_uri) + get_account_info() + get_app_region_products(app_uri) + get_region_products(region) + list_regions() + list_apps() + create_app(args) + delete_app(app_uri) + + """ + + def __init__(self, auth, host=None): + self.auth = auth + self.qcos_clients = {} + if (auth is None): + self.host = KIRK_HOST['APPPROXY'] + else: + self.host = host or KIRK_HOST['APPGLOBAL'] + acc, info = self.get_account_info() + self.uri = acc.get('name') + + def get_qcos_client(self, app_uri): + """获得资源管理客户端 + 缓存,但不是线程安全的 + """ + + client = self.qcos_clients.get(app_uri) + if (client is None): + client = self.create_qcos_client(app_uri) + self.qcos_clients[app_uri] = client + + return client + + def create_qcos_client(self, app_uri): + """创建资源管理客户端 + + """ + + if (self.auth is None): + return QcosClient(None) + + products = self.get_app_region_products(app_uri) + auth = self.get_valid_app_auth(app_uri) + + if products is None or auth is None: + return None + + return QcosClient(auth, products.get('api')) + + def get_app_keys(self, app_uri): + """获得账号下应用的密钥 + + 列出指定应用的密钥,仅当访问者对指定应用有管理权限时有效: + 用户对创建的应用有管理权限。 + 用户对使用的第三方应用没有管理权限,第三方应用的运维方有管理权限。 + + Args: + - app_uri: 应用的完整标识 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回秘钥列表,失败返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/apps/{1}/keys'.format(self.host, app_uri) + return http._get_with_qiniu_mac(url, None, self.auth) + + def get_valid_app_auth(self, app_uri): + """获得账号下可用的应用的密钥 + + 列出指定应用的可用密钥 + + Args: + - app_uri: 应用的完整标识 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回可用秘钥列表,失败返回None + - ResponseInfo 请求的Response信息 + """ + + ret, retInfo = self.get_app_keys(app_uri) + + if ret is None: + return None + + for k in ret: + if (k.get('state') == 'enabled'): + return QiniuMacAuth(k.get('ak'), k.get('sk')) + + return None + + def get_account_info(self): + """获得当前账号的信息 + + 查看当前请求方(请求鉴权使用的 AccessKey 的属主)的账号信息。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回用户信息,失败返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/info'.format(self.host) + return http._get_with_qiniu_mac(url, None, self.auth) + + def get_app_region_products(self, app_uri): + """获得指定应用所在区域的产品信息 + + Args: + - app_uri: 应用的完整标识 + + Returns: + 返回产品信息列表,若失败则返回None + """ + apps, retInfo = self.list_apps() + if apps is None: + return None + + for app in apps: + if (app.get('uri') == app_uri): + return self.get_region_products(app.get('region')) + + return + + def get_region_products(self, region): + """获得指定区域的产品信息 + + Args: + - region: 区域,如:"nq" + + Returns: + 返回该区域的产品信息,若失败则返回None + """ + + regions, retInfo = self.list_regions() + if regions is None: + return None + + for r in regions: + if r.get('name') == region: + return r.get('products') + + def list_regions(self): + """获得账号可见的区域的信息 + + 列出当前用户所有可使用的区域。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回区域列表,失败返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/regions'.format(self.host) + return http._get_with_qiniu_mac(url, None, self.auth) + + def list_apps(self): + """获得当前账号的应用列表 + + 列出所属应用为当前请求方的应用列表。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回应用列表,失败返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/apps'.format(self.host) + return http._get_with_qiniu_mac(url, None, self.auth) + + def create_app(self, args): + """创建应用 + + 
在指定区域创建一个新应用,所属应用为当前请求方。 + + Args: + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + - result 成功返回所创建的应用信息,若失败则返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/apps'.format(self.host) + return http._post_with_qiniu_mac(url, args, self.auth) + + def delete_app(self, app_uri): + """删除应用 + + 删除指定标识的应用,当前请求方对该应用应有删除权限。 + + Args: + - app_uri: 应用的完整标识 + + Returns: + - result 成功返回空dict{},若失败则返回None + - ResponseInfo 请求的Response信息 + """ + + url = '{0}/v3/apps/{1}'.format(self.host, app_uri) + return http._delete_with_qiniu_mac(url, None, self.auth) diff --git a/qiniu/services/compute/config.py b/qiniu/services/compute/config.py new file mode 100644 index 00000000..045ed784 --- /dev/null +++ b/qiniu/services/compute/config.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +KIRK_HOST = { + 'APPGLOBAL': "https://app-api.qiniu.com", # 公有云 APP API + 'APPPROXY': "http://app.qcos.qiniu", # 内网 APP API + 'APIPROXY': "http://api.qcos.qiniu", # 内网 API +} + +CONTAINER_UINT_TYPE = { + '1U1G': '单核(CPU),1GB(内存)', + '1U2G': '单核(CPU),2GB(内存)', + '1U4G': '单核(CPU),4GB(内存)', + '1U8G': '单核(CPU),8GB(内存)', + '2U2G': '双核(CPU),2GB(内存)', + '2U4G': '双核(CPU),4GB(内存)', + '2U8G': '双核(CPU),8GB(内存)', + '2U16G': '双核(CPU),16GB(内存)', + '4U8G': '四核(CPU),8GB(内存)', + '4U16G': '四核(CPU),16GB(内存)', + '8U16G': '八核(CPU),16GB(内存)', +} diff --git a/qiniu/services/compute/qcos_api.py b/qiniu/services/compute/qcos_api.py new file mode 100644 index 00000000..250a4bf6 --- /dev/null +++ b/qiniu/services/compute/qcos_api.py @@ -0,0 +1,694 @@ +# -*- coding: utf-8 -*- +from qiniu import http +from .config import KIRK_HOST + + +class QcosClient(object): + """资源管理客户端 + + 使用应用密钥生成资源管理客户端,可以进一步: + 1、部署服务和容器,获得信息 + 2、创建网络资源,获得信息 + + 属性: + auth: 应用密钥对,QiniuMacAuth对象 + host: API host,在『内网模式』下使用时,auth=None,会自动使用 apiproxy 服务,只能管理当前容器所在的应用资源。 + + 接口: + list_stacks() + create_stack(args) + delete_stack(stack) + get_stack(stack) + start_stack(stack) + stop_stack(stack) + + list_services(stack) + create_service(stack, args) + get_service_inspect(stack, service) + start_service(stack, service) + stop_service(stack, service) + update_service(stack, service, args) + scale_service(stack, service, args) + delete_service(stack, service) + create_service_volume(stack, service, volume, args) + extend_service_volume(stack, service, volume, args) + delete_service_volume(stack, service, volume) + + list_containers(args) + get_container_inspect(ip) + start_container(ip) + stop_container(ip) + restart_container(ip) + + list_aps() + create_ap(args) + search_ap(mode, query) + get_ap(apid) + update_ap(apid, args) + set_ap_port(apid, port, args) + delete_ap(apid) + publish_ap(apid, args) + unpublish_ap(apid) + get_ap_port_healthcheck(apid, port) + set_ap_port_container(apid, port, args) + disable_ap_port(apid, port) + enable_ap_port(apid, port) + get_ap_providers() + get_web_proxy(backend) + """ + + def __init__(self, auth, host=None): + self.auth = auth + if auth is None: + self.host = KIRK_HOST['APIPROXY'] + else: + self.host = host + + def list_stacks(self): + """获得服务组列表 + + 列出当前应用的所有服务组信息。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回服务组列表[, , ...],失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks'.format(self.host) + return self.__get(url) + + def create_stack(self, args): + """创建服务组 + + 创建新一个指定名称的服务组,并创建其下的服务。 + + Args: + - args: 服务组描述,参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = 
'{0}/v3/stacks'.format(self.host) + return self.__post(url, args) + + def delete_stack(self, stack): + """删除服务组 + + 删除服务组内所有服务并销毁服务组。 + + Args: + - stack: 服务所属的服务组名称 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}'.format(self.host, stack) + return self.__delete(url) + + def get_stack(self, stack): + """获取服务组 + + 查看服务组的属性信息。 + + Args: + - stack: 服务所属的服务组名称 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回stack信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}'.format(self.host, stack) + return self.__get(url) + + def start_stack(self, stack): + """启动服务组 + + 启动服务组中的所有停止状态的服务。 + + Args: + - stack: 服务所属的服务组名称 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/start'.format(self.host, stack) + return self.__post(url) + + def stop_stack(self, stack): + """停止服务组 + + 停止服务组中所有运行状态的服务。 + + Args: + - stack: 服务所属的服务组名称 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/stop'.format(self.host, stack) + return self.__post(url) + + def list_services(self, stack): + """获得服务列表 + + 列出指定名称的服务组内所有的服务, 返回一组详细的服务信息。 + + Args: + - stack: 服务所属的服务组名称 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回服务信息列表[, , ...],失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services'.format(self.host, stack) + return self.__get(url) + + def create_service(self, stack, args): + """创建服务 + + 创建一个服务,平台会异步地按模板分配资源并部署所有容器。 + + Args: + - stack: 服务所属的服务组名称 + - args: 服务具体描述请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services'.format(self.host, stack) + return self.__post(url, args) + + def delete_service(self, stack, service): + """删除服务 + + 删除指定名称服务,并自动销毁服务已部署的所有容器和存储卷。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service) + return self.__delete(url) + + def get_service_inspect(self, stack, service): + """查看服务 + + 查看指定名称服务的属性。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回服务信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/inspect'.format(self.host, stack, service) + return self.__get(url) + + def start_service(self, stack, service): + """启动服务 + + 启动指定名称服务的所有容器。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/start'.format(self.host, stack, service) + return self.__post(url) + + def stop_service(self, stack, service): + """停止服务 + + 停止指定名称服务的所有容器。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/stop'.format(self.host, stack, service) + return self.__post(url) + + def update_service(self, stack, service, args): + """更新服务 + + 更新指定名称服务的配置如容器镜像等参数,容器被重新部署后生效。 + 如果指定manualUpdate参数,则需要额外调用 部署服务 接口并指定参数进行部署;处于人工升级模式的服务禁止执行其他修改操作。 + 如果不指定manualUpdate参数,平台会自动完成部署。 + + Args: + - 
stack: 服务所属的服务组名称 + - service: 服务名 + - args: 服务具体描述请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service) + return self.__post(url, args) + + def scale_service(self, stack, service, args): + """扩容/缩容服务 + + 更新指定名称服务的配置如容器镜像等参数,容器被重新部署后生效。 + 如果指定manualUpdate参数,则需要额外调用 部署服务 接口并指定参数进行部署;处于人工升级模式的服务禁止执行其他修改操作。 + 如果不指定manualUpdate参数,平台会自动完成部署。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/scale'.format(self.host, stack, service) + return self.__post(url, args) + + def create_service_volume(self, stack, service, args): + """创建存储卷 + + 为指定名称的服务增加存储卷资源,并挂载到部署的容器中。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/volumes'.format(self.host, stack, service) + return self.__post(url, args) + + def extend_service_volume(self, stack, service, volume, args): + """扩容存储卷 + + 为指定名称的服务增加存储卷资源,并挂载到部署的容器中。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + - volume: 存储卷名 + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/volumes/{3}/extend'.format(self.host, stack, service, volume) + return self.__post(url, args) + + def delete_service_volume(self, stack, service, volume): + """删除存储卷 + + 从部署的容器中移除挂载,并销毁指定服务下指定名称的存储卷, 并重新启动该容器。 + + Args: + - stack: 服务所属的服务组名称 + - service: 服务名 + - volume: 存储卷名 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/stacks/{1}/services/{2}/volumes/{3}'.format(self.host, stack, service, volume) + return self.__delete(url) + + def list_containers(self, stack=None, service=None): + """列出容器列表 + + 列出应用内所有部署的容器, 返回一组容器IP。 + + Args: + - stack: 要列出容器的服务组名(可不填,表示默认列出所有) + - service: 要列出容器服务的服务名(可不填,表示默认列出所有) + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回容器的ip数组,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/containers'.format(self.host) + params = {} + if stack is not None: + params['stack'] = stack + if service is not None: + params['service'] = service + return self.__get(url, params or None) + + def get_container_inspect(self, ip): + """查看容器 + + 查看指定IP的容器,返回容器属性。 + + Args: + - ip: 容器ip + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回容器的信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/containers/{1}/inspect'.format(self.host, ip) + return self.__get(url) + + def start_container(self, ip): + """启动容器 + + 启动指定IP的容器。 + + Args: + - ip: 容器ip + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/containers/{1}/start'.format(self.host, ip) + return self.__post(url) + + def stop_container(self, ip): + """停止容器 + + 停止指定IP的容器。 + + Args: + - ip: 容器ip + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/containers/{1}/stop'.format(self.host, ip) + return 
self.__post(url) + + def restart_container(self, ip): + """重启容器 + + 重启指定IP的容器。 + + Args: + - ip: 容器ip + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/containers/{1}/restart'.format(self.host, ip) + return self.__post(url) + + def list_aps(self): + """列出接入点 + + 列出当前应用的所有接入点。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回接入点列表,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps'.format(self.host) + return self.__get(url) + + def create_ap(self, args): + """申请接入点 + + 申请指定配置的接入点资源。 + + Args: + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回申请到的接入点信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps'.format(self.host) + return self.__post(url, args) + + def search_ap(self, mode, query): + """搜索接入点 + + 查看指定接入点的所有配置信息,包括所有监听端口的配置。 + + Args: + - mode: 搜索模式,可以是domain、ip、host + - query: 搜索文本 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回搜索结果,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/search?{1}={2}'.format(self.host, mode, query) + return self.__get(url) + + def get_ap(self, apid): + """查看接入点 + + 给出接入点的域名或IP,查看配置信息,包括所有监听端口的配置。 + + Args: + - apid: 接入点ID + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回接入点信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}'.format(self.host, apid) + return self.__get(url) + + def update_ap(self, apid, args): + """更新接入点 + + 更新指定接入点的配置,如带宽。 + + Args: + - apid: 接入点ID + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}'.format(self.host, apid) + return self.__post(url, args) + + def set_ap_port(self, apid, port, args): + """更新接入点端口配置 + + 更新接入点指定端口的配置。 + + Args: + - apid: 接入点ID + - port: 要设置的端口号 + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/{2}'.format(self.host, apid, port) + return self.__post(url, args) + + def delete_ap(self, apid): + """释放接入点 + + 销毁指定接入点资源。 + + Args: + - apid: 接入点ID + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}'.format(self.host, apid) + return self.__delete(url) + + def publish_ap(self, apid, args): + """绑定自定义域名 + + 绑定用户自定义的域名,仅对公网域名模式接入点生效。 + + Args: + - apid: 接入点ID + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/publish'.format(self.host, apid) + return self.__post(url, args) + + def unpublish_ap(self, apid, args): + """解绑自定义域名 + + 解绑用户自定义的域名,仅对公网域名模式接入点生效。 + + Args: + - apid: 接入点ID + - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/unpublish'.format(self.host, apid) + return self.__post(url, args) + + def get_ap_port_healthcheck(self, apid, port): + """查看健康检查结果 + + 检查接入点的指定端口的后端健康状况。 + + Args: + - apid: 接入点ID + - port: 要设置的端口号 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回健康状况,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/{2}/healthcheck'.format(self.host, apid, port) + 
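        # Assumption: the healthcheck response describes the health of each backend behind this
        # port; see the API docs referenced in the docstring for the exact fields.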
return self.__get(url) + + def set_ap_port_container(self, apid, port, args): + """调整后端实例配置 + + 调整接入点指定后端实例(容器)的配置,例如临时禁用流量等。 + + Args: + - apid: 接入点ID + - port: 要设置的端口号 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/{2}/setcontainer'.format(self.host, apid, port) + return self.__post(url, args) + + def disable_ap_port(self, apid, port): + """临时关闭接入点端口 + + 临时关闭接入点端口,仅对公网域名,公网ip有效。 + + Args: + - apid: 接入点ID + - port: 要设置的端口号 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/{2}/disable'.format(self.host, apid, port) + return self.__post(url) + + def enable_ap_port(self, apid, port): + """开启接入点端口 + + 开启临时关闭的接入点端口,仅对公网域名,公网ip有效。 + + Args: + - apid: 接入点ID + - port: 要设置的端口号 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回空dict{},失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/{1}/{2}/enable'.format(self.host, apid, port) + return self.__post(url) + + def get_ap_providers(self): + """列出入口提供商 + + 列出当前支持的入口提供商,仅对申请公网IP模式接入点有效。 + 注:公网IP供应商telecom=电信,unicom=联通,mobile=移动。 + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回接入商列表,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/aps/providers'.format(self.host) + return self.__get(url) + + def get_web_proxy(self, backend): + """获取一次性代理地址 + + 对内网地址获取一个一次性的外部可访问的代理地址 + + Args: + - backend: 后端地址,如:"10.128.0.1:8080" + + Returns: + 返回一个tuple对象,其格式为(, ) + - result 成功返回代理地址信息,失败返回{"error": ""} + - ResponseInfo 请求的Response信息 + """ + url = '{0}/v3/webproxy'.format(self.host) + return self.__post(url, {'backend': backend}) + + def __post(self, url, data=None): + return http._post_with_qiniu_mac(url, data, self.auth) + + def __get(self, url, params=None): + return http._get_with_qiniu_mac(url, params, self.auth) + + def __delete(self, url): + return http._delete_with_qiniu_mac(url, None, self.auth) diff --git a/qiniu/services/pili/__init__.py b/qiniu/services/pili/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qiniu/services/pili/rtc_server_manager.py b/qiniu/services/pili/rtc_server_manager.py new file mode 100644 index 00000000..ba12bcb1 --- /dev/null +++ b/qiniu/services/pili/rtc_server_manager.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from qiniu import http, Auth +import json + + +class RtcServer(object): + """ + 直播连麦管理类 + 主要涉及了直播连麦管理及操作接口的实现,具体的接口规格可以参考官方文档 https://developer.qiniu.com + Attributes: + auth: 账号管理密钥对,Auth对象 + + """ + + def __init__(self, auth): + self.auth = auth + self.host = 'http://rtc.qiniuapi.com' + + def create_app(self, data): + return self.__post(self.host + '/v3/apps', data) + + def get_app(self, app_id=None): + if app_id: + return self.__get(self.host + '/v3/apps/%s' % app_id) + else: + return self.__get(self.host + '/v3/apps') + + def delete_app(self, app_id): + return self.__delete(self.host + '/v3/apps/%s' % app_id) + + def update_app(self, app_id, data): + return self.__post(self.host + '/v3/apps/%s' % app_id, data) + + def list_user(self, app_id, room_name): + return self.__get(self.host + '/v3/apps/%s/rooms/%s/users' % (app_id, room_name)) + + def kick_user(self, app_id, room_name, user_id): + return self.__delete(self.host + '/v3/apps/%s/rooms/%s/users/%s' % (app_id, room_name, user_id)) + + def list_active_rooms(self, app_id, room_name_prefix=None): + if room_name_prefix: + return self.__get(self.host + '/v3/apps/%s/rooms?prefix=%s' % (app_id, room_name_prefix)) + else: + 
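            # No prefix given: list every active room under the app.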
return self.__get(self.host + '/v3/apps/%s/rooms' % app_id) + + def __post(self, url, data=None): + return http._post_with_qiniu_mac(url, data, self.auth) + + def __get(self, url, params=None): + return http._get_with_qiniu_mac(url, params, self.auth) + + def __delete(self, url, params=None): + return http._delete_with_qiniu_mac(url, params, self.auth) + + +def get_room_token(access_key, secret_key, room_access): + auth = Auth(access_key, secret_key) + room_access_str = json.dumps(room_access) + room_token = auth.token_with_data(room_access_str) + return room_token diff --git a/qiniu/services/processing/__init__.py b/qiniu/services/processing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qiniu/services/processing/cmd.py b/qiniu/services/processing/cmd.py new file mode 100644 index 00000000..6feaba74 --- /dev/null +++ b/qiniu/services/processing/cmd.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +from qiniu.utils import entry + + +def build_op(cmd, first_arg, **kwargs): + op = [cmd] + if first_arg is not None: + op.append(first_arg) + + for k, v in kwargs.items(): + op.append('{0}/{1}'.format(k, v)) + + return '/'.join(op) + + +def pipe_cmd(*cmds): + return '|'.join(cmds) + + +def op_save(op, bucket, key): + return pipe_cmd(op, 'saveas/' + entry(bucket, key)) diff --git a/qiniu/services/processing/pfop.py b/qiniu/services/processing/pfop.py new file mode 100644 index 00000000..4b2641e2 --- /dev/null +++ b/qiniu/services/processing/pfop.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +from qiniu import config +from qiniu import http + + +class PersistentFop(object): + """持久化处理类 + + 该类用于主动触发异步持久化操作,具体规格参考: + https://developer.qiniu.com/dora/api/persistent-data-processing-pfop + + Attributes: + auth: 账号管理密钥对,Auth对象 + bucket: 操作资源所在空间 + pipeline: 多媒体处理队列,详见 https://developer.qiniu.com/dora/6499/tasks-and-workflows + notify_url: 持久化处理结果通知URL + """ + + def __init__(self, auth, bucket, pipeline=None, notify_url=None): + """初始化持久化处理类""" + self.auth = auth + self.bucket = bucket + self.pipeline = pipeline + self.notify_url = notify_url + + def execute(self, key, fops=None, force=None, persistent_type=None, workflow_template_id=None): + """ + 执行持久化处理 + + Parameters + ---------- + key: str + 待处理的源文件 + fops: list[str], optional + 处理详细操作,规格详见 https://developer.qiniu.com/dora/manual/1291/persistent-data-processing-pfop + 与 template_id 二选一 + force: int or str, optional + 强制执行持久化处理开关 + persistent_type: int or str, optional + 持久化处理类型,为 '1' 时开启闲时任务 + template_id: str, optional + 与 fops 二选一 + Returns + ------- + ret: dict + 持久化处理的 persistentId,类似 {"persistentId": 5476bedf7823de4068253bae}; + resp: ResponseInfo + """ + if not fops and not workflow_template_id: + raise ValueError('Must provide one of fops or template_id') + data = { + 'bucket': self.bucket, + 'key': key, + } + if self.pipeline: + data['pipeline'] = self.pipeline + if self.notify_url: + data['notifyURL'] = self.notify_url + if fops: + data['fops'] = ';'.join(fops) + if force == 1 or force == '1': + data['force'] = str(force) + if persistent_type and type(int(persistent_type)) is int: + data['type'] = str(persistent_type) + if workflow_template_id: + data['workflowTemplateID'] = workflow_template_id + + url = '{0}/pfop'.format(config.get_default('default_api_host')) + return http._post_with_auth(url, data, self.auth) + + def get_status(self, persistent_id): + """ + 获取持久化处理状态 + + Parameters + ---------- + persistent_id: str + + Returns + ------- + ret: dict + 持久化处理的状态,详见 
https://developer.qiniu.com/dora/1294/persistent-processing-status-query-prefop + resp: ResponseInfo + """ + url = '{0}/status/get/prefop'.format(config.get_default('default_api_host')) + data = { + 'id': persistent_id + } + return http._get_with_auth(url, data, self.auth) diff --git a/qiniu/services/sms/__init__.py b/qiniu/services/sms/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qiniu/services/sms/sms.py b/qiniu/services/sms/sms.py new file mode 100644 index 00000000..4c279083 --- /dev/null +++ b/qiniu/services/sms/sms.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- + +from qiniu import http +import json + + +class Sms(object): + def __init__(self, auth): + self.auth = auth + self.server = 'https://sms.qiniuapi.com' + + def createSignature(self, signature, source, pics=None): + """ + *创建签名 + *signature: string类型,必填,【长度限制8个字符内】超过长度会报错 + *source: string类型,必填,申请签名时必须指定签名来源。取值范围为: + enterprises_and_institutions 企事业单位的全称或简称 + website 工信部备案网站的全称或简称 + app APP应用的全称或简称 + public_number_or_small_program 公众号或小程序的全称或简称 + store_name 电商平台店铺名的全称或简称 + trade_name 商标名的全称或简称 + *pics: 签名对应的资质证明图片进行 base64 编码格式转换后的字符串 + * @ return: 类型array + { + "signature_id": < signature_id > + } + """ + req = {} + req['signature'] = signature + req['source'] = source + if pics: + req['pics'] = pics + body = json.dumps(req) + url = '{0}/v1/signature'.format(self.server) + return self.__post(url, body) + + def querySignature(self, audit_status=None, page=1, page_size=20): + """ + 查询签名 + * audit_status: 审核状态 string 类型,可选,取值范围为: "passed"(通过), "rejected"(未通过), "reviewing"(审核中) + * page:页码 int 类型, + * page_size: 分页大小 int 类型,可选, 默认为20 + *@return: 类型array { + "items": [{ + "id": string, + "signature": string, + "source": string, + "audit_status": string, + "reject_reason": string, + "created_at": int64, + "updated_at": int64 + }...], + "total": int, + "page": int, + "page_size": int, + } + """ + url = '{0}/v1/signature'.format(self.server) + if audit_status: + url = '{0}?audit_status={1}&page={2}&page_size={3}'.format(url, audit_status, page, page_size) + else: + url = '{0}?page={1}&page_size={2}'.format(url, page, page_size) + return self.__get(url) + + def updateSignature(self, id, signature): + """ + 编辑签名 + * id 签名id : string 类型,必填, + * signature: string 类型,必填, + request 类型array { + "signature": string + } + :return: + """ + url = '{0}/v1/signature/{1}'.format(self.server, id) + req = {} + req['signature'] = signature + body = json.dumps(req) + return self.__put(url, body) + + def deleteSignature(self, id): + + """ + 删除辑签名 + * id 签名id : string 类型,必填, + * @retrun : 请求成功 HTTP 状态码为 200 + + """ + url = '{0}/v1/signature/{1}'.format(self.server, id) + return self.__delete(url) + + def createTemplate(self, name, template, type, description, signature_id): + """ + 创建模版 + :param name: 模板名称 string 类型 ,必填 + :param template: 模板内容 string 类型,必填 + :param type: 模板类型 string 类型,必填, + 取值范围为: notification (通知类短信), verification (验证码短信), marketing (营销类短信) + :param description: 申请理由简述 string 类型,必填 + :param signature_id: 已经审核通过的签名 string 类型,必填 + :return: 类型 array { + "template_id": string + } + """ + url = '{0}/v1/template'.format(self.server) + req = {} + req['name'] = name + req['template'] = template + req['type'] = type + req['description'] = description + req['signature_id'] = signature_id + body = json.dumps(req) + return self.__post(url, body) + + def queryTemplate(self, audit_status, page=1, page_size=20): + """ + 查询模版 + :param audit_status: 审核状态, 取值范围为: passed (通过), rejected (未通过), reviewing (审核中) + :param page: 
页码。默认为 1 + :param page_size: 分页大小。默认为 20 + :return:{ + "items": [{ + "id": string, + "name": string, + "template": string, + "audit_status": string, + "reject_reason": string, + "type": string, + "signature_id": string, // 模版绑定的签名ID + "signature_text": string, // 模版绑定的签名内容 + "created_at": int64, + "updated_at": int64 + }...], + "total": int, + "page": int, + "page_size": int + } + """ + url = '{0}/v1/template'.format(self.server) + if audit_status: + url = '{0}?audit_status={1}&page={2}&page_size={3}'.format(url, audit_status, page, page_size) + else: + url = '{0}?page={1}&page_size={2}'.format(url, page, page_size) + return self.__get(url) + + def updateTemplate(self, id, name, template, description, signature_id): + """ + 更新模版 + :param id: template_id + :param name: 模板名称 string 类型 ,必填 + :param template: 模板内容 string 类型,必填 + :param description: 申请理由简述 string 类型,必填 + :param signature_id: 已经审核通过的签名 string 类型,必填 + :return: 请求成功 HTTP 状态码为 200 + """ + url = '{0}/v1/template/{1}'.format(self.server, id) + req = {} + req['name'] = name + req['template'] = template + req['description'] = description + req['signature_id'] = signature_id + body = json.dumps(req) + return self.__put(url, body) + + def deleteTemplate(self, id): + """ + 删除模版 + :param id: template_id + :return: 请求成功 HTTP 状态码为 200 + """ + url = '{0}/v1/template/{1}'.format(self.server, id) + return self.__delete(url) + + def sendMessage(self, template_id, mobiles, parameters): + """ + 发送短信 + :param template_id: 模板 ID + :param mobiles: 手机号 + :param parameters: 自定义魔法变量,变量设置在创建模板时,参数template指定 + :return:{ + "job_id": string + } + """ + url = '{0}/v1/message'.format(self.server) + req = {} + req['template_id'] = template_id + req['mobiles'] = mobiles + req['parameters'] = parameters + body = json.dumps(req) + return self.__post(url, body) + + def get_messages_info(self): + """ + 查询发送记录,文档:https://developer.qiniu.com/sms/api/5852/query-send-sms + :return: + {} + """ + url = "{0}/v1/messages".format(self.server) + return self.__get(url) + + def __post(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._post_with_qiniu_mac_and_headers(url, data, self.auth, headers) + + def __get(self, url, params=None): + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + return http._get_with_qiniu_mac_and_headers(url, params, self.auth, headers) + + def __put(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._put_with_qiniu_mac_and_headers(url, data, self.auth, headers) + + def __delete(self, url, data=None): + headers = {'Content-Type': 'application/json'} + return http._delete_with_qiniu_mac_and_headers(url, data, self.auth, headers) diff --git a/qiniu/services/storage/__init__.py b/qiniu/services/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qiniu/services/storage/_bucket_default_retrier.py b/qiniu/services/storage/_bucket_default_retrier.py new file mode 100644 index 00000000..70758e30 --- /dev/null +++ b/qiniu/services/storage/_bucket_default_retrier.py @@ -0,0 +1,25 @@ +from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy +from qiniu.http.regions_retry_policy import RegionsRetryPolicy +from qiniu.retry import Retrier + + +def get_default_retrier( + regions_provider, + service_names, + preferred_endpoints_provider=None, +): + if not service_names: + raise ValueError('service_names should not be empty') + + retry_policies = [ + EndpointsRetryPolicy( + skip_init_context=True + ), + RegionsRetryPolicy( + 
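            # Presumably the regions policy initializes and switches the region context, while the
            # endpoints policy above retries across the endpoints of the current region.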
regions_provider=regions_provider, + service_names=service_names, + preferred_endpoints_provider=preferred_endpoints_provider + ) + ] + + return Retrier(retry_policies) diff --git a/qiniu/services/storage/bucket.py b/qiniu/services/storage/bucket.py new file mode 100644 index 00000000..5e21b6c4 --- /dev/null +++ b/qiniu/services/storage/bucket.py @@ -0,0 +1,910 @@ +# -*- coding: utf-8 -*- +from qiniu import config, QiniuMacAuth +from qiniu import http +from qiniu.utils import urlsafe_base64_encode, entry, decode_entry +from qiniu.http.endpoint import Endpoint +from qiniu.http.region import Region, ServiceName +from qiniu.http.regions_provider import get_default_regions_provider + +from ._bucket_default_retrier import get_default_retrier + + +class BucketManager(object): + """空间管理类 + + 主要涉及了空间资源管理及批量操作接口的实现,具体的接口规格可以参考: + https://developer.qiniu.com/kodo/1274/rs + + Attributes: + auth: 账号管理密钥对,Auth对象 + """ + + def __init__( + self, + auth, + zone=None, + regions=None, + query_regions_endpoints=None, + preferred_scheme='http' + ): + """ + Parameters + ---------- + auth: Auth + zone: LegacyRegion + regions: list[Region] + query_regions_endpoints: list[Endpoint] + preferred_scheme: str, default='http' + """ + self.auth = auth + self.mac_auth = QiniuMacAuth( + auth.get_access_key(), + auth.get_secret_key(), + auth.disable_qiniu_timestamp_signature) + + if zone is None: + self.zone = config.get_default('default_zone') + else: + self.zone = zone + + self.regions = regions + self.query_regions_endpoints = query_regions_endpoints + self.preferred_scheme = preferred_scheme + + def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None): + """前缀查询: + + 1. 首次请求 marker = None + 2. 无论 err 值如何,均应该先看 ret.get('items') 是否有内容 + 3. 如果后续没有更多数据,err 返回 EOF,marker 返回 None(但不通过该特征来判断是否结束) + 具体规格参考: + https://developer.qiniu.com/kodo/api/list + + Args: + bucket: 空间名 + prefix: 列举前缀 + marker: 列举标识符 + limit: 单次列举个数限制 + delimiter: 指定目录分隔符 + + Returns: + 一个dict变量,类似 {"hash": "", "key": ""} + 一个ResponseInfo对象 + 一个EOF信息。 + """ + options = { + 'bucket': bucket, + } + if marker is not None: + options['marker'] = marker + if limit is not None: + options['limit'] = limit + if prefix is not None: + options['prefix'] = prefix + if delimiter is not None: + options['delimiter'] = delimiter + + ret, info = self.__server_do_with_retrier( + bucket, + [ServiceName.RSF], + '/list', + data=options, + method='GET' + ) + + eof = False + if ret and not ret.get('marker'): + eof = True + + return ret, eof, info + + def list_domains(self, bucket): + """获取 Bucket 空间域名 + https://developer.qiniu.com/kodo/3949/get-the-bucket-space-domain + + Args: + bucket: 空间名 + + Returns: + resBody, respInfo + resBody 为绑定的域名列表,格式:["example.com"] + """ + return self.__uc_do_with_retrier('/v2/domains?tbl={0}'.format(bucket)) + + def stat(self, bucket, key): + """获取文件信息: + + 获取资源的元信息,但不返回文件内容,具体规格参考: + https://developer.qiniu.com/kodo/api/1308/stat + + Args: + bucket: 待获取信息资源所在的空间 + key: 待获取资源的文件名 + + Returns: + 一个dict变量,类似: + { + "fsize": 5122935, + "hash": "ljfockr0lOil_bZfyaI2ZY78HWoH", + "mimeType": "application/octet-stream", + "putTime": 13603956734587420 + "type": 0 + } + 一个ResponseInfo对象 + """ + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/stat/{0}'.format(resource) + ) + + def delete(self, bucket, key): + """删除文件: + + 删除指定资源,具体规格参考: + https://developer.qiniu.com/kodo/api/delete + + Args: + bucket: 待获取信息资源所在的空间 + key: 待获取资源的文件名 + + Returns: + 一个dict变量,成功返回NULL,失败返回{"error": ""} + 
一个ResponseInfo对象 + """ + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/delete/{0}'.format(resource) + ) + + def rename(self, bucket, key, key_to, force='false'): + """重命名文件: + + 给资源进行重命名,本质为move操作。 + + Args: + bucket: 待操作资源所在空间 + key: 待操作资源文件名 + key_to: 目标资源文件名 + force: 是否强制覆盖 + + Returns: + 一个dict变量,成功返回NULL,失败返回{"error": ""} + 一个ResponseInfo对象 + """ + return self.move(bucket, key, bucket, key_to, force) + + def move(self, bucket, key, bucket_to, key_to, force='false'): + """移动文件: + + 将资源从一个空间到另一个空间,具体规格参考: + https://developer.qiniu.com/kodo/api/move + + Args: + bucket: 待操作资源所在空间 + bucket_to: 目标资源空间名 + key: 待操作资源文件名 + key_to: 目标资源文件名 + force: 是否强制覆盖 + + Returns: + 一个dict变量,成功返回NULL,失败返回{"error": ""} + 一个ResponseInfo对象 + """ + src = entry(bucket, key) + dst = entry(bucket_to, key_to) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/move/{src}/{dst}/force/{force}'.format( + src=src, + dst=dst, + force=force + ) + ) + + def copy(self, bucket, key, bucket_to, key_to, force='false'): + """复制文件: + + 将指定资源复制为新命名资源,具体规格参考: + https://developer.qiniu.com/kodo/api/copy + + Args: + bucket: 待操作资源所在空间 + bucket_to: 目标资源空间名 + key: 待操作资源文件名 + key_to: 目标资源文件名 + force: 是否强制覆盖 + + Returns: + 一个dict变量,成功返回NULL,失败返回{"error": ""} + 一个ResponseInfo对象 + """ + src = entry(bucket, key) + dst = entry(bucket_to, key_to) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/copy/{src}/{dst}/force/{force}'.format( + src=src, + dst=dst, + force=force + ) + ) + + def fetch(self, url, bucket, key=None, hostscache_dir=None): + """抓取文件: + 从指定URL抓取资源,并将该资源存储到指定空间中,具体规格参考: + https://developer.qiniu.com/kodo/api/fetch + + Args: + url: 指定的URL + bucket: 目标资源空间 + key: 目标资源文件名 + hostscache_dir: deprecated, 此参数不再生效,可修改 get_default_regions_provider 返回对象的属性达成同样功能; + 查询区域缓存文件保存位置 + + Returns: + 一个dict变量: + 成功 返回{'fsize': , 'hash': , 'key': , 'mimeType': } + 失败 返回 None + 一个ResponseInfo对象 + """ + resource = urlsafe_base64_encode(url) + to = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.IO], + '/fetch/{0}/to/{1}'.format(resource, to) + ) + + def prefetch(self, bucket, key, hostscache_dir=None): + """镜像回源预取文件: + + 从镜像源站抓取资源到空间中,如果空间中已经存在,则覆盖该资源,具体规格参考 + https://developer.qiniu.com/kodo/api/prefetch + + Args: + bucket: 待获取资源所在的空间 + key: 代获取资源文件名 + hostscache_dir: deprecated, 此参数不再生效,可修改 get_default_regions_provider 返回对象的属性达成同样功能; + 查询区域缓存文件保存位置 + + Returns: + 一个dict变量,成功返回NULL,失败返回{"error": ""} + 一个ResponseInfo对象 + """ + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.IO], + '/prefetch/{0}'.format(resource) + ) + + def change_mime(self, bucket, key, mime): + """修改文件mimeType: + + 主动修改指定资源的文件类型,具体规格参考: + https://developer.qiniu.com/kodo/api/chgm + + Args: + bucket: 待操作资源所在空间 + key: 待操作资源文件名 + mime: 待操作文件目标mimeType + """ + resource = entry(bucket, key) + encode_mime = urlsafe_base64_encode(mime) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/chgm/{0}/mime/{1}'.format(resource, encode_mime) + ) + + def change_type(self, bucket, key, storage_type): + """修改文件的存储类型 + + 修改文件的存储类型,参考文档: + https://developer.qiniu.com/kodo/3710/chtype + + Args: + bucket: 待操作资源所在空间 + key: 待操作资源文件名 + storage_type: 待操作资源存储类型,0为普通存储,1为低频存储,2 为归档存储,3 为深度归档,4 为归档直读存储 + """ + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/chtype/{0}/type/{1}'.format(resource, storage_type) + ) + + def restoreAr(self, bucket, 
key, freezeAfter_days): + """ + restore_ar 的别名,用于兼容旧版本 + + Args: + bucket: 待操作资源所在空间 + key: 待操作资源文件名 + freezeAfter_days: 解冻有效时长,取值范围 1~7 + """ + return self.restore_ar( + bucket, + key, + freezeAfter_days + ) + + def restore_ar(self, bucket, key, freeze_after_days): + """ + 解冻归档存储、深度归档存储文件 + + 对归档存储、深度归档存储文件,进行解冻操作参考文档: + https://developer.qiniu.com/kodo/6380/restore-archive + + Parameters + ---------- + bucket: str + key: str + freeze_after_days: int + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/restoreAr/{0}/freezeAfterDays/{1}'.format(resource, freeze_after_days) + ) + + def change_status(self, bucket, key, status, cond): + """修改文件的状态 + + 修改文件的存储类型为可用或禁用: + + Args: + bucket: 待操作资源所在空间 + key: 待操作资源文件名 + status: 待操作资源存储类型,0为启用,1为禁用 + """ + resource = entry(bucket, key) + url_resource = '/chstatus/{0}/status/{1}'.format(resource, status) + if cond and isinstance(cond, dict): + condstr = urlsafe_base64_encode( + '&'.join( + '='.join([k, v]) + for k, v in cond.items() + ) + ) + url_resource += '/cond/{0}'.format(condstr) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + url_resource + ) + + def set_object_lifecycle( + self, + bucket, + key, + to_line_after_days=0, + to_archive_after_days=0, + to_deep_archive_after_days=0, + delete_after_days=0, + cond=None, + to_archive_ir_after_days=0 + ): + """ + + 设置对象的生命周期 + + Args: + bucket: 目标空间 + key: 目标资源 + to_line_after_days: 多少天后将文件转为低频存储,设置为 -1 表示取消已设置的转低频存储的生命周期规则, 0 表示不修改转低频生命周期规则。 + to_archive_after_days: 多少天后将文件转为归档存储,设置为 -1 表示取消已设置的转归档存储的生命周期规则, 0 表示不修改转归档生命周期规则。 + to_deep_archive_after_days: 多少天后将文件转为深度归档存储,设置为 -1 表示取消已设置的转深度归档存储的生命周期规则, 0 表示不修改转深度归档生命周期规则 + delete_after_days: 多少天后将文件删除,设置为 -1 表示取消已设置的删除存储的生命周期规则, 0 表示不修改删除存储的生命周期规则。 + cond: 匹配条件,只有条件匹配才会设置成功,当前支持设置 hash、mime、fsize、putTime。 + to_archive_ir_after_days: 多少天后将文件转为归档直读存储,设置为 -1 表示取消已设置的转归档只读存储的生命周期规则, 0 表示不修改转归档只读存储生命周期规则。 + + Returns: + resBody, respInfo + + """ + options = [ + 'toIAAfterDays', str(to_line_after_days), + 'toArchiveIRAfterDays', str(to_archive_ir_after_days), + 'toArchiveAfterDays', str(to_archive_after_days), + 'toDeepArchiveAfterDays', str(to_deep_archive_after_days), + 'deleteAfterDays', str(delete_after_days) + ] + if cond and isinstance(cond, dict): + cond_str = '&'.join( + '='.join([k, v]) + for k, v in cond.items() + ) + options += ['cond', urlsafe_base64_encode(cond_str)] + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + service_names=[ServiceName.RS], + url_resource='/lifecycle/{0}/{1}'.format(resource, '/'.join(options)), + ) + + def batch(self, operations): + """批量操作: + + 在单次请求中进行多个资源管理操作,具体规格参考: + https://developer.qiniu.com/kodo/api/batch + + Args: + operations: 资源管理操作数组,可通过 + + Returns: + 一个dict变量,返回结果类似: + [ + { "code": , "data": }, + { "code": }, + { "code": }, + { "code": }, + { "code": , "data": { "error": "" } }, + ... 
+ ] + 一个ResponseInfo对象 + """ + if not operations: + # change to ValueError when make break changes version + raise Exception('operations is empty') + bucket = '' + for op in operations: + segments = op.split('/') + e = segments[1] if len(segments) >= 2 else '' + bucket, _ = decode_entry(e) + if bucket: + break + if not bucket: + # change to ValueError when make break changes version + raise Exception('bucket is empty') + + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/batch', + {'op': operations} + ) + + def buckets(self): + """获取所有空间名: + + 获取指定账号下所有的空间名。 + + Returns: + 一个dict变量,类似: + [ , , ... ] + 一个ResponseInfo对象 + """ + return self.__uc_do_with_retrier('/buckets') + + def delete_after_days(self, bucket, key, days): + """更新文件生命周期 + + Returns: + 一个dict变量,返回结果类似: + [ + { "code": , "data": }, + { "code": }, + { "code": }, + { "code": }, + { "code": , "data": { "error": "" } }, + ... + ] + 一个ResponseInfo对象 + Args: + bucket: 目标资源空间 + key: 目标资源文件名 + days: 指定天数 + """ + resource = entry(bucket, key) + return self.__server_do_with_retrier( + bucket, + [ServiceName.RS], + '/deleteAfterDays/{0}/{1}'.format(resource, days) + ) + + def mkbucketv3(self, bucket_name, region): + """ + 创建存储空间,全局唯一,其他账号有同名空间就无法创建 + + Args: + bucket_name: 存储空间名 + region: 存储区域 + """ + return self.__uc_do_with_retrier( + '/mkbucketv3/{0}/region/{1}'.format(bucket_name, region) + ) + + def list_bucket(self, region): + """ + 列举存储空间列表 + + Args: + """ + return self.__uc_do_with_retrier('/v3/buckets?region={0}'.format(region)) + + def bucket_info(self, bucket_name): + """ + 获取存储空间信息 + + Args: + bucket_name: 存储空间名 + """ + return self.__uc_do_with_retrier('/v2/bucketInfo?bucket={0}'.format(bucket_name)) + + def bucket_domain(self, bucket_name): + """ + 获取存储空间域名列表 + Args: + bucket_name: 存储空间名 + """ + return self.list_domains(bucket_name) + + def change_bucket_permission(self, bucket_name, private): + """ + 设置 存储空间访问权限 + https://developer.qiniu.com/kodo/api/3946/set-bucket-private + Args: + bucket_name: 存储空间名 + private: 0 公开;1 私有 ,str类型 + """ + return self.__uc_do_with_retrier( + '/private?bucket={0}&private={1}'.format(bucket_name, private) + ) + + def _get_regions_provider(self, bucket_name): + """ + Parameters + ---------- + bucket_name: str + + Returns + ------- + Iterable[Region] + """ + if self.regions: + return self.regions + + # handle compatibility for legacy config + if self.zone and any( + hasattr(self.zone, attr_name) and getattr(self.zone, attr_name) + for attr_name in [ + 'io_host', + 'rs_host', + 'rsf_host', + 'api_host' + ] + ): + return [self.zone] + + # handle compatibility for default_query_region_host + query_regions_endpoints = self.query_regions_endpoints + if not query_regions_endpoints: + query_region_host = config.get_default('default_query_region_host') + query_region_backup_hosts = config.get_default('default_query_region_backup_hosts') + query_regions_endpoints = [ + Endpoint.from_host(h) + for h in [query_region_host] + query_region_backup_hosts + ] + + return get_default_regions_provider( + query_endpoints_provider=query_regions_endpoints, + access_key=self.auth.get_access_key(), + bucket_name=bucket_name, + preferred_scheme=self.preferred_scheme + ) + + def __uc_do_with_retrier(self, url_resource, data=None): + """ + Parameters + ---------- + url_resource: url + data: dict or None + + Returns + ------- + ret: dict or None + resp: ResponseInfo + """ + regions = self.regions + + # ignore self.zone by no uc in it + # handle compatibility for default_uc + if not regions: + uc_host 
= config.get_default('default_uc_host') + uc_backup_hosts = config.get_default('default_uc_backup_hosts') + uc_endpoints = [ + Endpoint.from_host(h) + for h in [uc_host] + uc_backup_hosts + ] + regions = [Region(services={ServiceName.UC: uc_endpoints})] + + retrier = get_default_retrier( + regions_provider=regions, + service_names=[ServiceName.UC] + ) + + attempt = None + for attempt in retrier: + with attempt: + host = attempt.context.get('endpoint').get_value(scheme=self.preferred_scheme) + url = host + url_resource + attempt.result = self.__post(url, data) + ret, resp = attempt.result + if resp.ok() and ret: + return attempt.result + if not resp.need_retry(): + return attempt.result + + if attempt is None: + raise RuntimeError('Retrier is not working. attempt is None') + + return attempt.result + + def __server_do_with_retrier(self, bucket_name, service_names, url_resource, data=None, method='POST'): + """ + Parameters + ---------- + bucket_name: str + service_names: List[ServiceName] + url_resource: str + data: dict or None + method: str + + Returns + ------- + ret: dict or None + resp: ResponseInfo + """ + if not service_names: + raise ValueError('service_names is empty') + + retrier = get_default_retrier( + regions_provider=self._get_regions_provider(bucket_name=bucket_name), + service_names=service_names + ) + + method = method.upper() + if method == 'POST': + send_request = self.__post + elif method == 'GET': + send_request = self.__get + else: + raise ValueError('"method" must be "POST" or "GET"') + + attempt = None + for attempt in retrier: + with attempt: + host = attempt.context.get('endpoint').get_value(scheme=self.preferred_scheme) + url = host + url_resource + attempt.result = send_request(url, data) + ret, resp = attempt.result + if resp.ok() and ret: + return attempt.result + if not resp.need_retry(): + return attempt.result + + if attempt is None: + raise RuntimeError('Retrier is not working. 
attempt is None') + + return attempt.result + + def __post(self, url, data=None): + return http._post_with_qiniu_mac(url, data, self.mac_auth) + + def __get(self, url, params=None): + return http._get_with_qiniu_mac(url, params, self.mac_auth) + + +def _build_op(*args): + return '/'.join(map(str, args)) + + +def build_batch_copy(source_bucket, key_pairs, target_bucket, force='false'): + """ + Parameters + ---------- + source_bucket: str + key_pairs: dict + target_bucket: str + force: str + + Returns + ------- + list[str] + """ + return _two_key_batch('copy', source_bucket, key_pairs, target_bucket, force) + + +def build_batch_rename(bucket, key_pairs, force='false'): + """ + Parameters + ---------- + bucket: str + key_pairs: dict + force: str + + Returns + ------- + list[str] + """ + return build_batch_move(bucket, key_pairs, bucket, force) + + +def build_batch_move(source_bucket, key_pairs, target_bucket, force='false'): + """ + Parameters + ---------- + source_bucket: str + key_pairs: dict + target_bucket: str + force: str + + Returns + ------- + list[str] + """ + return _two_key_batch('move', source_bucket, key_pairs, target_bucket, force) + + +def build_batch_restoreAr(bucket, keys): + """ + alias for build_batch_restore_ar for compatibility with old version + + Parameters + ---------- + bucket: str + keys: dict + + Returns + ------- + list[str] + """ + return build_batch_restore_ar(bucket, keys) + + +def build_batch_restore_ar(bucket, keys): + """ + Parameters + ---------- + bucket: str + keys: dict + + Returns + ------- + list[str] + """ + keys = { + k: ['freezeAfterDays', v] + for k, v in keys.items() + } + return _one_key_batch('restoreAr', bucket, keys) + + +def build_batch_delete(bucket, keys): + """ + Parameters + ---------- + bucket: str + keys: list[str] + + Returns + ------- + list[str] + """ + return _one_key_batch('delete', bucket, keys) + + +def build_batch_stat(bucket, keys): + """ + Parameters + ---------- + bucket: str + keys: list[str] + + Returns + ------- + list[str] + """ + return _one_key_batch('stat', bucket, keys) + + +def _one_key_batch(operation, bucket, keys): + """ + Parameters + ---------- + operation: str + bucket: str + keys: list[str] or dict + + Returns + ------- + list[str] + """ + # use functools.singledispatch to refactor when min version of python >= 3.4 + if isinstance(keys, list): + return [ + _build_op( + operation, + entry(bucket, key), + ) + for key in keys + ] + elif isinstance(keys, dict): + return [ + _build_op( + operation, + entry(bucket, key), + *opts + ) + for key, opts in keys.items() + ] + else: + raise TypeError('"keys" only support list or dict') + + +def _two_key_batch(operation, source_bucket, key_pairs, target_bucket=None, force='false'): + """ + + Parameters + ---------- + operation: str + source_bucket: str + key_pairs: dict + target_bucket: str + force: str + + Returns + ------- + list[str] + """ + if target_bucket is None: + target_bucket = source_bucket + return _one_key_batch( + operation, + source_bucket, + { + src_key: [ + entry(target_bucket, dst_key), + 'force', + force + ] + for src_key, dst_key in key_pairs.items() + } + ) + + +def _three_key_batch(operation, bucket, keys): + """ + .. deprecated: Use `_one_key_batch` instead. 
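    It only rewrites `keys` into the `freezeAfterDays` form and delegates to `_one_key_batch`.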
+ `keys` could be `{key: [freezeAfterDays, days]}` + + Parameters + ---------- + operation: str + bucket: str + keys: dict + + Returns + ------- + list[str] + """ + keys = { + k: ['freezeAfterDays', v] + for k, v in keys.items() + } + return _one_key_batch(operation, bucket, keys) diff --git a/qiniu/services/storage/legacy.py b/qiniu/services/storage/legacy.py new file mode 100644 index 00000000..db6f0979 --- /dev/null +++ b/qiniu/services/storage/legacy.py @@ -0,0 +1,354 @@ +# -*- coding: utf-8 -*- +import hashlib +import os +import time + +from qiniu import config, http +from qiniu.auth import Auth +from qiniu.compat import json +from qiniu.utils import _file_iter, crc32, rfc_from_timestamp, urlsafe_base64_encode + +from qiniu.services.storage.upload_progress_recorder import UploadProgressRecorder + + +class _Resume(object): + """deprecated 断点续上传类 + + 该类主要实现了分块上传,断点续上,以及相应地创建块和创建文件过程,详细规格参考: + https://developer.qiniu.com/kodo/api/mkblk + https://developer.qiniu.com/kodo/api/mkfile + + Attributes: + up_token: 上传凭证 + key: 上传文件名 + input_stream: 上传二进制流 + data_size: 上传流大小 + params: 自定义变量,规格参考 https://developer.qiniu.com/kodo/manual/vars#xvar + mime_type: 上传数据的mimeType + progress_handler: 上传进度 + upload_progress_recorder: 记录上传进度,用于断点续传 + modify_time: 上传文件修改日期 + hostscache_dir: host请求 缓存文件保存位置 + version 分片上传版本 目前支持v1/v2版本 默认v1 + part_size 分片上传v2必传字段 分片大小范围为1 MB - 1 GB + bucket_name 分片上传v2字段 空间名称 + """ + + def __init__(self, up_token, key, input_stream, file_name, data_size, hostscache_dir, params, mime_type, + progress_handler, upload_progress_recorder, modify_time, keep_last_modified, part_size=None, + version=None, bucket_name=None, metadata=None): + """初始化断点续上传""" + self.up_token = up_token + self.key = key + self.input_stream = input_stream + self.file_name = file_name + self.size = data_size + self.hostscache_dir = hostscache_dir + self.blockStatus = [] + self.params = params + self.mime_type = mime_type + self.progress_handler = progress_handler + self.upload_progress_recorder = upload_progress_recorder or UploadProgressRecorder() + self.modify_time = modify_time or time.time() + self.keep_last_modified = keep_last_modified + self.version = version or 'v1' + self.part_size = part_size or config._BLOCK_SIZE + self.bucket_name = bucket_name + self.metadata = metadata + + def record_upload_progress(self, offset): + record_data = { + 'size': self.size, + 'offset': offset, + } + if self.version == 'v1': + record_data['contexts'] = [ + { + 'ctx': block['ctx'], + 'expired_at': block['expired_at'] if 'expired_at' in block else 0 + } for block in self.blockStatus + ] + elif self.version == 'v2': + record_data['etags'] = self.blockStatus + record_data['expired_at'] = self.expiredAt + record_data['upload_id'] = self.uploadId + if self.modify_time: + record_data['modify_time'] = self.modify_time + self.upload_progress_recorder.set_upload_record(self.file_name, self.key, record_data) + + def recovery_from_record(self): + record = self.upload_progress_recorder.get_upload_record(self.file_name, self.key) + if not record: + if self.version == 'v1': + return 0 + elif self.version == 'v2': + return 0, None, None + try: + if not record['modify_time'] or record['size'] != self.size or \ + record['modify_time'] != self.modify_time: + if self.version == 'v1': + return 0 + elif self.version == 'v2': + return 0, None, None + except KeyError: + if self.version == 'v1': + return 0 + elif self.version == 'v2': + return 0, None, None + if self.version == 'v1': + if not record.__contains__('contexts') or 
len(record['contexts']) == 0: + return 0 + self.blockStatus = [ + # 兼容旧版本的 ctx 持久化 ≤v7.10.0 + ctx if type(ctx) is dict else {'ctx': ctx, 'expired_at': 0} + for ctx in record['contexts'] + ] + return record['offset'] + elif self.version == 'v2': + if not record.__contains__('etags') or len(record['etags']) == 0 or \ + not record.__contains__('expired_at') or float(record['expired_at']) < time.time() or \ + not record.__contains__('upload_id'): + return 0, None, None + self.blockStatus = record['etags'] + return record['offset'], record['upload_id'], record['expired_at'] + + def upload(self): + """上传操作""" + if self.version == 'v1': + return self._upload_v1() + elif self.version == 'v2': + return self._upload_v2() + else: + raise ValueError("version must choose v1 or v2 !") + + def _upload_v1(self): + self.blockStatus = [] + self.recovery_index = 1 + self.expiredAt = None + self.uploadId = None + self.get_bucket() + self.part_size = config._BLOCK_SIZE + + host = self.get_up_host() + offset = self.recovery_from_record() + is_resumed = offset > 0 + + # 检查原来的分片是否过期,如有则重传该分片 + for index, block_status in enumerate(self.blockStatus): + if block_status.get('expired_at', 0) > time.time(): + self.input_stream.seek(self.part_size, os.SEEK_CUR) + else: + block = self.input_stream.read(self.part_size) + response, ok = self._make_block_with_retry(block, host) + ret, info = response + if not ok: + return ret, info + self.blockStatus[index] = ret + self.record_upload_progress(offset) + + # 从断点位置上传 + for block in _file_iter(self.input_stream, self.part_size, offset): + length = len(block) + response, ok = self._make_block_with_retry(block, host) + ret, info = response + if not ok: + return ret, info + + self.blockStatus.append(ret) + offset += length + self.record_upload_progress(offset) + if callable(self.progress_handler): + self.progress_handler(((len(self.blockStatus) - 1) * self.part_size) + len(block), self.size) + + ret, info = self.make_file(host) + if info.status_code == 200 or info.status_code == 701: + self.upload_progress_recorder.delete_upload_record(self.file_name, self.key) + if info.status_code == 701 and is_resumed: + return self.upload() + return ret, info + + def _upload_v2(self): + self.blockStatus = [] + self.recovery_index = 1 + self.expiredAt = None + self.uploadId = None + self.get_bucket() + host = self.get_up_host() + + offset, self.uploadId, self.expiredAt = self.recovery_from_record() + is_resumed = False + if offset > 0 and self.blockStatus != [] and self.uploadId is not None \ + and self.expiredAt is not None: + self.recovery_index = self.blockStatus[-1]['partNumber'] + 1 + is_resumed = True + else: + self.recovery_index = 1 + init_url = self.block_url_v2(host, self.bucket_name) + self.uploadId, self.expiredAt = self.init_upload_task(init_url) + + for index, block in enumerate(_file_iter(self.input_stream, self.part_size, offset)): + length = len(block) + index_ = index + self.recovery_index + url = self.block_url_v2(host, self.bucket_name) + '/%s/%d' % (self.uploadId, index_) + ret, info = self.make_block_v2(block, url) + if info.status_code == 612: + self.upload_progress_recorder.delete_upload_record(self.file_name, self.key) + if info.status_code == 612 and is_resumed: + return self.upload() + if ret is None and not info.need_retry(): + return ret, info + if info.connect_failed(): + if config.get_default('default_zone').up_host_backup: + host = config.get_default('default_zone').up_host_backup + else: + host = config.get_default('default_zone')\ + 
.get_up_host_backup_by_token(self.up_token, self.hostscache_dir) + + if info.need_retry(): + url = self.block_url_v2(host, self.bucket_name) + '/%s/%d' % (self.uploadId, index + 1) + ret, info = self.make_block_v2(block, url) + if info.status_code == 612: + self.upload_progress_recorder.delete_upload_record(self.file_name, self.key) + if info.status_code == 612 and is_resumed: + return self.upload() + if ret is None: + return ret, info + del ret['md5'] + ret['partNumber'] = index_ + self.blockStatus.append(ret) + offset += length + self.record_upload_progress(offset) + if callable(self.progress_handler): + self.progress_handler(((len(self.blockStatus) - 1) * self.part_size) + len(block), self.size) + + make_file_url = self.block_url_v2(host, self.bucket_name) + '/%s' % self.uploadId + ret, info = self.make_file_v2( + self.blockStatus, + make_file_url, + self.file_name, + self.mime_type, + self.params, + self.metadata) + if info.status_code == 200 or info.status_code == 612: + self.upload_progress_recorder.delete_upload_record(self.file_name, self.key) + if info.status_code == 612 and is_resumed: + return self.upload() + return ret, info + + def make_file_v2(self, block_status, url, file_name=None, mime_type=None, customVars=None, metadata=None): + """completeMultipartUpload""" + parts = self.get_parts(block_status) + headers = { + 'Content-Type': 'application/json', + } + data = { + 'parts': parts, + 'fname': file_name, + 'mimeType': mime_type, + 'customVars': customVars, + 'metadata': metadata + } + return self.post_with_headers(url, json.dumps(data), headers=headers) + + def get_up_host(self): + if config.get_default('default_zone').up_host: + host = config.get_default('default_zone').up_host + else: + host = config.get_default('default_zone').get_up_host_by_token(self.up_token, self.hostscache_dir) + return host + + def _make_block_with_retry(self, block_data, up_host): + length = len(block_data) + crc = crc32(block_data) + ret, info = self.make_block(block_data, length, up_host) + if ret is None and not info.need_retry(): + return (ret, info), False + if info.connect_failed(): + if config.get_default('default_zone').up_host_backup: + up_host = config.get_default('default_zone').up_host_backup + else: + up_host = config.get_default('default_zone') \ + .get_up_host_backup_by_token(self.up_token, self.hostscache_dir) + if info.need_retry() or crc != ret['crc32']: + ret, info = self.make_block(block_data, length, up_host) + if ret is None or crc != ret['crc32']: + return (ret, info), False + return (ret, info), True + + def make_block(self, block, block_size, host): + """创建块""" + url = self.block_url(host, block_size) + return self.post(url, block) + + def make_block_v2(self, block, url): + headers = { + 'Content-Type': 'application/octet-stream', + 'Content-MD5': hashlib.md5(block).hexdigest(), + } + return self.put(url, block, headers) + + def block_url(self, host, size): + return '{0}/mkblk/{1}'.format(host, size) + + def block_url_v2(self, host, bucket_name): + encoded_object_name = urlsafe_base64_encode(self.key) if self.key is not None else '~' + return '{0}/buckets/{1}/objects/{2}/uploads'.format(host, bucket_name, encoded_object_name) + + def file_url(self, host): + url = ['{0}/mkfile/{1}'.format(host, self.size)] + if self.mime_type: + url.append('mimeType/{0}'.format(urlsafe_base64_encode(self.mime_type))) + + if self.key is not None: + url.append('key/{0}'.format(urlsafe_base64_encode(self.key))) + + if self.file_name is not None: + 
url.append('fname/{0}'.format(urlsafe_base64_encode(self.file_name))) + + if self.params: + for k, v in self.params.items(): + url.append('{0}/{1}'.format(k, urlsafe_base64_encode(v))) + + if self.modify_time and self.keep_last_modified: + url.append( + "x-qn-meta-!Last-Modified/{0}".format(urlsafe_base64_encode(rfc_from_timestamp(self.modify_time)))) + + if self.metadata: + for k, v in self.metadata.items(): + if k.startswith('x-qn-meta-'): + url.append( + "{0}/{1}".format(k, urlsafe_base64_encode(v))) + + url = '/'.join(url) + return url + + def make_file(self, host): + """创建文件""" + url = self.file_url(host) + body = ','.join([status['ctx'] for status in self.blockStatus]) + return self.post(url, body) + + def init_upload_task(self, url): + body, resp = self.post(url, '') + if body is not None: + return body['uploadId'], body['expireAt'] + else: + return None, None + + def post(self, url, data): + return http._post_with_token(url, data, self.up_token) + + def post_with_headers(self, url, data, headers): + return http._post_with_token_and_headers(url, data, self.up_token, headers) + + def put(self, url, data, headers): + return http._put_with_token_and_headers(url, data, self.up_token, headers) + + def get_parts(self, block_status): + return sorted(block_status, key=lambda i: i['partNumber']) + + def get_bucket(self): + if not self.bucket_name: + bucket_name = Auth.get_bucket_name(self.up_token) + if bucket_name: + self.bucket_name = bucket_name diff --git a/qiniu/services/storage/upload_progress_recorder.py b/qiniu/services/storage/upload_progress_recorder.py new file mode 100644 index 00000000..8673b198 --- /dev/null +++ b/qiniu/services/storage/upload_progress_recorder.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +import hashlib +import json +import os +import tempfile +from qiniu.compat import is_py2 + + +class UploadProgressRecorder(object): + """ + 持久化上传记录类 + + 该类默认保存每个文件的上传记录到文件系统中,用于断点续传 + 上传记录为json格式 + + Attributes: + record_folder: 保存上传记录的目录 + """ + + def __init__(self, record_folder=tempfile.gettempdir()): + self.record_folder = record_folder + + def __get_upload_record_file_path(self, file_name, key): + record_key = '{0}/{1}'.format(key, file_name) + if is_py2: + record_file_name = hashlib.md5(record_key).hexdigest() + else: + record_file_name = hashlib.md5(record_key.encode('utf-8')).hexdigest() + return os.path.join(self.record_folder, record_file_name) + + def has_upload_record(self, file_name, key): + upload_record_file_path = self.__get_upload_record_file_path(file_name, key) + return os.path.isfile(upload_record_file_path) + + def get_upload_record(self, file_name, key): + upload_record_file_path = self.__get_upload_record_file_path(file_name, key) + if not self.has_upload_record(file_name, key): + return None + try: + with open(upload_record_file_path, 'r') as f: + json_data = json.load(f) + except (IOError, ValueError): + json_data = None + + return json_data + + def set_upload_record(self, file_name, key, data): + upload_record_file_path = self.__get_upload_record_file_path(file_name, key) + with open(upload_record_file_path, 'w') as f: + json.dump(data, f) + + def delete_upload_record(self, file_name, key): + upload_record_file_path = self.__get_upload_record_file_path(file_name, key) + try: + os.remove(upload_record_file_path) + except OSError: + pass diff --git a/qiniu/services/storage/uploader.py b/qiniu/services/storage/uploader.py new file mode 100644 index 00000000..5bcdf4e5 --- /dev/null +++ b/qiniu/services/storage/uploader.py @@ -0,0 +1,229 @@ +# -*- coding: 
utf-8 -*- +import os + +from qiniu.config import _BLOCK_SIZE, get_default + +from qiniu.auth import Auth +from qiniu.utils import crc32, file_crc32, rfc_from_timestamp + +from qiniu.services.storage.uploaders import FormUploader, ResumeUploaderV1, ResumeUploaderV2 +from qiniu.services.storage.upload_progress_recorder import UploadProgressRecorder + +# for compat to old sdk (<= v7.11.1) +from qiniu.services.storage.legacy import _Resume # noqa + + +def put_data( + up_token, + key, + data, + params=None, + mime_type='application/octet-stream', + check_crc=False, + progress_handler=None, + fname=None, + hostscache_dir=None, + metadata=None, + regions=None, + accelerate_uploading=False +): + """上传二进制流到七牛 + + Args: + up_token: 上传凭证 + key: 上传文件名 + data: 上传二进制流 + params: 自定义变量,规格参考 https://developer.qiniu.com/kodo/manual/vars#xvar + mime_type: 上传数据的mimeType + check_crc: 是否校验crc32 + progress_handler: 上传进度 + fname: 文件名 + hostscache_dir: host请求 缓存文件保存位置 + metadata: 元数据 + regions: 区域信息,默认自动查询 + accelerate_uploading: 是否优先使用加速上传 + + Returns: + 一个dict变量,类似 {"hash": "", "key": ""} + 一个ResponseInfo对象 + """ + final_data = b'' + if hasattr(data, 'read'): + while True: + tmp_data = data.read(_BLOCK_SIZE) + if len(tmp_data) == 0: + break + else: + final_data += tmp_data + else: + final_data = data + + crc = crc32(final_data) + return _form_put( + up_token, key, final_data, params, mime_type, + crc, hostscache_dir, progress_handler, fname, metadata=metadata, + regions=regions, accelerate_uploading=accelerate_uploading + ) + + +def put_file( + up_token, key, file_path, params=None, + mime_type='application/octet-stream', check_crc=False, + progress_handler=None, upload_progress_recorder=None, keep_last_modified=False, hostscache_dir=None, + part_size=None, version=None, bucket_name=None, metadata=None, + regions=None, accelerate_uploading=False +): + """上传文件到七牛 + + Args: + up_token: 上传凭证 + key: 上传文件名 + file_path: 上传文件的路径 + params: 自定义变量,规格参考 https://developer.qiniu.com/kodo/manual/vars#xvar + mime_type: 上传数据的mimeType + check_crc: 是否校验crc32 + progress_handler: 上传进度 + upload_progress_recorder: 记录上传进度,用于断点续传 + keep_last_modified: 是否保留文件的最后修改时间 + hostscache_dir: host请求 缓存文件保存位置 + version: 分片上传版本 目前支持v1/v2版本 默认v1 + part_size: 分片上传v2必传字段 默认大小为4MB 分片大小范围为1 MB - 1 GB + bucket_name: 分片上传v2字段 空间名称 + metadata: 元数据信息 + regions: region信息 + accelerate_uploading: 是否开启加速上传 + + Returns: + 一个dict变量,类似 {"hash": "", "key": ""} + 一个ResponseInfo对象 + """ + ret = {} + size = os.stat(file_path).st_size + with open(file_path, 'rb') as input_stream: + file_name = os.path.basename(file_path) + modify_time = int(os.path.getmtime(file_path)) + if size > get_default('default_upload_threshold'): + ret, info = put_stream( + up_token, key, input_stream, file_name, size, hostscache_dir, params, + mime_type, progress_handler, + upload_progress_recorder=upload_progress_recorder, + modify_time=modify_time, keep_last_modified=keep_last_modified, + part_size=part_size, version=version, bucket_name=bucket_name, metadata=metadata, + regions=regions, accelerate_uploading=accelerate_uploading + ) + else: + crc = file_crc32(file_path) + ret, info = _form_put( + up_token, key, input_stream, params, mime_type, + crc, hostscache_dir, progress_handler, file_name, + modify_time=modify_time, keep_last_modified=keep_last_modified, metadata=metadata, + regions=regions, accelerate_uploading=accelerate_uploading + ) + return ret, info + + +def _form_put( + up_token, + key, + data, + params, + mime_type, + crc, + hostscache_dir=None, + progress_handler=None, + 
file_name=None, + modify_time=None, + keep_last_modified=False, + metadata=None, + regions=None, + accelerate_uploading=False +): + bucket_name = Auth.get_bucket_name(up_token) + uploader = FormUploader( + bucket_name, + progress_handler=progress_handler, + regions=regions, + accelerate_uploading=accelerate_uploading, + preferred_scheme=get_default('default_zone').scheme + ) + + if modify_time and keep_last_modified: + metadata['x-qn-meta-!Last-Modified'] = rfc_from_timestamp(modify_time) + + return uploader.upload( + key=key, + data=data, + data_size=None, + file_name=file_name, + modify_time=modify_time, + mime_type=mime_type, + metadata=metadata, + custom_vars=params, + crc32_int=crc, + up_token=up_token + ) + + +def put_stream( + up_token, + key, + input_stream, + file_name, + data_size, + hostscache_dir=None, + params=None, + mime_type=None, + progress_handler=None, + upload_progress_recorder=None, + modify_time=None, + keep_last_modified=False, + part_size=None, + version='v1', + bucket_name=None, + metadata=None, + regions=None, + accelerate_uploading=False +): + if not bucket_name: + bucket_name = Auth.get_bucket_name(up_token) + if not upload_progress_recorder: + upload_progress_recorder = UploadProgressRecorder() + if not version: + version = 'v1' + if not part_size: + part_size = 4 * (1024 * 1024) + + if version == 'v1': + uploader = ResumeUploaderV1( + bucket_name, + progress_handler=progress_handler, + upload_progress_recorder=upload_progress_recorder, + regions=regions, + accelerate_uploading=accelerate_uploading, + preferred_scheme=get_default('default_zone').scheme + ) + if modify_time and keep_last_modified: + metadata['x-qn-meta-!Last-Modified'] = rfc_from_timestamp(modify_time) + elif version == 'v2': + uploader = ResumeUploaderV2( + bucket_name, + progress_handler=progress_handler, + upload_progress_recorder=upload_progress_recorder, + part_size=part_size, + regions=regions, + accelerate_uploading=accelerate_uploading, + preferred_scheme=get_default('default_zone').scheme + ) + else: + raise ValueError('version only could be v1 or v2') + return uploader.upload( + key=key, + data=input_stream, + data_size=data_size, + file_name=file_name, + modify_time=modify_time, + mime_type=mime_type, + metadata=metadata, + custom_vars=params, + up_token=up_token + ) diff --git a/qiniu/services/storage/uploaders/__init__.py b/qiniu/services/storage/uploaders/__init__.py new file mode 100644 index 00000000..0e2ea39d --- /dev/null +++ b/qiniu/services/storage/uploaders/__init__.py @@ -0,0 +1,9 @@ +from .form_uploader import FormUploader +from .resume_uploader_v1 import ResumeUploaderV1 +from .resume_uploader_v2 import ResumeUploaderV2 + +__all__ = [ + 'FormUploader', + 'ResumeUploaderV1', + 'ResumeUploaderV2' +] diff --git a/qiniu/services/storage/uploaders/_default_retrier.py b/qiniu/services/storage/uploaders/_default_retrier.py new file mode 100644 index 00000000..6c15df1a --- /dev/null +++ b/qiniu/services/storage/uploaders/_default_retrier.py @@ -0,0 +1,216 @@ +from collections import namedtuple + +from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy +from qiniu.http.region import ServiceName +from qiniu.http.regions_retry_policy import RegionsRetryPolicy +from qiniu.retry.abc import RetryPolicy +from qiniu.retry import Retrier + + +_TokenExpiredRetryState = namedtuple( + 'TokenExpiredRetryState', + [ + 'retried_times', + 'upload_api_version' + ] +) + + +class TokenExpiredRetryPolicy(RetryPolicy): + def __init__( + self, + upload_api_version, + record_delete_handler, 
+ record_exists_handler, + max_retry_times=1 + ): + """ + Parameters + ---------- + upload_api_version: str + record_delete_handler: callable + `() -> None` + record_exists_handler: callable + `() -> bool` + max_retry_times: int + """ + self.upload_api_version = upload_api_version + self.record_delete_handler = record_delete_handler + self.record_exists_handler = record_exists_handler + self.max_retry_times = max_retry_times + + def init_context(self, context): + """ + Parameters + ---------- + context: dict + """ + context[self] = _TokenExpiredRetryState( + retried_times=0, + upload_api_version=self.upload_api_version + ) + + def should_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + + Returns + ------- + bool + """ + state = attempt.context[self] + + if ( + state.retried_times >= self.max_retry_times or + not self.record_exists_handler() + ): + return False + + if not attempt.result: + return False + + _ret, resp = attempt.result + + if ( + state.upload_api_version == 'v1' and + resp.status_code == 701 + ): + return True + + if ( + state.upload_api_version == 'v2' and + resp.status_code == 612 + ): + return True + + return False + + def prepare_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + """ + state = attempt.context[self] + attempt.context[self] = state._replace(retried_times=state.retried_times + 1) + + if not self.record_exists_handler(): + return + + self.record_delete_handler() + + +class AccUnavailableRetryPolicy(RetryPolicy): + def __init__(self): + pass + + def init_context(self, context): + pass + + def should_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + + Returns + ------- + bool + """ + if not attempt.result: + return False + + region = attempt.context.get('region') + if not region: + return False + + if all( + not region.services[sn] + for sn in attempt.context.get('alternative_service_names') + ): + return False + + _ret, resp = attempt.result + + return resp.status_code == 400 and \ + 'transfer acceleration is not configured on this bucket' in resp.text_body + + def prepare_retry(self, attempt): + """ + Parameters + ---------- + attempt: qiniu.retry.Attempt + """ + endpoints = [] + while not endpoints: + if not attempt.context.get('alternative_service_names'): + raise RuntimeError('No alternative service available') + attempt.context['service_name'] = attempt.context.get('alternative_service_names').pop(0) + # shallow copy list + # change to `list.copy` for more readable when min version of python update to >= 3 + endpoints = attempt.context['region'].services.get(attempt.context['service_name'], [])[:] + attempt.context['alternative_endpoints'] = endpoints + attempt.context['endpoint'] = attempt.context['alternative_endpoints'].pop(0) + + +ProgressRecord = namedtuple( + 'ProgressRecorder', + [ + 'upload_api_version', + 'exists', + 'delete' + ] +) + + +def get_default_retrier( + regions_provider, + preferred_endpoints_provider=None, + progress_record=None, + accelerate_uploading=False +): + """ + Parameters + ---------- + regions_provider: Iterable[Region] + preferred_endpoints_provider: Iterable[Endpoint] + progress_record: ProgressRecord + accelerate_uploading: bool + + Returns + ------- + Retrier + """ + retry_policies = [] + upload_service_names = [ServiceName.UP] + handle_change_region = None + + if accelerate_uploading: + retry_policies.append(AccUnavailableRetryPolicy()) + upload_service_names.insert(0, ServiceName.UP_ACC) + + if 
progress_record: + retry_policies.append(TokenExpiredRetryPolicy( + upload_api_version=progress_record.upload_api_version, + record_delete_handler=progress_record.delete, + record_exists_handler=progress_record.exists + )) + + def _handle_change_region(_): + progress_record.delete() + + handle_change_region = _handle_change_region + + retry_policies += [ + EndpointsRetryPolicy(skip_init_context=True), + RegionsRetryPolicy( + regions_provider=regions_provider, + service_names=upload_service_names, + preferred_endpoints_provider=preferred_endpoints_provider, + on_change_region=handle_change_region + ) + ] + + return Retrier(retry_policies) diff --git a/qiniu/services/storage/uploaders/abc/__init__.py b/qiniu/services/storage/uploaders/abc/__init__.py new file mode 100644 index 00000000..2e528ca4 --- /dev/null +++ b/qiniu/services/storage/uploaders/abc/__init__.py @@ -0,0 +1,7 @@ +from .uploader_base import UploaderBase +from .resume_uploader_base import ResumeUploaderBase + +__all__ = [ + 'UploaderBase', + 'ResumeUploaderBase' +] diff --git a/qiniu/services/storage/uploaders/abc/resume_uploader_base.py b/qiniu/services/storage/uploaders/abc/resume_uploader_base.py new file mode 100644 index 00000000..2965dee0 --- /dev/null +++ b/qiniu/services/storage/uploaders/abc/resume_uploader_base.py @@ -0,0 +1,214 @@ +import abc +from concurrent import futures + +from qiniu.services.storage.uploaders.io_chunked import ChunkInfo +from qiniu.services.storage.uploaders.abc import UploaderBase + + +class ResumeUploaderBase(UploaderBase): + """ + Attributes + ---------- + part_size: int, optional + progress_handler: function, optional + upload_progress_recorder: UploadProgressRecorder, optional + concurrent_executor: futures.Executor, optional + """ + __metaclass__ = abc.ABCMeta + + def __init__( + self, + bucket_name, + **kwargs + ): + """ + Parameters + ---------- + bucket_name + part_size: int + progress_handler: function + upload_progress_recorder: UploadProgressRecorder + max_concurrent_workers: int + concurrent_executor: futures.Executor + kwargs + """ + super(ResumeUploaderBase, self).__init__(bucket_name, **kwargs) + + self.part_size = kwargs.get('part_size', 4 * (1024 ** 2)) + + self.progress_handler = kwargs.get( + 'progress_handler', + None + ) + + self.upload_progress_recorder = kwargs.get( + 'upload_progress_recorder', + None + ) + + max_workers = kwargs.get('max_concurrent_workers', 3) + self.concurrent_executor = kwargs.get( + 'concurrent_executor', + futures.ThreadPoolExecutor(max_workers=max_workers) + ) + + def gen_chunk_list(self, size, chunk_size=None, uploaded_chunk_no_list=None): + """ + Parameters + ---------- + size: int + chunk_size: int + uploaded_chunk_no_list: list[int] + + Yields + ------- + ChunkInfo + """ + if not chunk_size: + chunk_size = self.part_size + if not uploaded_chunk_no_list: + uploaded_chunk_no_list = [] + + for i, chunk_offset in enumerate(range(0, size, chunk_size)): + chunk_no = i + 1 + if chunk_no in uploaded_chunk_no_list: + continue + yield ChunkInfo( + chunk_no=chunk_no, + chunk_offset=chunk_offset, + chunk_size=min( + chunk_size, + size - chunk_offset + ) + ) + + @abc.abstractmethod + def _recover_from_record( + self, + file_name, + key, + context + ): + """ + Parameters + ---------- + file_name: str + key: str + context: any + + Returns + ------- + any + """ + + @abc.abstractmethod + def _set_to_record( + self, + file_name, + key, + context + ): + """ + Parameters + ---------- + file_name: str + key: str + context: any + """ + + @abc.abstractmethod + def 
_progress_handler( + self, + file_name, + key, + context, + uploaded_size, + total_size + ): + """ + Parameters + ---------- + file_name: str + key: str + context: any + uploaded_size: int + total_size: int + """ + + @abc.abstractmethod + def initial_parts( + self, + up_token, + key, + file_path, + data, + data_size, + modify_time, + part_size, + file_name, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + key: str + file_path: str + data: IOBase + data_size: int + modify_time: int + part_size: int + file_name: str + kwargs: dict + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + + @abc.abstractmethod + def upload_parts( + self, + up_token, + data, + data_size, + context, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + data: IOBase + data_size: int + context: any + kwargs: dict + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + + @abc.abstractmethod + def complete_parts( + self, + up_token, + data_size, + context, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + data_size: int + context: any + kwargs: dict + + Returns + ------- + ret: dict + resp: ResponseInfo + """ diff --git a/qiniu/services/storage/uploaders/abc/uploader_base.py b/qiniu/services/storage/uploaders/abc/uploader_base.py new file mode 100644 index 00000000..5907aa1c --- /dev/null +++ b/qiniu/services/storage/uploaders/abc/uploader_base.py @@ -0,0 +1,275 @@ +import abc + +import qiniu.config as config +from qiniu.region import LegacyRegion +from qiniu.http.endpoint import Endpoint +from qiniu.http.regions_provider import get_default_regions_provider + +# type import +from qiniu.auth import Auth # noqa +from qiniu.http.region import Region, ServiceName # noqa + + +class UploaderBase(object): + """ + Attributes + ---------- + bucket_name: str + auth: Auth + regions: list[Region] + """ + __metaclass__ = abc.ABCMeta + + def __init__( + self, + bucket_name, + **kwargs + ): + """ + Parameters + ---------- + bucket_name: str + The name of bucket which you want to upload to. + auth: Auth + The instance of Auth to sign requests. + regions: list[Region], default=[] + The regions of bucket. It will be queried if not specified. + kwargs + The others arguments may be used by subclass. 
+ """ + # default bucket_name + self.bucket_name = bucket_name + + # change the default when implements AuthProvider + self.auth = kwargs.get('auth', None) + + # regions config + regions = kwargs.get('regions', None) + if not regions: + regions = [] + self.regions = regions + + query_regions_endpoints = kwargs.get('query_regions_endpoints', None) + if not query_regions_endpoints: + query_regions_endpoints = [] + self.query_regions_endpoints = query_regions_endpoints + + self.preferred_scheme = kwargs.get('preferred_scheme', 'http') + + # change the default value to False when remove config.get_default('default_zone') + self.accelerate_uploading = kwargs.get('accelerate_uploading', None) + + def get_up_token( + self, + bucket_name=None, + key=None, + expired=None, + policy=None, + strict_policy=None, + **_kwargs + ): + """ + Generate up token + + Parameters + ---------- + bucket_name: str + key: str + expired: int + seconds + policy: dict + strict_policy: bool + _kwargs: dict + useless for now, just for compatibility + + Returns + ------- + str + """ + if not self.auth: + raise ValueError('can not get up_token by auth not provided') + + bucket_name = bucket_name if bucket_name else self.bucket_name + + kwargs_for_up_token = { + k: v + for k, v in { + 'bucket': bucket_name, + 'key': key, + 'expired': expired, + 'policy': policy, + 'strict_policy': strict_policy + }.items() + if k + } + up_token = self.auth.upload_token(**kwargs_for_up_token) + return up_token + + def _get_regions_provider(self, access_key=None, bucket_name=None): + """ + Parameters + ---------- + access_key: str + bucket_name: str + + Returns + ------- + Iterable[Region or LegacyRegion] + """ + if self.regions: + return self.regions + + # handle compatibility for default_zone + if config.is_customized_default('default_zone'): + return [config.get_default('default_zone')] + + # handle compatibility for default_query_region_host + query_regions_endpoints = self.query_regions_endpoints + if not query_regions_endpoints: + query_region_host = config.get_default('default_query_region_host') + query_region_backup_hosts = config.get_default('default_query_region_backup_hosts') + query_regions_endpoints = [ + Endpoint.from_host(h) + for h in [query_region_host] + query_region_backup_hosts + ] + + # get regions from default regions provider + if not self.auth and not access_key: + raise ValueError('Must provide access_key and bucket_name if auth is unavailable.') + if not access_key: + access_key = self.auth.get_access_key() + if not bucket_name: + bucket_name = self.bucket_name + + return get_default_regions_provider( + query_endpoints_provider=query_regions_endpoints, + access_key=access_key, + bucket_name=bucket_name, + accelerate_uploading=self.accelerate_uploading, + preferred_scheme=self.preferred_scheme, + ) + + def _get_regions(self, access_key=None, bucket_name=None): + """ + .. 
deprecated:: + This has been deprecated by implemented regions provider and endpoints + + Parameters + ---------- + access_key: str + bucket_name: str + + Returns + ------- + list[LegacyRegion] + """ + def get_legacy_region(r): + if isinstance(r, LegacyRegion): + return r + opts = { + 'scheme': self.preferred_scheme, + 'accelerate_uploading': self.accelerate_uploading + } + if r.services[ServiceName.UP]: + opts['up_host'] = r.services[ServiceName.UP][0].get_value(self.preferred_scheme) + if len(r.services[ServiceName.UP]) > 1: + opts['up_host_backup'] = [ + e.get_value(self.preferred_scheme) + for e in r.services[ServiceName.UP][1:] + ] + if r.services[ServiceName.IO]: + opts['io_host'] = r.services[ServiceName.IO][0].get_value(self.preferred_scheme) + if r.services[ServiceName.RS]: + opts['rs_host'] = r.services[ServiceName.RS][0].get_value(self.preferred_scheme) + if r.services[ServiceName.RSF]: + opts['rsf_host'] = r.services[ServiceName.RSF][0].get_value(self.preferred_scheme) + if r.services[ServiceName.API]: + opts['api_host'] = r.services[ServiceName.API][0].get_value(self.preferred_scheme) + result = LegacyRegion(**opts) + result.services = r.services + result.region_id = r.region_id + result.s3_region_id = r.s3_region_id + result.ttl = r.ttl + result.create_time = r.create_time + return result + + return [ + get_legacy_region(r) + for r in self._get_regions_provider(access_key, bucket_name) + ] + + def _get_up_hosts(self, access_key=None, bucket_name=None): + """ + get hosts of upload by access key or the first region + + .. deprecated:: + This has been deprecated by implemented regions provider and endpoints + + Returns + ------- + list[str] + """ + if not bucket_name: + bucket_name = self.bucket_name + if not self.auth and not access_key: + raise ValueError('Must provide access_key if auth is unavailable.') + if not access_key: + access_key = self.auth.get_access_key() + + regions = self._get_regions(access_key, bucket_name) + + if not regions: + raise ValueError('No region available.') + + # get up hosts in region + service_names = [ServiceName.UP] + if self.accelerate_uploading: + service_names.insert(0, ServiceName.UP_ACC) + + return [ + e.get_value() + for sn in service_names + for e in regions[0].services[sn] + ] + + @abc.abstractmethod + def upload( + self, + key, + file_path, + data, + data_size, + modify_time, + + part_size, + mime_type, + metadata, + file_name, + custom_vars, + **kwargs + ): + """ + Upload method + + Parameters + ---------- + key: str + file_path: str + data: IOBase + data_size: int + modify_time: int + + part_size: int + mime_type: str + metadata: dict + file_name: str + custom_vars: dict + kwargs: dict + + Returns + ------- + ret: dict + The parsed response body + info + The response + """ diff --git a/qiniu/services/storage/uploaders/form_uploader.py b/qiniu/services/storage/uploaders/form_uploader.py new file mode 100644 index 00000000..288a69da --- /dev/null +++ b/qiniu/services/storage/uploaders/form_uploader.py @@ -0,0 +1,281 @@ +from io import BytesIO +from os import path +from time import time + +from qiniu.compat import is_seekable +from qiniu.utils import b, io_crc32 +from qiniu.auth import Auth +from qiniu.http import qn_http_client + +from .abc import UploaderBase +from ._default_retrier import get_default_retrier + + +class FormUploader(UploaderBase): + def __init__(self, bucket_name, **kwargs): + """ + Parameters + ---------- + bucket_name: str + kwargs + auth, regions + """ + super(FormUploader, self).__init__(bucket_name, **kwargs) + + 
self.progress_handler = kwargs.get( + 'progress_handler', + None + ) + + def upload( + self, + key, + file_path=None, + data=None, + data_size=None, + modify_time=None, + part_size=None, + mime_type=None, + metadata=None, + file_name=None, + custom_vars=None, + **kwargs + ): + """ + Parameters + ---------- + key: str + file_path: str + data: IOBase + data_size: int + modify_time: int + part_size: int + mime_type: str + metadata: dict + file_name: str + custom_vars: dict + kwargs + up_token: str + crc32_int: int + bucket_name: str + is required if upload to another bucket + expired: int + option for generate up_token if not provide up_token. seconds + policy: dict + option for generate up_token if not provide up_token. details see `auth.Auth` + strict_policy: bool + option for generate up_token if not provide up_token + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + # check and initial arguments + # bucket_name + bucket_name = kwargs.get('bucket_name', self.bucket_name) + + # up_token + up_token = kwargs.get('up_token', None) + if not up_token: + up_token = self.get_up_token(**kwargs) + access_key = self.auth.get_access_key() + else: + access_key, _, _ = Auth.up_token_decode(up_token) + + # crc32 from outside + crc32_int = kwargs.get('crc32_int', None) + # try to get file_name + if not file_name and file_path: + file_name = path.basename(file_path) + + # must provide file_path or data + if not file_path and not data: + raise TypeError('Must provide one of file_path or data.') + if file_path and data: + raise TypeError('Must provide only one of file_path or data.') + + # useless for form upload + if not modify_time: + if file_path: + modify_time = int(path.getmtime(file_path)) + else: + modify_time = int(time()) + + # upload + try: + if file_path: + data_size = path.getsize(file_path) + data = open(file_path, 'rb') + elif isinstance(data, bytes): + data_size = len(data) + data = BytesIO(data) + elif isinstance(data, str): + data_size = len(data) + data = BytesIO(b(data)) + if not crc32_int: + crc32_int = self.__get_crc32_int(data) + fields = self.__get_form_fields( + up_token=up_token, + key=key, + crc32_int=crc32_int, + custom_vars=custom_vars, + metadata=metadata + ) + ret, resp = self.__upload_data_with_retrier( + # retrier options + access_key=access_key, + bucket_name=bucket_name, + # upload_data options + fields=fields, + file_name=file_name, + data=data, + data_size=data_size, + mime_type=mime_type + ) + finally: + if file_path: + data.close() + + return ret, resp + + def __upload_data_with_retrier( + self, + access_key, + bucket_name, + **upload_data_opts + ): + retrier = get_default_retrier( + regions_provider=self._get_regions_provider( + access_key=access_key, + bucket_name=bucket_name + ), + accelerate_uploading=self.accelerate_uploading + ) + data = upload_data_opts.get('data') + attempt = None + for attempt in retrier: + with attempt: + attempt.result = self.__upload_data( + up_endpoint=attempt.context.get('endpoint'), + **upload_data_opts + ) + ret, resp = attempt.result + if resp.ok() and ret: + return attempt.result + if ( + not is_seekable(data) or + not resp.need_retry() + ): + return attempt.result + data.seek(0) + + if attempt is None: + raise RuntimeError('Retrier is not working. 
attempt is None') + + return attempt.result + + def __upload_data( + self, + up_endpoint, + fields, + file_name, + data, + data_size=None, + mime_type='application/octet-stream' + ): + """ + Parameters + ---------- + up_endpoint: Endpoint + fields: dict + file_name: str + data: IOBase + data_size: int + mime_type: str + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + req_url = up_endpoint.get_value(scheme=self.preferred_scheme) + if not file_name or not file_name.strip(): + file_name = 'file_name' + + ret, resp = qn_http_client.post( + url=req_url, + data=fields, + files={ + 'file': (file_name, data, mime_type) + } + ) + return ret, resp + + def __get_form_fields( + self, + up_token, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + kwargs + key, crc32_int, custom_vars, metadata + + Returns + ------- + dict + """ + key = kwargs.get('key', None) + crc32_int = kwargs.get('crc32_int', None) + custom_vars = kwargs.get('custom_vars', None) + metadata = kwargs.get('metadata', None) + + result = { + 'token': up_token, + } + + if key is not None: + result['key'] = key + + if crc32_int: + result['crc32'] = crc32_int + + if custom_vars: + result.update( + { + k: str(v) + for k, v in custom_vars.items() + if k.startswith('x:') + } + ) + + if metadata: + result.update( + { + k: str(v) + for k, v in metadata.items() + if k.startswith('x-qn-meta-') + } + ) + + return result + + def __get_crc32_int(self, data): + """ + Parameters + ---------- + data: BytesIO + + Returns + ------- + str + """ + result = None + if not is_seekable(data): + return result + result = io_crc32(data) + data.seek(0) + return result diff --git a/qiniu/services/storage/uploaders/io_chunked.py b/qiniu/services/storage/uploaders/io_chunked.py new file mode 100644 index 00000000..79657157 --- /dev/null +++ b/qiniu/services/storage/uploaders/io_chunked.py @@ -0,0 +1,90 @@ +import os +import io +from collections import namedtuple + +from qiniu.compat import is_seekable + + +ChunkInfo = namedtuple( + 'ChunkInfo', + [ + 'chunk_no', + 'chunk_offset', + 'chunk_size' + ] +) + + +class IOChunked(io.IOBase): + def __init__( + self, + base_io, + chunk_offset, + chunk_size, + lock, + buffer_size=4 * (1024 ** 2) # 4MB just for demo + ): + if not is_seekable(base_io): + raise TypeError('"base_io" must be seekable') + self.__base_io = base_io + self.__chunk_start = chunk_offset + self.__chunk_size = chunk_size + self.__chunk_end = chunk_offset + chunk_size + self.__lock = lock + self.__chunk_pos = 0 + + self.buffer_size = min(buffer_size, chunk_size) + + def readable(self): + return self.__base_io.readable() + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if not self.seekable(): + raise io.UnsupportedOperation('does not support seek') + if whence == os.SEEK_SET: + if offset < 0: + raise ValueError('offset should be zero or positive if whence is 0') + self.__chunk_pos = offset + elif whence == os.SEEK_CUR: + self.__chunk_pos += offset + elif whence == os.SEEK_END: + if offset > 0: + raise ValueError('offset should be zero or negative if whence is 2') + self.__chunk_pos = self.__chunk_size + offset + else: + raise ValueError('whence should be 0, 1 or 2') + self.__chunk_pos = max( + 0, + min(self.__chunk_size, self.__chunk_pos) + ) + + def tell(self): + return self.__chunk_pos + + def read(self, size): + if self.__curr_base_pos >= self.__chunk_end: + return b'' + read_size = max(self.buffer_size, size) + read_size = min(self.__rest_chunk_size, read_size) + + # -- ignore size argument -- + with 
self.__lock: + self.__base_io.seek(self.__curr_base_pos) + data = self.__base_io.read(read_size) + + self.__chunk_pos += len(data) + return data + + def __len__(self): + return self.__chunk_size + + @property + def __curr_base_pos(self): + return self.__chunk_start + self.__chunk_pos + + @property + def __rest_chunk_size(self): + return self.__chunk_end - self.__curr_base_pos diff --git a/qiniu/services/storage/uploaders/resume_uploader_v1.py b/qiniu/services/storage/uploaders/resume_uploader_v1.py new file mode 100644 index 00000000..8a0e9cfb --- /dev/null +++ b/qiniu/services/storage/uploaders/resume_uploader_v1.py @@ -0,0 +1,830 @@ +import logging +import math +import functools +from collections import namedtuple +from concurrent import futures +from io import BytesIO +from itertools import chain +from os import path +from threading import Lock +from time import time + +from qiniu.compat import is_seekable +from qiniu.auth import Auth +from qiniu.http import qn_http_client, ResponseInfo +from qiniu.http.endpoint import Endpoint +from qiniu.utils import b, io_crc32, urlsafe_base64_encode + +from ._default_retrier import ProgressRecord, get_default_retrier +from .abc import ResumeUploaderBase +from .io_chunked import IOChunked + + +class ResumeUploaderV1(ResumeUploaderBase): + def _recover_from_record( + self, + file_name, + key, + context + ): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV1Context + + Returns + ------- + _ResumeUploadV1Context + """ + if not isinstance(context, _ResumeUploadV1Context): + raise TypeError('"context" must be an instance of _ResumeUploadV1Context') + + if not self.upload_progress_recorder or not any([file_name, key]): + return context + + record = self.upload_progress_recorder.get_upload_record( + file_name, + key + ) + + if not record: + return context + + record_up_hosts = record.get('up_hosts', []) + record_part_size = record.get('part_size', None) + record_modify_time = record.get('modify_time', 0) + record_context = record.get('contexts', []) + + # compat with old sdk(<= v7.11.1) + if not record_up_hosts or not record_part_size: + return context + + # filter expired parts + if record_modify_time != context.modify_time: + record_context = [] + else: + now = time() + record_context = [ + ctx + for ctx in record_context + if ( + ctx.get('expired_at', 0) > now and + ctx.get('part_no', None) and + ctx.get('ctx', None) + ) + ] + + # assign to context + return context._replace( + up_hosts=record_up_hosts, + part_size=record_part_size, + parts=[ + _ResumeUploadV1Part( + part_no=p['part_no'], + ctx=p['ctx'], + expired_at=p['expired_at'], + ) + for p in record_context + ], + resumed=True + ) + + def _set_to_record( + self, + file_name, + key, + context + ): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV1Context + + """ + if not self.upload_progress_recorder or not any([file_name, key]): + return + + record_data = { + 'up_hosts': context.up_hosts, + 'part_size': context.part_size, + 'modify_time': context.modify_time, + 'contexts': [ + { + 'ctx': part.ctx, + 'expired_at': part.expired_at, + 'part_no': part.part_no + } + for part in context.parts + ] + } + self.upload_progress_recorder.set_upload_record( + file_name, + key, + data=record_data + ) + + def _try_delete_record( + self, + file_name, + key, + context=None, + resp=None + ): + """ + Parameters + ---------- + file_name: str or None + key: str or None + context: _ResumeUploadV1Context + resp: ResponseInfo + """ + if not 
self.upload_progress_recorder or not any([file_name, key]): + return + if resp and not resp.ok(): + return + self.upload_progress_recorder.delete_upload_record(file_name, key) + + def _progress_handler( + self, + file_name, + key, + context, + uploaded_size, + total_size + ): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV1Context + uploaded_size: int + total_size: int + + """ + self._set_to_record(file_name, key, context) + if not callable(self.progress_handler): + return + try: + self.progress_handler(uploaded_size, total_size) + except Exception as err: + err.no_need_retry = True + raise err + + def _initial_context( + self, + key, + file_name, + modify_time + ): + """ + Parameters + ---------- + key: str + file_name: str + modify_time: float + + Returns + ------- + _ResumeUploadV1Context + """ + part_size = 4 * (1024 ** 2) + context = _ResumeUploadV1Context( + up_hosts=[], + part_size=part_size, + parts=[], + modify_time=modify_time, + resumed=False + ) + + # try to recover from record + return self._recover_from_record( + key=key, + file_name=file_name, + context=context + ) + + def initial_parts( + self, + up_token, + key, + file_path=None, + data=None, + data_size=None, + modify_time=None, + part_size=None, + file_name=None, + up_endpoint=None, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + key: str + file_path: str or None + data: str or None + modify_time: float + data_size: int + part_size: None + useless for v1 by fixed part size + file_name: str + up_endpoint: Endpoint + + kwargs: dict + + Returns + ------- + context: _ResumeUploadV1Context + resp: None + + """ + # -- check and initial arguments + # must provide file_path or data + if not file_path and not data: + raise TypeError('Must provide one of file_path or data.') + if file_path and data: + raise TypeError('Must provide only one of file_path or data.') + + # data must has length + if not file_path and not data_size: + raise TypeError('Must provide size if use data.') + + if not modify_time: + if file_path: + modify_time = int(path.getmtime(file_path)) + else: + modify_time = int(time()) + + if not file_name and file_path: + file_name = path.basename(file_path) + + context = self._initial_context( + key=key, + file_name=file_name, + modify_time=modify_time + ) + + if not context.up_hosts and up_endpoint: + context.up_hosts.extend([up_endpoint.get_value(self.preferred_scheme)]) + + if not context.up_hosts: + access_key, _, _ = Auth.up_token_decode(up_token) + context.up_hosts.extend(self._get_up_hosts(access_key)) + + return context, None + + def upload_parts( + self, + up_token, + data, + data_size, + context, + **kwargs + ): + """ + + Parameters + ---------- + up_token: str + context: _ResumeUploadV1Context + data + data_size: int + + kwargs + key, file_name + + Returns + ------- + part: _ResumeUploadV1Part + resp: ResponseInfo + + """ + # initial arguments + chunk_list = self.gen_chunk_list( + size=data_size, + chunk_size=context.part_size, + uploaded_chunk_no_list=[ + p.part_no for p in context.parts + ] + ) + up_hosts = list(context.up_hosts) + file_name = kwargs.get('file_name', None) + key = kwargs.get('key', None) + + # initial upload state + part, resp = None, None + uploaded_size = context.part_size * len(context.parts) + if math.ceil(data_size / context.part_size) in [p.part_no for p in context.parts]: + # if last part has been uploaded, should correct the uploaded size + uploaded_size += (data_size % context.part_size) - context.part_size + lock = Lock() 
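+        # The shared Lock is handed to each IOChunked chunk reader; IOChunked.read() holds it
+        # around its seek()+read() pair, so concurrently uploaded parts never interleave reads
+        # on the single underlying input stream.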
+ + if not self.concurrent_executor: + # upload sequentially + for chunk in chunk_list: + part, resp = self.__upload_part( + data=data, + chunk_info=chunk, + up_hosts=up_hosts, + up_token=up_token, + lock=lock + ) + if not resp.ok(): + return None, resp + elif not part: + return resp.json(), resp + context.parts.append(part) + uploaded_size += chunk.chunk_size + self._progress_handler( + file_name=file_name, + key=key, + context=context, + uploaded_size=uploaded_size, + total_size=data_size + ) + else: + # upload concurrently + future_chunk_dict = {} + for chunk in chunk_list: + ftr = self.concurrent_executor.submit( + self.__upload_part, + data=data, + chunk_info=chunk, + up_hosts=up_hosts, + up_token=up_token, + lock=lock + ) + future_chunk_dict[ftr] = chunk + + first_failed_resp = None + for ftr in futures.as_completed(future_chunk_dict): + if ftr.cancelled(): + continue + elif ftr.exception(): + # only keep first failed future, + # continue instead return to wait running future done. + if first_failed_resp: + continue + first_failed_resp = ResponseInfo(None, ftr.exception()) + for not_done in filter(lambda f: not f.done(), future_chunk_dict): + not_done.cancel() + else: + part, resp = ftr.result() + if not part: + if not first_failed_resp: + first_failed_resp = resp + for not_done in filter(lambda f: not f.done(), future_chunk_dict): + not_done.cancel() + else: + context.parts.append(part) + uploaded_size += future_chunk_dict[ftr].chunk_size + self._progress_handler( + file_name=file_name, + key=key, + context=context, + uploaded_size=uploaded_size, + total_size=data_size + ) + if first_failed_resp: + if first_failed_resp.ok(): + # just compat with old sdk. it's ok when crc32 check failed + return first_failed_resp.json(), first_failed_resp + return None, first_failed_resp + + return part, resp + + def complete_parts( + self, + up_token, + data_size, + context, + **kwargs + ): + """ + + Parameters + ---------- + up_token: str + data_size: int + context: _ResumeUploadV1Context + kwargs: + key, file_name, params, metadata + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + key = kwargs.get('key', None) + file_name = kwargs.get('file_name', None) + params = kwargs.get('params', None) + metadata = kwargs.get('metadata', None) + mime_type = kwargs.get('mime_type', None) + + # sort contexts + sorted_parts = sorted(context.parts, key=lambda part: part.part_no) + body = ','.join((part.ctx for part in sorted_parts)) + + ret, resp = None, None + for up_host in context.up_hosts: + url = self.__get_mkfile_url( + up_host=up_host, + data_size=data_size, + mime_type=mime_type, + key=key, + file_name=file_name, + params=params, + metadata=metadata + ) + ret, resp = qn_http_client.post( + url=url, + data=body, + files=None, + headers={ + 'Authorization': 'UpToken {}'.format(up_token) + } + ) + if resp.ok() or not resp.need_retry(): + break + self._try_delete_record( + file_name, + key, + context, + resp + ) + return ret, resp + + def __upload_with_retrier( + self, + access_key, + bucket_name, + **upload_opts + ): + file_name = upload_opts.get('file_name', None) + key = upload_opts.get('key', None) + modify_time = upload_opts.get('modify_time', None) + + context = self._initial_context( + key=key, + file_name=file_name, + modify_time=modify_time + ) + preferred_endpoints = None + if context.up_hosts: + preferred_endpoints = [ + Endpoint.from_host(h) + for h in context.up_hosts + ] + + progress_record = None + if all([ + self.upload_progress_recorder, + file_name, + key + ]): + progress_record 
= ProgressRecord( + upload_api_version='v1', + exists=functools.partial( + self.upload_progress_recorder.has_upload_record, + file_name=file_name, + key=key + ), + delete=functools.partial( + self.upload_progress_recorder.delete_upload_record, + file_name=file_name, + key=key + ) + ) + + retrier = get_default_retrier( + regions_provider=self._get_regions_provider( + access_key=access_key, + bucket_name=bucket_name + ), + preferred_endpoints_provider=preferred_endpoints, + progress_record=progress_record, + accelerate_uploading=self.accelerate_uploading, + ) + + data = upload_opts.get('data') + attempt = None + for attempt in retrier: + with attempt: + upload_opts['up_endpoint'] = attempt.context.get('endpoint') + attempt.result = self.__upload( + **upload_opts + ) + ret, resp = attempt.result + if resp.ok() and ret: + return attempt.result + if ( + not is_seekable(data) or + not resp.need_retry() + ): + return attempt.result + data.seek(0) + + if attempt is None: + raise RuntimeError('Retrier is not working. attempt is None') + + return attempt.result + + def __upload( + self, + up_token, + key, + file_path, + file_name, + data, + data_size, + modify_time, + mime_type, + custom_vars, + metadata, + up_endpoint + ): + # initial_parts + context, resp = self.initial_parts( + up_token, + key, + file_path=file_path, + file_name=file_name, + data=data, + data_size=data_size, + modify_time=modify_time, + up_endpoint=up_endpoint + ) + + # upload_parts + try: + if file_path: + data_size = path.getsize(file_path) + data = open(file_path, 'rb') + elif isinstance(data, bytes): + data_size = len(data) + data = BytesIO(data) + elif isinstance(data, str): + data_size = len(data) + data = BytesIO(b(data)) + ret, resp = self.upload_parts( + up_token=up_token, + context=context, + data=data, + data_size=data_size, + + key=key, + file_name=file_name + ) + finally: + if file_path: + data.close() + + if resp and not resp.ok(): + return ret, resp + + # complete_parts + ret, resp = self.complete_parts( + up_token=up_token, + data_size=data_size, + context=context, + + key=key, + mime_type=mime_type, + file_name=file_name, + params=custom_vars, + metadata=metadata + ) + + return ret, resp + + def upload( + self, + key, + file_path=None, + data=None, + data_size=None, + modify_time=None, + + part_size=None, + mime_type=None, + metadata=None, + file_name=None, + custom_vars=None, + **kwargs + ): + """ + + Parameters + ---------- + key + file_path + data + data_size + modify_time + + part_size + mime_type + metadata + file_name + custom_vars + + kwargs: + up_token: str + crc32_int: int + bucket_name: str + is required if upload to another bucket + expired: int + option for generate up_token if not provide up_token. seconds + policy: dict + option for generate up_token if not provide up_token. details see `auth.Auth` + strict_policy: bool + option for generate up_token if not provide up_token + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + # part_size + if part_size: + logging.warning('ResumeUploader not support part_size. 
It is fixed to 4MB.') + + # up_token + up_token = kwargs.get('up_token', None) + if not up_token: + kwargs.setdefault('up_token', self.get_up_token(**kwargs)) + access_key = self.auth.get_access_key() + else: + access_key, _, _ = Auth.up_token_decode(up_token) + + # bucket_name + kwargs['bucket_name'] = Auth.get_bucket_name(up_token) + + # file_name + if not file_name and file_path: + file_name = path.basename(file_path) + + # upload + return self.__upload_with_retrier( + access_key=access_key, + key=key, + file_path=file_path, + data=data, + data_size=data_size, + modify_time=modify_time, + mime_type=mime_type, + metadata=metadata, + file_name=file_name, + custom_vars=custom_vars, + **kwargs + ) + + def __upload_part( + self, + data, + chunk_info, + up_hosts, + up_token, + lock + ): + """ + Parameters + ---------- + data: IOBase + chunk_info: ChunkInfo + up_hosts: list[str] + up_token: str + lock: Lock + + Returns + ------- + part: _ResumeUploadV2Part + resp: ResponseInfo + """ + if not up_hosts: + raise ValueError('Must provide one up host at least') + + chunked_data = IOChunked( + base_io=data, + chunk_offset=chunk_info.chunk_offset, + chunk_size=chunk_info.chunk_size, + lock=lock + ) + chunk_crc32 = io_crc32(chunked_data) + chunked_data.seek(0) + part, resp = None, None + for up_host in up_hosts: + url = '/'.join([ + up_host, + 'mkblk', str(chunk_info.chunk_size) + ]) + ret, resp = qn_http_client.post( + url=url, + data=chunked_data, + files=None, + headers={ + 'Authorization': 'UpToken {}'.format(up_token) + } + ) + if resp.ok() and ret: + if ret.get('crc32', 0) != chunk_crc32: + return None, resp + part = _ResumeUploadV1Part( + part_no=chunk_info.chunk_no, + ctx=ret.get('ctx', ''), + expired_at=ret.get('expired_at', 0), + ) + return part, resp + if ( + not is_seekable(chunked_data) or + not resp.need_retry() + ): + return part, resp + chunked_data.seek(0) + return part, resp + + def __get_mkfile_url( + self, + up_host, + data_size, + mime_type=None, + key=None, + file_name=None, + params=None, + metadata=None + ): + """ + Parameters + ---------- + up_host: str + data_size: int + mime_type: str or None + key: str or None + file_name: str or None + params: dict or None + metadata: dict or None + + Returns + ------- + str + """ + url_base = [up_host, 'mkfile', str(data_size)] + url_params = [] + + if mime_type: + url_params.append(('mimeType', mime_type)) + + if key: + url_params.append(('key', key)) + + if file_name: + url_params.append(('fname', file_name)) + + if params: + url_params.extend(params.items()) + + if metadata: + url_params.extend( + (k, v) + for k, v in metadata.items() + if k.startswith('x-qn-meta-') + ) + + url_params_iter = chain.from_iterable( + (str(k), urlsafe_base64_encode(str(v))) + for k, v in url_params + ) + + return '/'.join( + chain( + url_base, + url_params_iter + ) + ) + + +# use dataclass instead namedtuple if min version of python update to 3.7 +_ResumeUploadV1Part = namedtuple( + 'ResumeUploadV1Part', + [ + 'part_no', + 'ctx', + 'expired_at', + ] +) + +_ResumeUploadV1Context = namedtuple( + 'ResumeUploadV1Context', + [ + 'up_hosts', + 'part_size', + 'parts', + 'modify_time', # the file last modify time + 'resumed' + ] +) diff --git a/qiniu/services/storage/uploaders/resume_uploader_v2.py b/qiniu/services/storage/uploaders/resume_uploader_v2.py new file mode 100644 index 00000000..3e165e2f --- /dev/null +++ b/qiniu/services/storage/uploaders/resume_uploader_v2.py @@ -0,0 +1,857 @@ +import functools +import math +from collections import namedtuple +from 
concurrent import futures +from io import BytesIO +from os import path +from threading import Lock +from time import time + +from qiniu.compat import is_seekable +from qiniu.auth import Auth +from qiniu.http import qn_http_client, ResponseInfo +from qiniu.http.endpoint import Endpoint +from qiniu.utils import b, io_md5, urlsafe_base64_encode +from qiniu.compat import json + +from ._default_retrier import ProgressRecord, get_default_retrier +from .abc import ResumeUploaderBase +from .io_chunked import IOChunked + + +class ResumeUploaderV2(ResumeUploaderBase): + def _recover_from_record( + self, + file_name, + key, + context + ): + """ + + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV2Context + + Returns + ------- + _ResumeUploadV2Context + """ + if not isinstance(context, _ResumeUploadV2Context): + raise TypeError('"context" must be an instance of _ResumeUploadV2Context') + + if ( + not self.upload_progress_recorder or + not (file_name or key) + ): + return context + + record = self.upload_progress_recorder.get_upload_record( + file_name, + key + ) + + if not record: + return context + + record_up_hosts = record.get('up_hosts', []) + record_upload_id = record.get('upload_id', '') + record_expired_at = record.get('expired_at', 0) + record_part_size = record.get('part_size', None) + record_modify_time = record.get('modify_time', 0) + record_etags = record.get('etags', []) + + # compat with old sdk(<= v7.11.1) + if not record_up_hosts or not record_part_size: + return context + + if ( + not record_upload_id or + record_modify_time != context.modify_time or + record_expired_at < time() + ): + return context + + return context._replace( + up_hosts=record_up_hosts, + upload_id=record_upload_id, + expired_at=record_expired_at, + part_size=record_part_size, + parts=[ + _ResumeUploadV2Part( + part_no=p['partNumber'], + etag=p['etag'] + ) + for p in record_etags + if ( + p.get('partNumber', None) and + p.get('etag', None) + ) + ], + resumed=True + ) + + def _set_to_record(self, file_name, key, context): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV2Context + + """ + if not self.upload_progress_recorder or not any([file_name, key]): + return + + record_data = { + 'up_hosts': context.up_hosts, + 'upload_id': context.upload_id, + 'expired_at': context.expired_at, + 'part_size': context.part_size, + 'modify_time': context.modify_time, + 'etags': [ + { + 'etag': part.etag, + 'partNumber': part.part_no + } + for part in context.parts + ] + } + self.upload_progress_recorder.set_upload_record( + file_name, + key, + data=record_data + ) + + def _try_delete_record( + self, + file_name, + key, + context=None, + resp=None + ): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV2Context + resp: ResponseInfo + """ + if not self.upload_progress_recorder or not any([file_name, key]): + return + if resp and not resp.ok(): + return + self.upload_progress_recorder.delete_upload_record(file_name, key) + + def _progress_handler( + self, + file_name, + key, + context, + uploaded_size, + total_size + ): + """ + Parameters + ---------- + file_name: str + key: str + context: _ResumeUploadV2Context + uploaded_size: int + total_size: int + """ + self._set_to_record(file_name, key, context) + if not callable(self.progress_handler): + return + try: + self.progress_handler(uploaded_size, total_size) + except Exception as err: + err.no_need_retry = True + raise err + + def _initial_context( + self, + key, + file_name, + 
modify_time, + part_size + ): + context = _ResumeUploadV2Context( + up_hosts=[], + upload_id='', + expired_at=0, + part_size=part_size, + parts=[], + modify_time=modify_time, + resumed=False + ) + + # try to recover from record + + return self._recover_from_record( + file_name, + key, + context + ) + + def initial_parts( + self, + up_token, + key, + file_path=None, + data=None, + data_size=None, + modify_time=None, + part_size=None, + file_name=None, + up_endpoint=None, + **kwargs + ): + """ + + Parameters + ---------- + up_token: str + key: str + file_path: str + data: IOBase + data_size: int + modify_time: int + part_size: int + file_name: str + up_endpoint: Endpoint + kwargs + + Returns + ------- + ret: _ResumeUploadV2Context + resp: ResponseInfo + """ + # -- check and initialize arguments + # must provide file_path or data + if not file_path and not data: + raise TypeError('Must provide one of file_path or data.') + if file_path and data: + raise TypeError('Must provide only one of file_path or data.') + + # data must have a known length + if not file_path and not data_size: + raise TypeError('Must provide data_size when using data.') + + if not modify_time: + if file_path: + modify_time = int(path.getmtime(file_path)) + else: + modify_time = int(time()) + + if not part_size: + part_size = self.part_size + + # -- initialize context + if not file_name and file_path: + file_name = path.basename(file_path) + context = self._initial_context( + key=key, + file_name=file_name, + modify_time=modify_time, + part_size=part_size + ) + + if ( + context.up_hosts and + context.upload_id and + context.expired_at + ): + return context, None + + # -- get a new upload id + if not context.up_hosts and up_endpoint: + context.up_hosts.extend([up_endpoint.get_value(scheme=self.preferred_scheme)]) + + if not context.up_hosts: + access_key, _, _ = Auth.up_token_decode(up_token) + context.up_hosts.extend(self._get_up_hosts(access_key)) + + bucket_name = Auth.get_bucket_name(up_token) + + resp = None + for up_host in context.up_hosts: + url = self.__get_url_for_upload( + up_host, + bucket_name, + key + ) + ret, resp = qn_http_client.post( + url=url, + data='', + files=None, + headers={ + 'Authorization': 'UpToken {}'.format(up_token) + } + ) + if not resp.ok() and not resp.need_retry(): + break + if resp.ok() and ret: + context = context._replace( + upload_id=ret.get('uploadId', ''), + expired_at=ret.get('expireAt', 0) + ) + break + + return context, resp + + def upload_parts( + self, + up_token, + data, + data_size, + context, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + data + data_size: int + context: _ResumeUploadV2Context + kwargs + key, file_name + + Returns + ------- + part: _ResumeUploadV2Part + resp: ResponseInfo + + """ + # initialize arguments + chunk_list = self.gen_chunk_list( + size=data_size, + chunk_size=context.part_size, + uploaded_chunk_no_list=[ + p.part_no for p in context.parts + ] + ) + up_hosts = list(context.up_hosts) + file_name = kwargs.get('file_name', None) + key = kwargs.get('key', None) + + # initialize upload state + part, resp = None, None + uploaded_size = context.part_size * len(context.parts) + if math.ceil(data_size / context.part_size) in [p.part_no for p in context.parts]: + # if the last part has been uploaded, correct the uploaded size + uploaded_size += (data_size % context.part_size) - context.part_size + lock = Lock() + + if not self.concurrent_executor: + # upload sequentially + for chunk in chunk_list: + part, resp = self.__upload_part( + data=data, + chunk_info=chunk, +
up_hosts=up_hosts, + up_token=up_token, + upload_id=context.upload_id, + key=key, + lock=lock + ) + if not resp.ok(): + return None, resp + context.parts.append(part) + uploaded_size += chunk.chunk_size + self._progress_handler( + file_name=file_name, + key=key, + context=context, + uploaded_size=uploaded_size, + total_size=data_size + ) + else: + # upload concurrently + future_chunk_dict = {} + for chunk in chunk_list: + ftr = self.concurrent_executor.submit( + self.__upload_part, + data=data, + chunk_info=chunk, + up_hosts=up_hosts, + up_token=up_token, + upload_id=context.upload_id, + key=key, + lock=lock + ) + future_chunk_dict[ftr] = chunk + + first_failed_resp = None + for ftr in futures.as_completed(future_chunk_dict): + if ftr.cancelled(): + continue + elif ftr.exception(): + if first_failed_resp: + continue + first_failed_resp = ResponseInfo(None, ftr.exception()) + for not_done in filter(lambda f: not f.done(), future_chunk_dict): + not_done.cancel() + else: + part, resp = ftr.result() + if not part: + if not first_failed_resp: + first_failed_resp = resp + for not_done in filter(lambda f: not f.done(), future_chunk_dict): + not_done.cancel() + else: + context.parts.append(part) + uploaded_size += future_chunk_dict[ftr].chunk_size + self._progress_handler( + file_name=file_name, + key=key, + context=context, + uploaded_size=uploaded_size, + total_size=data_size + ) + if first_failed_resp: + return None, first_failed_resp + + return part, resp + + def complete_parts( + self, + up_token, + data_size, + context, + **kwargs + ): + """ + Parameters + ---------- + up_token: str + data_size: int + context: _ResumeUploadV2Context + kwargs + key, file_name, params, metadata + Returns + ------- + ret: dict + resp: ResponseInfo + """ + key = kwargs.get('key', None) + file_name = kwargs.get('file_name', None) + mime_type = kwargs.get('mime_type', None) + params = kwargs.get('params', None) + metadata = kwargs.get('metadata', None) + + # sort contexts + sorted_parts = sorted(context.parts, key=lambda part: part.part_no) + + bucket_name = Auth.get_bucket_name(up_token) + + ret, resp = None, None + for up_host in context.up_hosts: + url = self.__get_url_for_upload( + up_host, + bucket_name, + key, + upload_id=context.upload_id + ) + data = { + 'parts': [ + { + 'etag': p.etag, + 'partNumber': p.part_no + } + for p in sorted_parts + ], + 'fname': file_name, + 'mimeType': mime_type, + 'customVars': params, + 'metadata': metadata + } + ret, resp = qn_http_client.post( + url=url, + data=json.dumps(data), + files=None, + headers={ + 'Content-Type': 'application/json', + 'Authorization': 'UpToken {}'.format(up_token) + } + ) + if resp.ok() or not resp.need_retry(): + break + self._try_delete_record( + file_name, + key, + context, + resp + ) + return ret, resp + + def __upload_with_retrier( + self, + access_key, + bucket_name, + **upload_opts + ): + file_name = upload_opts.get('file_name', None) + key = upload_opts.get('key', None) + modify_time = upload_opts.get('modify_time', None) + part_size = upload_opts.get('part_size', self.part_size) + + context = self._initial_context( + key=key, + file_name=file_name, + modify_time=modify_time, + part_size=part_size + ) + preferred_endpoints = None + if context.up_hosts: + preferred_endpoints = [ + Endpoint.from_host(h) + for h in context.up_hosts + ] + + progress_record = None + if all( + [ + self.upload_progress_recorder, + file_name, + key + ] + ): + progress_record = ProgressRecord( + upload_api_version='v1', + exists=functools.partial( + 
self.upload_progress_recorder.has_upload_record, + file_name=file_name, + key=key + ), + delete=functools.partial( + self.upload_progress_recorder.delete_upload_record, + file_name=file_name, + key=key + ) + ) + + retrier = get_default_retrier( + regions_provider=self._get_regions_provider( + access_key=access_key, + bucket_name=bucket_name + ), + preferred_endpoints_provider=preferred_endpoints, + progress_record=progress_record, + accelerate_uploading=self.accelerate_uploading + ) + + data = upload_opts.get('data') + attempt = None + for attempt in retrier: + with attempt: + upload_opts['up_endpoint'] = attempt.context.get('endpoint') + attempt.result = self.__upload( + **upload_opts + ) + ret, resp = attempt.result + if resp.ok() and ret: + return attempt.result + if ( + not is_seekable(data) or + not resp.need_retry() + ): + return attempt.result + data.seek(0) + + if attempt is None: + raise RuntimeError('Retrier is not working. attempt is None') + + return attempt.result + + def __upload( + self, + up_token, + key, + file_path, + file_name, + data, + data_size, + part_size, + modify_time, + mime_type, + custom_vars, + metadata, + up_endpoint + ): + # initial_parts + context, resp = self.initial_parts( + up_token, + key, + file_path=file_path, + file_name=file_name, + data=data, + data_size=data_size, + modify_time=modify_time, + part_size=part_size, + up_endpoint=up_endpoint + ) + + if ( + not context.up_hosts or + not context.upload_id or + not context.expired_at + ): + return None, resp + + # upload parts + try: + if file_path: + data_size = path.getsize(file_path) + data = open(file_path, 'rb') + elif isinstance(data, bytes): + data_size = len(data) + data = BytesIO(data) + elif isinstance(data, str): + data_size = len(data) + data = BytesIO(b(data)) + ret, resp = self.upload_parts( + up_token=up_token, + data=data, + data_size=data_size, + context=context, + + key=key, + file_name=file_name + ) + finally: + if file_path: + data.close() + + if resp and not resp.ok(): + return ret, resp + + # complete parts + ret, resp = self.complete_parts( + up_token=up_token, + data_size=data_size, + context=context, + + key=key, + mime_type=mime_type, + file_name=file_name, + params=custom_vars, + metadata=metadata + ) + + return ret, resp + + def upload( + self, + key, + file_path=None, + data=None, + data_size=None, + + part_size=None, + modify_time=None, + mime_type=None, + metadata=None, + file_name=None, + custom_vars=None, + **kwargs + ): + """ + Parameters + ---------- + key: str + file_path: str + data: IOBase + data_size: int + part_size: int + modify_time: int + mime_type: str + metadata: dict + file_name: str + custom_vars: dict + kwargs + up_token: str + bucket_name: str, + expired: int, + policy: dict, + strict_policy: bool + + Returns + ------- + ret: dict + resp: ResponseInfo + """ + # up_token + up_token = kwargs.get('up_token', None) + if not up_token: + kwargs.setdefault('up_token', self.get_up_token(**kwargs)) + access_key = self.auth.get_access_key() + else: + access_key, _, _ = Auth.up_token_decode(up_token) + + # bucket_name + kwargs['bucket_name'] = Auth.get_bucket_name(up_token) + + # file_name + if not file_name and file_path: + file_name = path.basename(file_path) + + # upload + return self.__upload_with_retrier( + access_key=access_key, + key=key, + file_path=file_path, + file_name=file_name, + data=data, + data_size=data_size, + part_size=part_size, + modify_time=modify_time, + mime_type=mime_type, + custom_vars=custom_vars, + metadata=metadata, + **kwargs + ) + + 
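# Illustrative usage of the public upload() entry point above (constructor arguments omitted here): + #     uploader = ResumeUploaderV2(...) + #     ret, resp = uploader.upload(key='example-key', file_path='./example.bin') + # The private helpers below build resumable-upload-v2 (multipart) URLs of the form + #     <up_host>/buckets/<bucket_name>/objects/<urlsafe-base64(key) or '~'>/uploads[/<upload_id>[/<part_no>]] +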
def __get_url_for_upload( + self, + up_host, + bucket_name, + key, + upload_id=None, + part_no=None, + ): + """ + Parameters + ---------- + up_host: str + bucket_name: str + key: str + upload_id: str + part_no: int + + Returns + ------- + str + """ + if not bucket_name: + bucket_name = self.bucket_name + + object_entry = '~' + if key: + object_entry = urlsafe_base64_encode(key) + + url_segs = [ + up_host, + 'buckets', bucket_name, + 'objects', object_entry, + 'uploads', + ] + + if upload_id: + url_segs.append(upload_id) + + if part_no: + url_segs.append(str(part_no)) + + return '/'.join(url_segs) + + def __upload_part( + # re-sort arguments + self, + data, + chunk_info, + up_hosts, + up_token, + upload_id, + key, + lock + ): + """ + Parameters + ---------- + data: IOBase + chunk_info: ChunkInfo + up_hosts: list[str] + up_token: str + upload_id: str + key: str + lock: Lock + + Returns + ------- + part: _ResumeUploadV2Part + resp: ResponseInfo + """ + if not up_hosts: + raise ValueError('Must provide at least one up host') + + bucket_name = Auth.get_bucket_name(up_token) + if not bucket_name: + bucket_name = self.bucket_name + + chunked_data = IOChunked( + base_io=data, + chunk_offset=chunk_info.chunk_offset, + chunk_size=chunk_info.chunk_size, + lock=lock + ) + chunk_md5 = io_md5(chunked_data) + chunked_data.seek(0) + part, resp = None, None + for up_host in up_hosts: + url = self.__get_url_for_upload( + up_host, + bucket_name, + key, + upload_id=upload_id, + part_no=chunk_info.chunk_no + ) + ret, resp = qn_http_client.put( + url=url, + data=chunked_data, + files=None, + headers={ + 'Content-Type': 'application/octet-stream', + 'Content-MD5': chunk_md5, + 'Authorization': 'UpToken {}'.format(up_token) + } + ) + if resp.ok() and ret: + part = _ResumeUploadV2Part( + part_no=chunk_info.chunk_no, + etag=ret.get('etag', '') + ) + return part, resp + if ( + not is_seekable(chunked_data) or + not resp.need_retry() + ): + return part, resp + chunked_data.seek(0) + return part, resp + + +# use dataclass instead of namedtuple once the minimum supported Python version is 3.7 +_ResumeUploadV2Part = namedtuple( + 'ResumeUploadV2Part', + [ + 'part_no', + 'etag' + ] +) + +_ResumeUploadV2Context = namedtuple( + 'ResumeUploadV2Context', + [ + 'up_hosts', + 'upload_id', + 'expired_at', + 'part_size', + 'parts', + 'modify_time', + 'resumed' + ] +) diff --git a/qiniu/test/conf_test.py b/qiniu/test/conf_test.py deleted file mode 100644 index daf0ed3b..00000000 --- a/qiniu/test/conf_test.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- -import unittest -from qiniu import conf - -class TestConfig(unittest.TestCase): - def test_USER_AGENT(self): - assert len(conf.USER_AGENT) >= len('qiniu python-sdk') - -if __name__ == '__main__': - unittest.main() diff --git a/qiniu/test/fop_test.py b/qiniu/test/fop_test.py deleted file mode 100644 index 43741fd8..00000000 --- a/qiniu/test/fop_test.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding:utf-8 -*- -import unittest -import os -from qiniu import fop - -pic = "http://cheneya.qiniudn.com/hello_jpg" - -class TestFop(unittest.TestCase): - def test_exif(self): - ie = fop.Exif() - ret = ie.make_request(pic) - self.assertEqual(ret, "%s?exif" % pic) - - def test_imageView(self): - iv = fop.ImageView() - iv.height = 100 - ret = iv.make_request(pic) - self.assertEqual(ret, "%s?imageView/1/h/100" % pic) - - iv.quality = 20 - iv.format = "png" - ret = iv.make_request(pic) - self.assertEqual(ret, "%s?imageView/1/h/100/q/20/format/png" % pic) - - def test_imageInfo(self): - ii =
fop.ImageInfo() - ret = ii.make_request(pic) - self.assertEqual(ret, "%s?imageInfo" % pic) - - -if __name__ == '__main__': - unittest.main() diff --git a/qiniu/test/io_test.py b/qiniu/test/io_test.py deleted file mode 100644 index 503dce5e..00000000 --- a/qiniu/test/io_test.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import unittest -import string -import random -import urllib -try: - import zlib as binascii -except ImportError: - import binascii -import cStringIO - -from qiniu import conf -from qiniu import rs -from qiniu import io - -conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") -conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") -bucket_name = os.getenv("QINIU_TEST_BUCKET") - -policy = rs.PutPolicy(bucket_name) -extra = io.PutExtra() -extra.mime_type = "text/plain" -extra.params = {'x:a':'a'} - -def r(length): - lib = string.ascii_uppercase - return ''.join([random.choice(lib) for i in range(0, length)]) - -class TestUp(unittest.TestCase): - def test(self): - def test_put(): - key = "test_%s" % r(9) - params = "op=3" - data = "hello bubby!" - extra.check_crc = 2 - extra.crc32 = binascii.crc32(data) & 0xFFFFFFFF - ret, err = io.put(policy.token(), key, data, extra) - assert err is None - assert ret['key'] == key - - def test_put_same_crc(): - key = "test_%s" % r(9) - data = "hello bubby!" - extra.check_crc = 2 - ret, err = io.put(policy.token(), key, data, extra) - assert err is None - assert ret['key'] == key - - def test_put_no_key(): - data = r(100) - extra.check_crc = 0 - ret, err = io.put(policy.token(), key=None, data=data, extra=extra) - assert err is None - assert ret['hash'] == ret['key'] - - def test_put_quote_key(): - data = r(100) - key = 'a\\b\\c"你好' + r(9) - ret, err = io.put(policy.token(), key, data) - print err - assert err is None - assert ret['key'].encode('utf8') == key - - data = r(100) - key = u'a\\b\\c"你好' + r(9) - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret['key'] == key - - def test_put_unicode1(): - key = "test_%s" % r(9) + '你好' - data = key - ret, err = io.put(policy.token(), key, data, extra) - assert err is None - assert ret[u'key'].endswith(u'你好') - - def test_put_unicode2(): - key = "test_%s" % r(9) + '你好' - data = key - data = data.decode('utf8') - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret[u'key'].endswith(u'你好') - - def test_put_unicode3(): - key = "test_%s" % r(9) + '你好' - data = key - key = key.decode('utf8') - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret[u'key'].endswith(u'你好') - - def test_put_unicode4(): - key = "test_%s" % r(9) + '你好' - data = key - key = key.decode('utf8') - data = data.decode('utf8') - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret[u'key'].endswith(u'你好') - - def test_put_StringIO(): - key = "test_%s" % r(9) - data = cStringIO.StringIO('hello buddy!') - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret['key'] == key - - def test_put_urlopen(): - key = "test_%s" % r(9) - data = urllib.urlopen('http://cheneya.qiniudn.com/hello_jpg') - ret, err = io.put(policy.token(), key, data) - assert err is None - assert ret['key'] == key - - def test_put_no_length(): - class test_reader(object): - def __init__(self): - self.data = 'abc' - self.pos = 0 - def read(self, n=None): - if n is None or n < 0: - newpos = len(self.data) - else: - newpos = min(self.pos+n, len(self.data)) - r = self.data[self.pos: newpos] - self.pos = newpos - return r - key = 
"test_%s" % r(9) - data = test_reader() - - extra.check_crc = 2 - extra.crc32 = binascii.crc32('abc') & 0xFFFFFFFF - ret, err = io.put(policy.token(), key, data, extra) - assert err is None - assert ret['key'] == key - - test_put() - test_put_same_crc() - test_put_no_key() - test_put_quote_key() - test_put_unicode1() - test_put_unicode2() - test_put_unicode3() - test_put_unicode4() - test_put_StringIO() - test_put_urlopen() - test_put_no_length() - - def test_put_file(self): - localfile = "%s" % __file__ - key = "test_%s" % r(9) - - extra.check_crc = 1 - ret, err = io.put_file(policy.token(), key, localfile, extra) - assert err is None - assert ret['key'] == key - - def test_put_crc_fail(self): - key = "test_%s" % r(9) - data = "hello bubby!" - extra.check_crc = 2 - extra.crc32 = "wrong crc32" - ret, err = io.put(policy.token(), key, data, extra) - assert err is not None - - -class Test_get_file_crc32(unittest.TestCase): - def test_get_file_crc32(self): - file_path = '%s' % __file__ - - data = None - with open(file_path, 'rb') as f: - data = f.read() - io._BLOCK_SIZE = 4 - assert binascii.crc32(data) & 0xFFFFFFFF == io._get_file_crc32(file_path) - - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/test/resumable_io_test.py b/qiniu/test/resumable_io_test.py deleted file mode 100644 index 19fbbf14..00000000 --- a/qiniu/test/resumable_io_test.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import unittest -import string -import random -import platform -try: - import zlib as binascii -except ImportError: - import binascii -import urllib -import tempfile -import shutil - -from qiniu import conf -from qiniu.auth import up -from qiniu import resumable_io -from qiniu import rs - -bucket = os.getenv("QINIU_TEST_BUCKET") -conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") -conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") - - -def r(length): - lib = string.ascii_uppercase - return ''.join([random.choice(lib) for i in range(0, length)]) - -class TestBlock(unittest.TestCase): - def test_block(self): - policy = rs.PutPolicy(bucket) - uptoken = policy.token() - client = up.Client(uptoken) - - rets = [0, 0] - data_slice_2 = "\nbye!" 
- ret, err = resumable_io.mkblock(client, len(data_slice_2), data_slice_2) - assert err is None, err - self.assertEqual(ret["crc32"], binascii.crc32(data_slice_2)) - - extra = resumable_io.PutExtra(bucket) - extra.mimetype = "text/plain" - extra.progresses = [ret] - lens = 0 - for i in xrange(0, len(extra.progresses)): - lens += extra.progresses[i]["offset"] - - key = u"sdk_py_resumable_block_4_%s" % r(9) - ret, err = resumable_io.mkfile(client, key, lens, extra) - assert err is None, err - self.assertEqual(ret["hash"], "FtCFo0mQugW98uaPYgr54Vb1QsO0", "hash not match") - rs.Client().delete(bucket, key) - - def test_put(self): - src = urllib.urlopen("http://cheneya.qiniudn.com/hello_jpg") - ostype = platform.system() - if ostype.lower().find("windows") != -1: - tmpf = "".join([os.getcwd(), os.tmpnam()]) - else: - tmpf = os.tmpnam() - dst = open(tmpf, 'wb') - shutil.copyfileobj(src, dst) - src.close() - - policy = rs.PutPolicy(bucket) - extra = resumable_io.PutExtra(bucket) - extra.bucket = bucket - extra.params = {"x:foo": "test"} - key = "sdk_py_resumable_block_5_%s" % r(9) - localfile = dst.name - ret, err = resumable_io.put_file(policy.token(), key, localfile, extra) - assert ret.get("x:foo") == "test", "return data not contains 'x:foo'" - dst.close() - os.remove(tmpf) - - assert err is None, err - self.assertEqual(ret["hash"], "FnyTMUqPNRTdk1Wou7oLqDHkBm_p", "hash not match") - rs.Client().delete(bucket, key) - - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/test/rpc_test.py b/qiniu/test/rpc_test.py deleted file mode 100644 index db35b11a..00000000 --- a/qiniu/test/rpc_test.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -import StringIO -import unittest - -from qiniu import rpc -from qiniu import conf - -def round_tripper(client, method, path, body): - pass - -class ClsTestClient(rpc.Client): - def round_tripper(self, method, path, body): - round_tripper(self, method, path, body) - return super(ClsTestClient, self).round_tripper(method, path, body) - -client = ClsTestClient(conf.RS_HOST) - -class TestClient(unittest.TestCase): - def test_call(self): - global round_tripper - - def tripper(client, method, path, body): - self.assertEqual(path, "/hello") - assert body is None - - round_tripper = tripper - client.call("/hello") - - def test_call_with(self): - global round_tripper - def tripper(client, method, path, body): - self.assertEqual(body, "body") - - round_tripper = tripper - client.call_with("/hello", "body") - - def test_call_with_multipart(self): - global round_tripper - def tripper(client, method, path, body): - target_type = "multipart/form-data" - self.assertTrue(client._header["Content-Type"].startswith(target_type)) - start_index = client._header["Content-Type"].find("boundary") - boundary = client._header["Content-Type"][start_index + 9: ] - dispostion = 'Content-Disposition: form-data; name="auth"' - tpl = "--%s\r\n%s\r\n\r\n%s\r\n--%s--\r\n" % (boundary, dispostion, - "auth_string", boundary) - self.assertEqual(len(tpl), client._header["Content-Length"]) - self.assertEqual(len(tpl), body.length()) - - round_tripper = tripper - client.call_with_multipart("/hello", fields={"auth": "auth_string"}) - - def test_call_with_form(self): - global round_tripper - def tripper(client, method, path, body): - self.assertEqual(body, "action=a&op=a&op=b") - target_type = "application/x-www-form-urlencoded" - self.assertEqual(client._header["Content-Type"], target_type) - self.assertEqual(client._header["Content-Length"], len(body)) - - round_tripper = 
tripper - client.call_with_form("/hello", dict(op=["a", "b"], action="a")) - - -class TestMultiReader(unittest.TestCase): - def test_multi_reader1(self): - a = StringIO.StringIO('你好') - b = StringIO.StringIO('abcdefg') - c = StringIO.StringIO(u'悲剧') - mr = rpc.MultiReader([a, b, c]) - data = mr.read() - assert data.index('悲剧') > data.index('abcdefg') - - def test_multi_reader2(self): - a = StringIO.StringIO('你好') - b = StringIO.StringIO('abcdefg') - c = StringIO.StringIO(u'悲剧') - mr = rpc.MultiReader([a, b, c]) - data = mr.read(8) - assert len(data) is 8 - - -def encode_multipart_formdata2(fields, files): - if files is None: - files = [] - if fields is None: - fields = [] - - BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' - CRLF = '\r\n' - L = [] - for (key, value) in fields: - L.append('--' + BOUNDARY) - L.append('Content-Disposition: form-data; name="%s"' % key) - L.append('') - L.append(value) - for (key, filename, value) in files: - L.append('--' + BOUNDARY) - disposition = "Content-Disposition: form-data;" - L.append('%s name="%s"; filename="%s"' % (disposition, key, filename)) - L.append('Content-Type: application/octet-stream') - L.append('') - L.append(value) - L.append('--' + BOUNDARY + '--') - L.append('') - body = CRLF.join(L) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - - -class TestEncodeMultipartFormdata(unittest.TestCase): - def test_encode(self): - fields = {'a': '1', 'b': '2'} - files = [ - { - 'filename': 'key1', - 'data': 'data1', - 'mime_type': 'application/octet-stream', - }, - { - 'filename': 'key2', - 'data': 'data2', - 'mime_type': 'application/octet-stream', - } - ] - content_type, mr = rpc.Client('localhost').encode_multipart_formdata(fields, files) - t, b = encode_multipart_formdata2( - [('a', '1'), ('b', '2')], - [('file', 'key1', 'data1'), ('file', 'key2', 'data2')] - ) - assert t == content_type - assert len(b) == mr.length() - - def test_unicode(self): - def test1(): - files = [{'filename': '你好', 'data': '你好', 'mime_type': ''}] - _, body = rpc.Client('localhost').encode_multipart_formdata(None, files) - return len(body.read()) - def test2(): - files = [{'filename': u'你好', 'data': '你好', 'mime_type': ''}] - _, body = rpc.Client('localhost').encode_multipart_formdata(None, files) - return len(body.read()) - def test3(): - files = [{'filename': '你好', 'data': u'你好', 'mime_type': ''}] - _, body = rpc.Client('localhost').encode_multipart_formdata(None, files) - return len(body.read()) - def test4(): - files = [{'filename': u'你好', 'data': u'你好', 'mime_type': ''}] - _, body = rpc.Client('localhost').encode_multipart_formdata(None, files) - return len(body.read()) - - assert test1() == test2() - assert test2() == test3() - assert test3() == test4() - - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/test/rsf_test.py b/qiniu/test/rsf_test.py deleted file mode 100644 index 3f0c5fd7..00000000 --- a/qiniu/test/rsf_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -import unittest -from qiniu import rsf -from qiniu import conf - -import os -conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY") -conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY") -bucket_name = os.getenv("QINIU_TEST_BUCKET") - -class TestRsf(unittest.TestCase): - def test_list_prefix(self): - c = rsf.Client() - ret, err = c.list_prefix(bucket_name, limit = 4) - self.assertEqual(err is rsf.EOF or err is None, True) - assert len(ret.get('items')) == 4 - - -if __name__ == "__main__": - unittest.main() diff --git a/qiniu/utils.py 
b/qiniu/utils.py new file mode 100644 index 00000000..197b8813 --- /dev/null +++ b/qiniu/utils.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +from hashlib import sha1, new as hashlib_new +from base64 import urlsafe_b64encode, urlsafe_b64decode +from datetime import datetime, tzinfo, timedelta + +from .compat import b, s + +try: + import zlib + + binascii = zlib +except ImportError: + zlib = None + import binascii + +_BLOCK_SIZE = 1024 * 1024 * 4 + + +def urlsafe_base64_encode(data): + """urlsafe的base64编码: + + 对提供的数据进行urlsafe的base64编码。规格参考: + https://developer.qiniu.com/kodo/manual/1231/appendix#1 + + Args: + data: 待编码的数据,一般为字符串 + + Returns: + 编码后的字符串 + """ + ret = urlsafe_b64encode(b(data)) + return s(ret) + + +def urlsafe_base64_decode(data): + """urlsafe的base64解码: + + 对提供的urlsafe的base64编码的数据进行解码 + + Args: + data: 待解码的数据,一般为字符串 + + Returns: + 解码后的字符串。 + """ + ret = urlsafe_b64decode(s(data)) + return ret + + +def file_crc32(filePath): + """计算文件的crc32检验码: + + Args: + filePath: 待计算校验码的文件路径 + + Returns: + 文件内容的crc32校验码。 + """ + crc = 0 + with open(filePath, 'rb') as f: + for block in _file_iter(f, _BLOCK_SIZE): + crc = binascii.crc32(block, crc) & 0xFFFFFFFF + return crc + + +def io_crc32(io_data): + result = 0 + for d in io_data: + result = binascii.crc32(d, result) & 0xFFFFFFFF + return result + + +def io_md5(io_data): + h = hashlib_new('md5') + for d in io_data: + h.update(d) + return h.hexdigest() + + +def crc32(data): + """计算输入流的crc32检验码: + + Args: + data: 待计算校验码的字符流 + + Returns: + 输入流的crc32校验码。 + """ + return binascii.crc32(b(data)) & 0xffffffff + + +def _file_iter(input_stream, size, offset=0): + """读取输入流: + + Args: + input_stream: 待读取文件的二进制流 + size: 二进制流的大小 + + Raises: + IOError: 文件流读取失败 + """ + input_stream.seek(offset) + d = input_stream.read(size) + while d: + yield d + d = input_stream.read(size) + input_stream.seek(0) + + +def _sha1(data): + """单块计算hash: + + Args: + data: 待计算hash的数据 + + Returns: + 输入数据计算的hash值 + """ + h = sha1() + h.update(data) + return h.digest() + + +def etag_stream(input_stream): + """ + 计算输入流的etag + + .. deprecated:: + 在 v2 分片上传使用 4MB 以外分片大小时无法正常工作 + + Parameters + ---------- + input_stream: io.IOBase + 支持随机访问的文件型对象 + + Returns + ------- + str + + """ + array = [_sha1(block) for block in _file_iter(input_stream, _BLOCK_SIZE)] + if len(array) == 0: + array = [_sha1(b'')] + if len(array) == 1: + data = array[0] + prefix = b'\x16' + else: + sha1_str = b('').join(array) + data = _sha1(sha1_str) + prefix = b'\x96' + return urlsafe_base64_encode(prefix + data) + + +def etag(filePath): + """ + 计算文件的etag: + + .. 
deprecated:: + 在 v2 分片上传使用 4MB 以外分片大小时无法正常工作 + + + Parameters + ---------- + filePath: str + 待计算 etag 的文件路径 + + Returns + ------- + str + 输入文件的etag值 + """ + with open(filePath, 'rb') as f: + return etag_stream(f) + + +def entry(bucket, key): + """计算七牛API中的数据格式: + + entry规格参考 https://developer.qiniu.com/kodo/api/1276/data-format + + Args: + bucket: 待操作的空间名 + key: 待操作的文件名 + + Returns: + 符合七牛API规格的数据格式 + """ + if key is None: + return urlsafe_base64_encode('{0}'.format(bucket)) + else: + return urlsafe_base64_encode('{0}:{1}'.format(bucket, key)) + + +def decode_entry(e): + return (s(urlsafe_base64_decode(e)).split(':') + [None] * 2)[:2] + + +def rfc_from_timestamp(timestamp): + """将时间戳转换为HTTP RFC格式 + + Args: + timestamp: 整型Unix时间戳(单位秒) + """ + last_modified_date = datetime.utcfromtimestamp(timestamp) + last_modified_str = last_modified_date.strftime( + '%a, %d %b %Y %H:%M:%S GMT') + return last_modified_str + + +def _valid_header_key_char(ch): + is_token_table = [ + "!", "#", "$", "%", "&", "\\", "*", "+", "-", ".", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", + "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", + "U", "W", "V", "X", "Y", "Z", + "^", "_", "`", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", + "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", + "u", "v", "w", "x", "y", "z", + "|", "~"] + return 0 <= ord(ch) < 128 and ch in is_token_table + + +def canonical_mime_header_key(field_name): + for ch in field_name: + if not _valid_header_key_char(ch): + return field_name + result = "" + upper = True + for ch in field_name: + if upper and "a" <= ch <= "z": + result += ch.upper() + elif not upper and "A" <= ch <= "Z": + result += ch.lower() + else: + result += ch + upper = ch == "-" + return result + + +class _UTC_TZINFO(tzinfo): + def utcoffset(self, dt): + return timedelta(hours=0) + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return timedelta(0) + + +def dt2ts(dt): + """ + converte datetime to timestamp + + Parameters + ---------- + dt: datetime.datetime + """ + if not dt.tzinfo: + st = (dt - datetime(1970, 1, 1)).total_seconds() + else: + st = (dt - datetime(1970, 1, 1, tzinfo=_UTC_TZINFO())).total_seconds() + + return int(st) diff --git a/qiniu/zone.py b/qiniu/zone.py new file mode 100644 index 00000000..0a213eaa --- /dev/null +++ b/qiniu/zone.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- + +from qiniu.region import LegacyRegion + + +class Zone(LegacyRegion): + pass diff --git a/setup.py b/setup.py index 734d7d6e..fa920d45 100644 --- a/setup.py +++ b/setup.py @@ -1,44 +1,75 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -try: - from setuptools import setup -except ImportError: - from distutils.core import setup - -PACKAGE = 'qiniu' -NAME = 'qiniu' -DESCRIPTION = 'Qiniu Resource Storage SDK for Python 2.X.' -LONG_DESCRIPTION = 'see:\nhttps://github.com/qiniu/python-sdk\n' -AUTHOR = 'Shanghai Qiniu Information Technologies Co., Ltd.' 
-AUTHOR_EMAIL = 'support@qiniu.com' -MAINTAINER_EMAIL = 'fengliyuan@qiniu.com' -URL = 'https://github.com/qiniu/python-sdk' -VERSION = __import__(PACKAGE).__version__ +import io +import os +import re + +from setuptools import setup, find_packages + + +def read(*names, **kwargs): + return io.open( + os.path.join(os.path.dirname(__file__), *names), + encoding=kwargs.get("encoding", "utf8") + ).read() + + +def find_version(*file_paths): + version_file = read(*file_paths) + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", + version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - long_description=LONG_DESCRIPTION, - author=AUTHOR, - author_email=AUTHOR_EMAIL, - maintainer_email=MAINTAINER_EMAIL, - license='MIT', - url=URL, - packages=['qiniu', 'qiniu.test', 'qiniu.auth', 'qiniu.rs', 'qiniu.rs.test'], - platforms='any', - classifiers=[ - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', - 'Topic :: Software Development :: Libraries :: Python Modules' - ], - test_suite = 'nose.collector' + name='qiniu', + version=find_version("qiniu/__init__.py"), + description='Qiniu Resource Storage SDK', + long_description='see:\nhttps://github.com/qiniu/python-sdk\n', + author='Shanghai Qiniu Information Technologies Co., Ltd.', + author_email='sdk@qiniu.com', + maintainer_email='support@qiniu.com', + license='MIT', + url='https://github.com/qiniu/python-sdk', + platforms='any', + packages=find_packages(), + classifiers=[ + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', + 'Topic :: Software Development :: Libraries :: Python Modules' + ], + install_requires=[ + 'requests; python_version >= "3.7"', + 'requests<2.28; python_version < "3.7"', + 'futures; python_version == "2.7"', + 'enum34; python_version == "2.7"' + ], + extras_require={ + 'dev': [ + 'coverage<7.2', + 'flake8', + 'pytest', + 'pytest-cov', + 'freezegun', + ] + }, + + entry_points={ + 'console_scripts': [ + 'qiniupy = qiniu.main:main', + ], + } ) diff --git a/test-env.sh b/test-env.sh deleted file mode 100644 index cb40c24b..00000000 --- a/test-env.sh +++ /dev/null @@ -1,4 +0,0 @@ -export QINIU_ACCESS_KEY="" -export QINIU_SECRET_KEY="" -export QINIU_TEST_BUCKET="" -export QINIU_TEST_DOMAIN="" diff --git a/test_qiniu.py b/test_qiniu.py new file mode 100644 index 00000000..c8dce456 --- /dev/null +++ b/test_qiniu.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +import os +import string +import random +import tempfile +import functools + +import requests + +import unittest +import pytest +from freezegun import freeze_time + +from qiniu import Auth, set_default, etag, PersistentFop, build_op, op_save, Zone, QiniuMacAuth +from qiniu import BucketManager, 
build_batch_copy, build_batch_rename, build_batch_move, build_batch_stat, \ + build_batch_delete, DomainManager +from qiniu import urlsafe_base64_encode, urlsafe_base64_decode, canonical_mime_header_key, entry, decode_entry + +from qiniu.compat import is_py2, is_py3, b, json + +import qiniu.config + + +if is_py2: + import sys + import StringIO + import urllib + from imp import reload + + reload(sys) + sys.setdefaultencoding('utf-8') + StringIO = StringIO.StringIO + urlopen = urllib.urlopen +elif is_py3: + import io + import urllib + + StringIO = io.StringIO + urlopen = urllib.request.urlopen + +access_key = os.getenv('QINIU_ACCESS_KEY') +secret_key = os.getenv('QINIU_SECRET_KEY') +bucket_name = os.getenv('QINIU_TEST_BUCKET') +hostscache_dir = None + + +def rand_string(length): + lib = string.ascii_uppercase + return ''.join([random.choice(lib) for i in range(0, length)]) + + +def create_temp_file(size): + t = tempfile.mktemp() + f = open(t, 'wb') + f.seek(size - 1) + f.write(b('0')) + f.close() + return t + + +def remove_temp_file(file): + try: + os.remove(file) + except OSError: + pass + +class BucketTestCase(unittest.TestCase): + q = Auth(access_key, secret_key) + bucket = BucketManager(q) + + def test_list(self): + ret, eof, info = self.bucket.list(bucket_name, limit=4) + assert eof is False + assert len(ret.get('items')) == 4 + ret, eof, info = self.bucket.list(bucket_name, limit=1000) + assert info.status_code == 200, info + + def test_buckets(self): + ret, info = self.bucket.buckets() + print(info) + assert bucket_name in ret + + def test_prefetch(self): + ret, info = self.bucket.prefetch(bucket_name, 'python-sdk.html', hostscache_dir=hostscache_dir) + print(info) + assert ret['key'] == 'python-sdk.html' + + def test_fetch(self): + ret, info = self.bucket.fetch('https://developer.qiniu.com/kodo/sdk/python', bucket_name, + 'fetch.html', hostscache_dir=hostscache_dir) + print(info) + assert ret['key'] == 'fetch.html' + assert 'hash' in ret + + def test_fetch_without_key(self): + ret, info = self.bucket.fetch('https://developer.qiniu.com/kodo/sdk/python', bucket_name, + hostscache_dir=hostscache_dir) + print(info) + assert ret['key'] == ret['hash'] + assert 'hash' in ret + + def test_stat(self): + ret, info = self.bucket.stat(bucket_name, 'python-sdk.html') + print(info) + assert 'hash' in ret + + def test_delete(self): + ret, info = self.bucket.delete(bucket_name, 'del') + print(info) + assert ret is None + assert info.status_code == 612 + + def test_rename(self): + key = 'renameto' + rand_string(8) + self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + key2 = key + 'move' + ret, info = self.bucket.rename(bucket_name, key, key2) + print(info) + assert ret == {} + ret, info = self.bucket.delete(bucket_name, key2) + print(info) + assert ret == {} + + def test_copy(self): + key = 'copyto' + rand_string(8) + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + print(info) + assert ret == {} + ret, info = self.bucket.delete(bucket_name, key) + print(info) + assert ret == {} + + def test_change_mime(self): + ret, info = self.bucket.change_mime(bucket_name, 'python-sdk.html', 'text/html') + print(info) + assert ret == {} + + def test_change_type(self): + target_key = 'copyto' + rand_string(8) + self.bucket.copy(bucket_name, 'copyfrom', bucket_name, target_key) + ret, info = self.bucket.change_type(bucket_name, target_key, 1) + print(info) + assert ret == {} + ret, info = self.bucket.stat(bucket_name, target_key) + print(info) + assert 'type' in ret + 
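# clean up the object copied at the start of this test +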
self.bucket.delete(bucket_name, target_key) + + def test_copy_force(self): + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true') + print(info) + assert info.status_code == 200 + + def test_batch_copy(self): + key = 'copyto' + rand_string(8) + ops = build_batch_copy(bucket_name, {'copyfrom': key}, bucket_name) + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + ops = build_batch_delete(bucket_name, [key]) + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + + def test_batch_copy_force(self): + ops = build_batch_copy(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true') + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + + def test_batch_move(self): + key = 'moveto' + rand_string(8) + self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + key2 = key + 'move' + ops = build_batch_move(bucket_name, {key: key2}, bucket_name) + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + ret, info = self.bucket.delete(bucket_name, key2) + print(info) + assert ret == {} + + def test_batch_move_force(self): + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true') + print(info) + assert info.status_code == 200 + ops = build_batch_move(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true') + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + + def test_batch_rename(self): + key = 'rename' + rand_string(8) + self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + key2 = key + 'rename' + ops = build_batch_move(bucket_name, {key: key2}, bucket_name) + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + ret, info = self.bucket.delete(bucket_name, key2) + print(info) + assert ret == {} + + def test_batch_rename_force(self): + ret, info = self.bucket.rename(bucket_name, 'copyfrom', 'copyfrom', force='true') + print(info) + assert info.status_code == 200 + ops = build_batch_rename(bucket_name, {'copyfrom': 'copyfrom'}, force='true') + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + + def test_batch_stat(self): + ops = build_batch_stat(bucket_name, ['python-sdk.html']) + ret, info = self.bucket.batch(ops) + print(info) + assert ret[0]['code'] == 200 + + def test_delete_after_days(self): + days = '5' + ret, info = self.bucket.delete_after_days(bucket_name, 'invaild.html', days) + assert info.status_code == 612 + key = 'copyto' + rand_string(8) + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + ret, info = self.bucket.delete_after_days(bucket_name, key, days) + assert info.status_code == 200 + + def test_set_object_lifecycle(self): + key = 'test_set_object_lifecycle' + rand_string(8) + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + assert info.status_code == 200 + ret, info = self.bucket.set_object_lifecycle( + bucket=bucket_name, + key=key, + to_line_after_days=10, + to_archive_ir_after_days=15, + to_archive_after_days=20, + to_deep_archive_after_days=30, + delete_after_days=40 + ) + assert info.status_code == 200 + + def test_set_object_lifecycle_with_cond(self): + key = 'test_set_object_lifecycle_cond' + rand_string(8) + ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key) + assert info.status_code == 200 + ret, info = self.bucket.stat(bucket_name, key) + assert info.status_code == 200 + key_hash = ret['hash'] + ret, 
info = self.bucket.set_object_lifecycle( + bucket=bucket_name, + key=key, + to_line_after_days=10, + to_archive_ir_after_days=15, + to_archive_after_days=20, + to_deep_archive_after_days=30, + delete_after_days=40, + cond={ + 'hash': key_hash + } + ) + assert info.status_code == 200 + + def test_list_domains(self): + ret, info = self.bucket.list_domains(bucket_name) + print(info) + assert info.status_code == 200 + assert isinstance(ret, list) + + @freeze_time("1970-01-01") + def test_invalid_x_qiniu_date(self): + ret, info = self.bucket.stat(bucket_name, 'python-sdk.html') + assert ret is None + assert info.status_code == 403 + + @freeze_time("1970-01-01") + def test_invalid_x_qiniu_date_with_disable_date_sign(self): + q = Auth(access_key, secret_key, disable_qiniu_timestamp_signature=True) + bucket = BucketManager(q) + ret, info = bucket.stat(bucket_name, 'python-sdk.html') + assert 'hash' in ret + + @freeze_time("1970-01-01") + def test_invalid_x_qiniu_date_env(self): + os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = 'True' + ret, info = self.bucket.stat(bucket_name, 'python-sdk.html') + if hasattr(os, 'unsetenv'): + os.unsetenv('DISABLE_QINIU_TIMESTAMP_SIGNATURE') + else: + # fix unsetenv not exists in earlier python on windows + os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = '' + assert 'hash' in ret + + @freeze_time("1970-01-01") + def test_invalid_x_qiniu_date_env_be_ignored(self): + os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = 'True' + q = Auth(access_key, secret_key, disable_qiniu_timestamp_signature=False) + bucket = BucketManager(q) + ret, info = bucket.stat(bucket_name, 'python-sdk.html') + if hasattr(os, 'unsetenv'): + os.unsetenv('DISABLE_QINIU_TIMESTAMP_SIGNATURE') + else: + # fix unsetenv not exists in earlier python on windows + os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = '' + assert ret is None + assert info.status_code == 403 + +class DownloadTestCase(unittest.TestCase): + q = Auth(access_key, secret_key) + + def test_private_url(self): + private_bucket_domain = 'private-sdk.peterpy.cn' + private_key = 'gogopher.jpg' + base_url = 'http://%s/%s' % (private_bucket_domain, private_key) + private_url = self.q.private_download_url(base_url, expires=3600) + print(private_url) + r = requests.get(private_url) + assert r.status_code == 200 + + +class EtagTestCase(unittest.TestCase): + def test_zero_size(self): + open("x", 'a').close() + hash = etag("x") + assert hash == 'Fto5o-5ea0sNMlW_75VgGJCv2AcJ' + remove_temp_file("x") + + def test_small_size(self): + localfile = create_temp_file(1024 * 1024) + hash = etag(localfile) + assert hash == 'FnlAdmDasGTQOIgrU1QIZaGDv_1D' + remove_temp_file(localfile) + + def test_large_size(self): + localfile = create_temp_file(4 * 1024 * 1024 + 1) + hash = etag(localfile) + assert hash == 'ljF323utglY3GI6AvLgawSJ4_dgk' + remove_temp_file(localfile) + + +class CdnTestCase(unittest.TestCase): + q = Auth(access_key, secret_key) + domain_manager = DomainManager(q) + + def test_get_domain(self): + ret, info = self.domain_manager.get_domain('pythonsdk.qiniu.io') + print(info) + assert info.status_code == 200 + + +class ReadWithoutSeek(object): + def __init__(self, str): + self.str = str + pass + + def read(self): + print(self.str) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/cases/__init__.py b/tests/cases/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/conftest.py b/tests/cases/conftest.py new file mode 100644 index 00000000..13f41618 --- /dev/null +++ b/tests/cases/conftest.py 
@@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +import os +import random +import string + +import pytest + +from qiniu import config as qn_config +from qiniu import Auth + + +@pytest.fixture(scope='session') +def access_key(): + yield os.getenv('QINIU_ACCESS_KEY') + + +@pytest.fixture(scope='session') +def secret_key(): + yield os.getenv('QINIU_SECRET_KEY') + + +@pytest.fixture(scope='session') +def bucket_name(): + yield os.getenv('QINIU_TEST_BUCKET') + + +@pytest.fixture(scope='session') +def no_acc_bucket_name(): + yield os.getenv('QINIU_TEST_NO_ACC_BUCKET') + + +@pytest.fixture(scope='session') +def download_domain(): + yield os.getenv('QINIU_TEST_DOMAIN') + + +@pytest.fixture(scope='session') +def upload_callback_url(): + yield os.getenv('QINIU_UPLOAD_CALLBACK_URL') + + +@pytest.fixture(scope='session') +def qn_auth(access_key, secret_key): + yield Auth(access_key, secret_key) + + +@pytest.fixture(scope='session') +def is_travis(): + """ + migrate from old test cases. + seems useless. + """ + yield os.getenv('QINIU_TEST_ENV') == 'travis' + + +@pytest.fixture(scope='function') +def set_conf_default(request): + if hasattr(request, 'param'): + qn_config.set_default(**request.param) + yield + qn_config._config = { + 'default_zone': None, + 'default_rs_host': qn_config.RS_HOST, + 'default_rsf_host': qn_config.RSF_HOST, + 'default_api_host': qn_config.API_HOST, + 'default_uc_host': qn_config.UC_HOST, + 'default_uc_backup_hosts': qn_config.UC_BACKUP_HOSTS, + 'default_query_region_host': qn_config.QUERY_REGION_HOST, + 'default_query_region_backup_hosts': [ + 'uc.qbox.me', + 'api.qiniu.com' + ], + 'default_backup_hosts_retry_times': 2, + 'connection_timeout': 30, # 链接超时为时间为30s + 'connection_retries': 3, # 链接重试次数为3次 + 'connection_pool': 10, # 链接池个数为10 + 'default_upload_threshold': 2 * qn_config._BLOCK_SIZE # put_file上传方式的临界默认值 + } + + _is_customized_default = { + 'default_zone': False, + 'default_rs_host': False, + 'default_rsf_host': False, + 'default_api_host': False, + 'default_uc_host': False, + 'default_query_region_host': False, + 'default_query_region_backup_hosts': False, + 'default_backup_hosts_retry_times': False, + 'connection_timeout': False, + 'connection_retries': False, + 'connection_pool': False, + 'default_upload_threshold': False + } + + +@pytest.fixture(scope='session') +def rand_string(): + def _rand_string(length): + # use random.choices when min version of python >= 3.6 + return ''.join( + random.choice(string.ascii_letters + string.digits) + for _ in range(length) + ) + yield _rand_string + + +class Ref: + """ + python2 not support nonlocal keyword + """ + def __init__(self, value=None): + self.value = value + + +@pytest.fixture(scope='session') +def use_ref(): + def _use_ref(value): + return Ref(value) + + yield _use_ref diff --git a/tests/cases/test_auth.py b/tests/cases/test_auth.py new file mode 100644 index 00000000..d294f018 --- /dev/null +++ b/tests/cases/test_auth.py @@ -0,0 +1,212 @@ +import pytest + +from qiniu.auth import Auth, QiniuMacAuth + + +@pytest.fixture(scope="module") +def dummy_auth(): + dummy_access_key = 'abcdefghklmnopq' + dummy_secret_key = '1234567890' + yield Auth(dummy_access_key, dummy_secret_key) + + +class TestAuth: + def test_token(self, dummy_auth): + token = dummy_auth.token('test') + assert token == 'abcdefghklmnopq:mSNBTR7uS2crJsyFr2Amwv1LaYg=' + + def test_token_with_data(self, dummy_auth): + token = dummy_auth.token_with_data('test') + assert token == 'abcdefghklmnopq:-jP8eEV9v48MkYiBGs81aDxl60E=:dGVzdA==' + + def test_nokey(self, 
dummy_auth): + with pytest.raises(ValueError): + Auth(None, None).token('nokey') + with pytest.raises(ValueError): + Auth('', '').token('nokey') + + def test_token_of_request(self, dummy_auth): + token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', '') + assert token == 'abcdefghklmnopq:cFyRVoWrE3IugPIMP5YJFTO-O-Y=' + token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', 'application/x-www-form-urlencoded') + assert token == 'abcdefghklmnopq:svWRNcacOE-YMsc70nuIYdaa1e4=' + + @pytest.mark.parametrize( + 'opts, except_token', + [ + ( + { + "method": "GET", + "host": None, + "url": "", + "qheaders": { + "X-Qiniu-": "a", + "X-Qiniu": "b", + "Content-Type": "application/x-www-form-urlencoded", + }, + "content_type": "application/x-www-form-urlencoded", + "body": "{\"name\": \"test\"}", + }, + "ak:0i1vKClRDWFyNkcTFzwcE7PzX74=", + ), + ( + { + "method": "GET", + "host": None, + "url": "", + "qheaders": { + "Content-Type": "application/json", + }, + "content_type": "application/json", + "body": "{\"name\": \"test\"}", + }, + "ak:K1DI0goT05yhGizDFE5FiPJxAj4=", + ), + ( + { + "method": "POST", + "host": None, + "url": "", + "qheaders": { + "Content-Type": "application/json", + "X-Qiniu": "b", + }, + "content_type": "application/json", + "body": "{\"name\": \"test\"}", + }, + "ak:0ujEjW_vLRZxebsveBgqa3JyQ-w=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com", + "qheaders": { + "X-Qiniu-": "a", + "X-Qiniu": "b", + "Content-Type": "application/x-www-form-urlencoded", + }, + "content_type": "application/x-www-form-urlencoded", + "body": "{\"name\": \"test\"}", + }, + "ak:GShw5NitGmd5TLoo38nDkGUofRw=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com", + "qheaders": { + "Content-Type": "application/json", + "X-Qiniu-Bbb": "BBB", + "X-Qiniu-Aaa": "DDD", + "X-Qiniu-": "a", + "X-Qiniu": "b", + }, + "content_type": "application/json", + "body": "{\"name\": \"test\"}", + }, + "ak:DhNA1UCaBqSHCsQjMOLRfVn63GQ=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com", + "qheaders": { + "Content-Type": "application/x-www-form-urlencoded", + "X-Qiniu-Bbb": "BBB", + "X-Qiniu-Aaa": "DDD", + "X-Qiniu-": "a", + "X-Qiniu": "b", + }, + "content_type": "application/x-www-form-urlencoded", + "body": "name=test&language=go", + }, + "ak:KUAhrYh32P9bv0COD8ugZjDCmII=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com", + "qheaders": { + "Content-Type": "application/x-www-form-urlencoded", + "X-Qiniu-Bbb": "BBB", + "X-Qiniu-Aaa": "DDD", + }, + "content_type": "application/x-www-form-urlencoded", + "body": "name=test&language=go", + }, + "ak:KUAhrYh32P9bv0COD8ugZjDCmII=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com/mkfile/sdf.jpg", + "qheaders": { + "Content-Type": "application/x-www-form-urlencoded", + "X-Qiniu-Bbb": "BBB", + "X-Qiniu-Aaa": "DDD", + "X-Qiniu-": "a", + "X-Qiniu": "b", + }, + "content_type": "application/x-www-form-urlencoded", + "body": "name=test&language=go", + }, + "ak:fkRck5_LeyfwdkyyLk-hyNwGKac=", + ), + ( + { + "method": "GET", + "host": "upload.qiniup.com", + "url": "http://upload.qiniup.com/mkfile/sdf.jpg?s=er3&df", + "qheaders": { + "Content-Type": "application/x-www-form-urlencoded", + "X-Qiniu-Bbb": "BBB", + "X-Qiniu-Aaa": "DDD", + "X-Qiniu-": "a", + "X-Qiniu": "b", + }, + "content_type": 
"application/x-www-form-urlencoded", + "body": "name=test&language=go", + }, + "ak:PUFPWsEUIpk_dzUvvxTTmwhp3p4=", + ) + ] + ) + def test_qiniu_mac_requests_auth(self, dummy_auth, opts, except_token): + auth = QiniuMacAuth("ak", "sk") + + sign_token = auth.token_of_request( + method=opts["method"], + host=opts["host"], + url=opts["url"], + qheaders=auth.qiniu_headers(opts["qheaders"]), + content_type=opts["content_type"], + body=opts["body"], + ) + assert sign_token == except_token + + def test_qbox_verify_callback(self, dummy_auth): + ok = dummy_auth.verify_callback( + 'QBox abcdefghklmnopq:T7F-SjxX7X2zI4Fc1vANiNt1AUE=', + url='https://test.qiniu.com/callback', + body='name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123' + ) + assert ok + + def test_qiniu_verify_token(self, dummy_auth): + ok = dummy_auth.verify_callback( + 'Qiniu abcdefghklmnopq:ZqS7EZuAKrhZaEIxqNGxDJi41IQ=', + url='https://test.qiniu.com/callback', + body='name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123', + content_type='application/x-www-form-urlencoded', + method='GET', + headers={ + 'X-Qiniu-Bbb': 'BBB', + } + ) + assert ok + diff --git a/tests/cases/test_http/__init__.py b/tests/cases/test_http/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_http/conftest.py b/tests/cases/test_http/conftest.py new file mode 100644 index 00000000..9f2c23a8 --- /dev/null +++ b/tests/cases/test_http/conftest.py @@ -0,0 +1,11 @@ +import os + +import pytest + +from qiniu.compat import urlparse + + +@pytest.fixture(scope='session') +def mock_server_addr(): + addr = os.getenv('MOCK_SERVER_ADDRESS', 'http://localhost:9000') + yield urlparse(addr) diff --git a/tests/cases/test_http/test_endpoint.py b/tests/cases/test_http/test_endpoint.py new file mode 100644 index 00000000..9bfbeb66 --- /dev/null +++ b/tests/cases/test_http/test_endpoint.py @@ -0,0 +1,27 @@ +from qiniu.http.endpoint import Endpoint + + +class TestEndpoint: + def test_endpoint_with_default_scheme(self): + endpoint = Endpoint('uc.python-sdk.qiniu.com') + assert endpoint.get_value() == 'https://uc.python-sdk.qiniu.com' + + def test_endpoint_with_custom_scheme(self): + endpoint = Endpoint('uc.python-sdk.qiniu.com', default_scheme='http') + assert endpoint.get_value() == 'http://uc.python-sdk.qiniu.com' + + def test_endpoint_with_get_value_with_custom_scheme(self): + endpoint = Endpoint('uc.python-sdk.qiniu.com', default_scheme='http') + assert endpoint.get_value('https') == 'https://uc.python-sdk.qiniu.com' + + def test_create_endpoint_from_host_with_scheme(self): + endpoint = Endpoint.from_host('http://uc.python-sdk.qiniu.com') + assert endpoint.default_scheme == 'http' + assert endpoint.get_value() == 'http://uc.python-sdk.qiniu.com' + + def test_clone_endpoint(self): + endpoint = Endpoint('uc.python-sdk.qiniu.com') + another_endpoint = endpoint.clone() + another_endpoint.host = 'another-uc.python-sdk.qiniu.com' + assert endpoint.get_value() == 'https://uc.python-sdk.qiniu.com' + assert another_endpoint.get_value() == 'https://another-uc.python-sdk.qiniu.com' diff --git a/tests/cases/test_http/test_endpoints_retry_policy.py b/tests/cases/test_http/test_endpoints_retry_policy.py new file mode 100644 index 00000000..a8135ca2 --- /dev/null +++ b/tests/cases/test_http/test_endpoints_retry_policy.py @@ -0,0 +1,75 @@ +import pytest + +from qiniu.http.endpoint import Endpoint +from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy +from 
qiniu.retry.attempt import Attempt + + +@pytest.fixture(scope='function') +def mocked_endpoints_provider(): + yield [ + Endpoint('a'), + Endpoint('b'), + Endpoint('c') + ] + + +class TestEndpointsRetryPolicy: + def test_init_context(self, mocked_endpoints_provider): + endpoints_retry_policy = EndpointsRetryPolicy( + endpoints_provider=mocked_endpoints_provider + ) + + mocked_context = {} + endpoints_retry_policy.init_context(mocked_context) + + assert mocked_context['endpoint'].get_value() == mocked_endpoints_provider[0].get_value() + assert [ + e.get_value() + for e in mocked_context['alternative_endpoints'] + ] == [ + e.get_value() + for e in mocked_endpoints_provider[1:] + ] + + def test_should_retry(self, mocked_endpoints_provider): + mocked_attempt = Attempt() + + endpoints_retry_policy = EndpointsRetryPolicy( + endpoints_provider=mocked_endpoints_provider + ) + endpoints_retry_policy.init_context(mocked_attempt.context) + assert endpoints_retry_policy.should_retry(mocked_attempt) + + def test_prepare_retry(self, mocked_endpoints_provider): + mocked_attempt = Attempt() + + endpoints_retry_policy = EndpointsRetryPolicy( + endpoints_provider=mocked_endpoints_provider + ) + endpoints_retry_policy.init_context(mocked_attempt.context) + + actual_tried_endpoints = [ + mocked_attempt.context.get('endpoint') + ] + while endpoints_retry_policy.should_retry(mocked_attempt): + endpoints_retry_policy.prepare_retry(mocked_attempt) + actual_tried_endpoints.append(mocked_attempt.context.get('endpoint')) + + assert [ + e.get_value() for e in actual_tried_endpoints + ] == [ + e.get_value() for e in mocked_endpoints_provider + ] + + def test_skip_init_context(self, mocked_endpoints_provider): + endpoints_retry_policy = EndpointsRetryPolicy( + endpoints_provider=mocked_endpoints_provider, + skip_init_context=True + ) + + mocked_context = {} + endpoints_retry_policy.init_context(mocked_context) + + assert not mocked_context.get('endpoint') + assert not mocked_context.get('alternative_endpoints') diff --git a/tests/cases/test_http/test_middleware.py b/tests/cases/test_http/test_middleware.py new file mode 100644 index 00000000..c2292afa --- /dev/null +++ b/tests/cases/test_http/test_middleware.py @@ -0,0 +1,88 @@ +from qiniu.http.middleware import Middleware, RetryDomainsMiddleware +from qiniu.http import qn_http_client + + +class MiddlewareRecorder(Middleware): + def __init__(self, rec, label): + self.rec = rec + self.label = label + + def __call__(self, request, nxt): + self.rec.append( + 'bef_{0}{1}'.format(self.label, len(self.rec)) + ) + resp = nxt(request) + self.rec.append( + 'aft_{0}{1}'.format(self.label, len(self.rec)) + ) + return resp + + +class TestMiddleware: + def test_middlewares(self, mock_server_addr): + rec_ls = [] + mw_a = MiddlewareRecorder(rec_ls, 'A') + mw_b = MiddlewareRecorder(rec_ls, 'B') + qn_http_client.get( + '{scheme}://{host}/echo?status=200'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc + ), + middlewares=[ + mw_a, + mw_b + ] + ) + assert rec_ls == ['bef_A0', 'bef_B1', 'aft_B2', 'aft_A3'] + + def test_retry_domains(self, mock_server_addr): + rec_ls = [] + mw_rec = MiddlewareRecorder(rec_ls, 'rec') + ret, resp = qn_http_client.get( + '{scheme}://fake.pysdk.qiniu.com/echo?status=200'.format( + scheme=mock_server_addr.scheme + ), + middlewares=[ + RetryDomainsMiddleware( + backup_domains=[ + 'unavailable.pysdk.qiniu.com', + mock_server_addr.netloc + ], + max_retry_times=3 + ), + mw_rec + ] + ) + # ['bef_rec0', 'bef_rec1', 'bef_rec2'] are 
'fake.pysdk.qiniu.com' with retried 3 times + # ['bef_rec3', 'bef_rec4', 'bef_rec5'] are 'unavailable.pysdk.qiniu.com' with retried 3 times + # ['bef_rec6', 'aft_rec7'] are mock_server and it's success + assert rec_ls == [ + 'bef_rec0', 'bef_rec1', 'bef_rec2', + 'bef_rec3', 'bef_rec4', 'bef_rec5', + 'bef_rec6', 'aft_rec7' + ] + assert ret == {} + assert resp.status_code == 200 + + def test_retry_domains_fail_fast(self, mock_server_addr): + rec_ls = [] + mw_rec = MiddlewareRecorder(rec_ls, 'rec') + ret, resp = qn_http_client.get( + '{scheme}://fake.pysdk.qiniu.com/echo?status=200'.format( + scheme=mock_server_addr.scheme + ), + middlewares=[ + RetryDomainsMiddleware( + backup_domains=[ + 'unavailable.pysdk.qiniu.com', + mock_server_addr.netloc + ], + retry_condition=lambda _resp, _req: False + ), + mw_rec + ] + ) + # ['bef_rec0'] are 'fake.pysdk.qiniu.com' with fail fast + assert rec_ls == ['bef_rec0'] + assert ret is None + assert resp.status_code == -1 diff --git a/tests/cases/test_http/test_qiniu_conf.py b/tests/cases/test_http/test_qiniu_conf.py new file mode 100644 index 00000000..29c6fd05 --- /dev/null +++ b/tests/cases/test_http/test_qiniu_conf.py @@ -0,0 +1,117 @@ +import pytest +import requests + +from qiniu.compat import urlencode +import qiniu.http as qiniu_http + + +@pytest.fixture(scope='function') +def retry_id(request, mock_server_addr): + success_times = [] + failure_times = [] + if hasattr(request, 'param'): + success_times = request.param.get('success_times', success_times) + failure_times = request.param.get('failure_times', failure_times) + query_dict = { + 's': success_times, + 'f': failure_times, + } + query_params = urlencode( + query_dict, + doseq=True + ) + request_url = '{scheme}://{host}/retry_me/__mgr__?{query_params}'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc, + query_params=query_params + ) + resp = requests.put(request_url) + resp.raise_for_status() + record_id = resp.text + yield record_id + request_url = '{scheme}://{host}/retry_me/__mgr__?id={id}'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc, + id=record_id + ) + resp = requests.delete(request_url) + resp.raise_for_status() + + +@pytest.fixture(scope='function') +def reset_session(): + qiniu_http._session = None + yield + + +class TestQiniuConfWithHTTP: + @pytest.mark.usefixtures('reset_session') + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'connection_timeout': 0.3, + 'connection_retries': 0 + } + ], + indirect=True + ) + @pytest.mark.parametrize( + 'method,opts', + [ + ('get', {}), + ('put', {'data': None, 'files': None}), + ('post', {'data': None, 'files': None}), + ('delete', {'params': None}) + ], + ids=lambda v: v if type(v) is str else 'opts' + ) + def test_timeout_conf(self, mock_server_addr, method, opts, set_conf_default): + request_url = '{scheme}://{host}/timeout?delay=0.5'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc + ) + send = getattr(qiniu_http.qn_http_client, method) + _ret, resp = send(request_url, **opts) + assert 'Read timed out' in str(resp.exception) + + @pytest.mark.usefixtures('reset_session') + @pytest.mark.parametrize( + 'retry_id', + [ + { + 'success_times': [0, 1], + 'failure_times': [5, 0], + }, + ], + indirect=True + ) + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'connection_retries': 5 + } + ], + indirect=True + ) + @pytest.mark.parametrize( + 'method,opts', + [ + # post not retry default, see + # 
https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.Retry.DEFAULT_ALLOWED_METHODS + ('get', {}), + ('put', {'data': None, 'files': None}), + ('delete', {'params': None}) + ], + ids=lambda v: v if type(v) is str else 'opts' + ) + def test_retry_times(self, retry_id, mock_server_addr, method, opts, set_conf_default): + request_url = '{scheme}://{host}/retry_me?id={id}'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc, + id=retry_id + ) + send = getattr(qiniu_http.qn_http_client, method) + _ret, resp = send(request_url, **opts) + assert resp.status_code == 200 diff --git a/tests/cases/test_http/test_region.py b/tests/cases/test_http/test_region.py new file mode 100644 index 00000000..976d2619 --- /dev/null +++ b/tests/cases/test_http/test_region.py @@ -0,0 +1,186 @@ +from datetime import datetime, timedelta +from itertools import chain + +from qiniu.http.endpoint import Endpoint +from qiniu.http.region import Region, ServiceName + + +class TestRegion: + def test_default_options(self): + region = Region('z0') + assert region.region_id == 'z0' + assert region.s3_region_id == 'z0' + assert all(k in region.services for k in ServiceName) + assert datetime.now() - region.create_time < timedelta(seconds=1) + assert region.ttl == 86400 + assert region.is_live + + def test_custom_options(self): + region = Region( + region_id='z0', + s3_region_id='s3-z0', + services={ + ServiceName.UP: [ + Endpoint('uc.python-sdk.qiniu.com') + ], + 'custom-service': [ + Endpoint('custom-service.python-sdk.qiniu.com') + ] + }, + create_time=datetime.now() - timedelta(days=1), + ttl=3600 + ) + assert region.region_id == 'z0' + assert region.s3_region_id == 's3-z0' + assert all( + k in region.services + for k in chain(ServiceName, ['custom-service']) + ) + assert datetime.now() - region.create_time >= timedelta(days=1) + assert region.ttl == 3600 + assert not region.is_live + + def test_from_region_id(self): + region = Region.from_region_id('z0') + + expect_services_endpoint_value = { + ServiceName.UC: [ + 'https://uc.qiniuapi.com' + ], + ServiceName.UP: [ + 'https://upload.qiniup.com', + 'https://up.qiniup.com' + ], + ServiceName.UP_ACC: [], + ServiceName.IO: [ + 'https://iovip.qiniuio.com', + ], + ServiceName.RS: [ + 'https://rs-z0.qiniuapi.com', + ], + ServiceName.RSF: [ + 'https://rsf-z0.qiniuapi.com', + ], + ServiceName.API: [ + 'https://api-z0.qiniuapi.com', + ] + } + + assert region.region_id == 'z0' + assert region.s3_region_id == 'z0' + + assert { + k: [ + e.get_value() + for e in v + ] + for k, v in region.services.items() + } == expect_services_endpoint_value + + assert datetime.now() - region.create_time < timedelta(seconds=1) + assert region.ttl == 86400 + assert region.is_live + + def test_from_region_id_with_custom_options(self): + preferred_scheme = 'http' + custom_service_endpoint = Endpoint('custom-service.python-sdk.qiniu.com') + region_z1 = Region.from_region_id( + 'z1', + s3_region_id='s3-z1', + ttl=-1, + create_time=datetime.fromtimestamp(0), + extended_services= { + 'custom-service': [ + custom_service_endpoint + ] + }, + preferred_scheme=preferred_scheme + ) + + expect_services_endpoint_value = { + ServiceName.UC: [ + preferred_scheme + '://uc.qiniuapi.com' + ], + ServiceName.UP: [ + preferred_scheme + '://upload-z1.qiniup.com', + preferred_scheme + '://up-z1.qiniup.com' + ], + ServiceName.UP_ACC: [], + ServiceName.IO: [ + preferred_scheme + '://iovip-z1.qiniuio.com', + ], + ServiceName.RS: [ + preferred_scheme + '://rs-z1.qiniuapi.com', + ], + 
ServiceName.RSF: [ + preferred_scheme + '://rsf-z1.qiniuapi.com', + ], + ServiceName.API: [ + preferred_scheme + '://api-z1.qiniuapi.com', + ], + 'custom-service': [ + custom_service_endpoint.get_value() + ] + } + + assert region_z1.region_id == 'z1' + assert region_z1.s3_region_id == 's3-z1' + assert { + k: [ + e.get_value() + for e in v + ] + for k, v in region_z1.services.items() + } == expect_services_endpoint_value + assert region_z1.ttl == -1 + assert region_z1.create_time == datetime.fromtimestamp(0) + assert region_z1.is_live + + def test_clone(self): + region = Region.from_region_id('z0') + cloned_region = region.clone() + cloned_region.region_id = 'another' + cloned_region.services[ServiceName.UP][0].host = 'another-uc.qiniuapi.com' + assert region.region_id == 'z0' + assert region.services[ServiceName.UP][0].get_value() == 'https://upload.qiniup.com' + assert cloned_region.services[ServiceName.UP][0].get_value() == 'https://another-uc.qiniuapi.com' + + def test_merge(self): + r1 = Region.from_region_id('z0') + r2 = Region( + region_id='r2', + s3_region_id='s3-r2', + services={ + ServiceName.UP: [ + Endpoint('up-r2.python-sdk.qiniu.com') + ], + 'custom-service': [ + Endpoint('custom-service-r2.python-sdk.qiniu.com') + ] + }, + create_time=datetime.now() - timedelta(days=1), + ttl=3600 + ) + + merged_region = Region.merge(r1, r2) + + assert merged_region.region_id == r1.region_id + assert merged_region.s3_region_id == r1.s3_region_id + assert merged_region.create_time == r1.create_time + assert merged_region.ttl == r1.ttl + + assert all( + k in merged_region.services + for k in [ + ServiceName.UP, + 'custom-service' + ] + ), merged_region.services.keys() + + for k, v in merged_region.services.items(): + if k == ServiceName.UP: + assert v == list(chain(r1.services[k], r2.services[k])) + elif k == 'custom-service': + assert v == r2.services[k] + else: + assert v == r1.services[k] diff --git a/tests/cases/test_http/test_regions_provider.py b/tests/cases/test_http/test_regions_provider.py new file mode 100644 index 00000000..73dca89d --- /dev/null +++ b/tests/cases/test_http/test_regions_provider.py @@ -0,0 +1,313 @@ +import os +import datetime +import tempfile +import time +import json +from multiprocessing.pool import ThreadPool + +import pytest + +from qiniu.compat import urlparse +from qiniu.config import QUERY_REGION_HOST, QUERY_REGION_BACKUP_HOSTS +from qiniu.http.endpoint import Endpoint +from qiniu.http.region import Region +from qiniu.http.regions_provider import ( + CachedRegionsProvider, + FileAlreadyLocked, + QueryRegionsProvider, + _FileThreadingLocker, + _FileLocker, + _global_cache_scope, + _persist_region, +) + + +@pytest.fixture(scope='session') +def query_regions_endpoints_provider(): + query_region_host = urlparse(QUERY_REGION_HOST).hostname + endpoints_provider = [ + Endpoint(h) + for h in [query_region_host] + QUERY_REGION_BACKUP_HOSTS + ] + yield endpoints_provider + + +@pytest.fixture(scope='function') +def query_regions_provider(access_key, bucket_name, query_regions_endpoints_provider): + query_regions_provider = QueryRegionsProvider( + access_key=access_key, + bucket_name=bucket_name, + endpoints_provider=query_regions_endpoints_provider + ) + yield query_regions_provider + + +@pytest.fixture(scope='function') +def temp_file_path(rand_string): + p = os.path.join(tempfile.gettempdir(), rand_string(16)) + yield p + try: + os.remove(p) + except FileNotFoundError: + pass + + +class TestQueryRegionsProvider: + def test_getter(self, query_regions_provider): + ret = 
list(query_regions_provider) + assert len(ret) > 0 + + def test_error_with_bad_ak(self, query_regions_endpoints_provider): + query_regions_provider = QueryRegionsProvider( + access_key='fake', + bucket_name='fake', + endpoints_provider=query_regions_endpoints_provider + ) + with pytest.raises(Exception) as exc: + list(query_regions_provider) + assert '612' in str(exc) + + def test_error_with_bad_endpoint(self, query_regions_provider): + query_regions_provider.endpoints_provider = [ + Endpoint('fake-uc.python.qiniu.com') + ] + with pytest.raises(Exception) as exc: + list(query_regions_provider) + assert '-1' in str(exc) + + def test_getter_with_retried(self, query_regions_provider, query_regions_endpoints_provider): + query_regions_provider.endpoints_provider = [ + Endpoint('fake-uc.python.qiniu.com'), + ] + list(query_regions_endpoints_provider) + + ret = list(query_regions_provider) + assert len(ret) > 0 + + def test_getter_with_preferred_scheme(self, query_regions_provider): + query_regions_provider.preferred_scheme = 'http' + for region in query_regions_provider: + for endpoints in region.services.values(): + assert all( + e.get_value().startswith('http://') + for e in endpoints + ) + + +@pytest.fixture(scope='function') +def cached_regions_provider(request): + if not hasattr(request, 'param') or not isinstance(request.param, dict): + request.param = {} + request.param.setdefault('cache_key', 'test-cache-key') + request.param.setdefault('base_regions_provider', []) + + cached_regions_provider = CachedRegionsProvider( + **request.param + ) + yield cached_regions_provider + + # clear memo_cache for test cases will affect each other with same cache_key + _global_cache_scope.memo_cache.clear() + persist_path = request.param.get('persist_path') + if persist_path: + try: + os.remove(persist_path) + except OSError: + pass + + +@pytest.fixture(scope='function') +def bad_regions_provider(): + regions_provider = QueryRegionsProvider( + access_key='fake', + bucket_name='fake', + endpoints_provider=[ + Endpoint('fake-uc.python.qiniu.com') + ] + ) + yield regions_provider + + +class TestCachedQueryRegionsProvider: + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + {'base_regions_provider': [Region.from_region_id('z0')]}, + ], + indirect=True + ) + def test_getter_normally(self, cached_regions_provider): + ret = list(cached_regions_provider) + assert len(ret) > 0 + + def test_setter(self, cached_regions_provider): + regions = [Region.from_region_id('z0')] + cached_regions_provider.set_regions(regions) + assert list(cached_regions_provider) == regions + + def test_getter_with_expired_file_cache(self, cached_regions_provider): + expired_region = Region.from_region_id('z0') + expired_region.create_time = datetime.datetime.now() + + r_z0 = Region.from_region_id('z0') + r_z0.ttl = 86400 + + with open(cached_regions_provider.persist_path, 'w') as f: + json.dump({ + 'cacheKey': cached_regions_provider.cache_key, + 'regions': [_persist_region(r) for r in [expired_region]] + }, f) + + cached_regions_provider._cache_scope.memo_cache[cached_regions_provider.cache_key] = [r_z0] + + assert list(cached_regions_provider) == [r_z0] + try: + os.remove(cached_regions_provider.persist_path) + except OSError: + pass + + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + { + 'persist_path': os.path.join(tempfile.gettempdir(), 'test-disable-persist.jsonl'), + }, + { + 'persist_path': None, + } + ], + indirect=True + ) + def test_disable_persist(self, cached_regions_provider): + if 
cached_regions_provider.persist_path: + old_persist_path = cached_regions_provider.persist_path + cached_regions_provider.persist_path = None + else: + old_persist_path = _global_cache_scope.persist_path + + regions = [Region.from_region_id('z0')] + cached_regions_provider.set_regions(regions) + + assert list(cached_regions_provider) == regions + assert not os.path.exists(old_persist_path) + + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + { + 'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl'), + 'base_regions_provider': [Region.from_region_id('z0')] + } + ], + indirect=True + ) + def test_getter_with_base_regions_provider(self, cached_regions_provider): + assert not os.path.exists(cached_regions_provider.persist_path) + regions = list(cached_regions_provider.base_regions_provider) + assert list(cached_regions_provider) == regions + line_num = 0 + with open(cached_regions_provider.persist_path, 'r') as f: + for l in f: + # ignore empty line + if l.strip(): + line_num += 1 + assert line_num == 1 + + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + { + 'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl') + } + ], + indirect=True + ) + def test_should_provide_memo_expired_regions_when_base_provider_failed( + self, + cached_regions_provider, + bad_regions_provider + ): + expired_region = Region.from_region_id('z0') + expired_region.create_time = datetime.datetime.fromtimestamp(0) + expired_region.ttl = 1 + cached_regions_provider.set_regions([expired_region]) + cached_regions_provider.base_regions_provider = bad_regions_provider + regions = list(cached_regions_provider) + assert len(regions) > 0 + assert not regions[0].is_live + + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + { + 'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl') + } + ], + indirect=True + ) + def test_should_provide_file_expired_regions_when_base_provider_failed( + self, + cached_regions_provider, + bad_regions_provider + ): + expired_region = Region.from_region_id('z0') + expired_region.create_time = datetime.datetime.fromtimestamp(0) + expired_region.ttl = 1 + cached_regions_provider.set_regions([expired_region]) + cached_regions_provider._cache_scope.memo_cache.clear() + cached_regions_provider.base_regions_provider = bad_regions_provider + regions = list(cached_regions_provider) + assert len(regions) > 0 + assert not regions[0].is_live + + @pytest.mark.parametrize( + 'cached_regions_provider', + [ + { + 'should_shrink_expired_regions': True + } + ], + indirect=True + ) + def test_shrink_with_expired_regions(self, cached_regions_provider): + expired_region = Region.from_region_id('z0') + expired_region.create_time = datetime.datetime.fromtimestamp(0) + expired_region.ttl = 1 + origin_cache_key = cached_regions_provider.cache_key + cached_regions_provider.set_regions([expired_region]) + cached_regions_provider.cache_key = 'another-cache-key' + + # trigger __shrink_cache() + cached_regions_provider._cache_scope = cached_regions_provider._cache_scope._replace( + last_shrink_at=datetime.datetime.fromtimestamp(0) + ) + list(cached_regions_provider) + + assert len(cached_regions_provider._cache_scope.memo_cache[origin_cache_key]) == 0 + + def test_shrink_with_ignore_expired_regions(self, cached_regions_provider): + expired_region = Region.from_region_id('z0') + expired_region.create_time = datetime.datetime.fromtimestamp(0) + expired_region.ttl = 1 + origin_cache_key = cached_regions_provider.cache_key + 
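These cases exercise the in-memory cache, the persisted file and the base-provider fallback one at a time; stacked together, a CachedRegionsProvider normally fronts a QueryRegionsProvider. A minimal sketch of that wiring, limited to constructor arguments already used in this file ('AK' and 'my-bucket' are placeholders):

```python
# Sketch only: stacking the providers that these tests exercise piecemeal.
# 'AK' and 'my-bucket' are placeholders; the persist path is arbitrary.
import os
import tempfile

from qiniu.compat import urlparse
from qiniu.config import QUERY_REGION_HOST, QUERY_REGION_BACKUP_HOSTS
from qiniu.http.endpoint import Endpoint
from qiniu.http.regions_provider import CachedRegionsProvider, QueryRegionsProvider

query_hosts = [urlparse(QUERY_REGION_HOST).hostname] + QUERY_REGION_BACKUP_HOSTS

base_provider = QueryRegionsProvider(
    access_key='AK',
    bucket_name='my-bucket',
    endpoints_provider=[Endpoint(h) for h in query_hosts]
)

cached_provider = CachedRegionsProvider(
    cache_key='AK:my-bucket',             # results are grouped under this key
    base_regions_provider=base_provider,  # only queried on a cache miss or expiry
    persist_path=os.path.join(tempfile.gettempdir(), 'qiniu-regions-cache.jsonl')
)

# Iterating yields Region objects; per the cases in this file they come from
# the in-memory cache first, then the persisted file, then the base provider.
regions = list(cached_provider)
```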
cached_regions_provider.set_regions([expired_region]) + cached_regions_provider.cache_key = 'another-cache-key' + list(cached_regions_provider) # trigger __shrink_cache() + assert len(cached_regions_provider._cache_scope.memo_cache[origin_cache_key]) > 0 + + def test_file_locker(self, temp_file_path, use_ref): + handled_cnt = use_ref(0) + skipped_cnt = use_ref(0) + + + def process_file(_n): + try: + with open(temp_file_path, 'w') as f, _FileThreadingLocker(f), _FileLocker(f): + time.sleep(1) + handled_cnt.value += 1 + except FileAlreadyLocked: + skipped_cnt.value += 1 + + + ThreadPool(4).map(process_file, range(20)) + assert handled_cnt.value + skipped_cnt.value == 20 + assert 0 < handled_cnt.value <= 4 diff --git a/tests/cases/test_http/test_regions_retry_policy.py b/tests/cases/test_http/test_regions_retry_policy.py new file mode 100644 index 00000000..add39930 --- /dev/null +++ b/tests/cases/test_http/test_regions_retry_policy.py @@ -0,0 +1,263 @@ +import pytest + +from qiniu.http.endpoint import Endpoint +from qiniu.http.region import Region, ServiceName +from qiniu.http.regions_retry_policy import RegionsRetryPolicy +from qiniu.retry import Attempt + + +@pytest.fixture(scope='function') +def mocked_regions_provider(): + yield [ + Region.from_region_id('z0'), + Region.from_region_id('z1') + ] + + +class TestRegionsRetryPolicy: + def test_init(self, mocked_regions_provider): + regions_retry_policy = RegionsRetryPolicy( + regions_provider=mocked_regions_provider, + service_names=[ServiceName.UP] + ) + + mocked_context = {} + regions_retry_policy.init_context(mocked_context) + + assert mocked_context['region'] == mocked_regions_provider[0] + assert mocked_context['alternative_regions'] == mocked_regions_provider[1:] + assert mocked_context['service_name'] == ServiceName.UP + assert mocked_context['alternative_service_names'] == [] + assert mocked_context['endpoint'] == mocked_regions_provider[0].services[ServiceName.UP][0] + assert mocked_context['alternative_endpoints'] == mocked_regions_provider[0].services[ServiceName.UP][1:] + + @pytest.mark.parametrize( + 'regions,service_names,expect_should_retry,msg', + [ + ( + [ + Region.from_region_id('z0'), + Region.from_region_id('z1') + ], + [ServiceName.UP], + True, + 'Should retry when there are alternative regions' + ), + ( + [ + Region.from_region_id( + 'z0', + extended_services={ + ServiceName.UP_ACC: [ + Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com') + ] + } + ) + ], + [ServiceName.UP_ACC, ServiceName.UP], + True, + 'Should retry when there are alternative services' + ), + ( + [ + Region.from_region_id('z0') + ], + [ServiceName.UP_ACC, ServiceName.UP], + False, + 'Should not retry when there are no alternative regions or empty endpoint in services' + ), + ( + [ + Region.from_region_id('z0') + ], + [ServiceName.UP], + False, + 'Should not retry when there are no alternative regions or services' + ), + ], + ids=lambda v: v if type(v) is str else '' + ) + def test_should_retry( + self, + regions, + service_names, + expect_should_retry, + msg + ): + regions_retry_policy = RegionsRetryPolicy( + regions_provider=regions, + service_names=service_names + ) + + mocked_attempt = Attempt() + regions_retry_policy.init_context(mocked_attempt.context) + + assert regions_retry_policy.should_retry(mocked_attempt) == expect_should_retry, msg + + @pytest.mark.parametrize( + 'regions,service_names', + [ + ( + [ + Region.from_region_id('z0'), + Region.from_region_id('z1') + ], + [ServiceName.UP] + ), + ( + [ + Region.from_region_id( + 'z0', + 
extended_services={ + ServiceName.UP_ACC: [ + Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com') + ] + } + ) + ], + [ServiceName.UP_ACC, ServiceName.UP] + ) + ] + ) + def test_prepare_retry(self, regions, service_names): + mocked_attempt = Attempt() + + regions_retry_policy = RegionsRetryPolicy( + regions_provider=regions, + service_names=service_names + ) + regions_retry_policy.init_context(mocked_attempt.context) + + actual_tried_endpoints = [ + mocked_attempt.context.get('endpoint') + ] + while regions_retry_policy.should_retry(mocked_attempt): + regions_retry_policy.prepare_retry(mocked_attempt) + actual_tried_endpoints.append(mocked_attempt.context.get('endpoint')) + + # There is no endpoints retry policy, + # so just the first endpoint will be tried + expect_tried_endpoints = [ + r.services[sn][0] + for r in regions + for sn in service_names + if sn in r.services and r.services[sn] + ] + + print(actual_tried_endpoints) + print(expect_tried_endpoints) + + assert [ + e.get_value() + for e in actual_tried_endpoints + ] == [ + e.get_value() + for e in expect_tried_endpoints + ] + + @pytest.mark.parametrize( + 'regions,service_names,expect_change_region_times', + [ + # tow region, retry once + ( + [ + Region.from_region_id('z0'), + Region.from_region_id('z1') + ], + [ServiceName.UP], + 1 + ), + # one region, tow service, retry service once, region zero + ( + [ + Region.from_region_id( + 'z0', + extended_services={ + ServiceName.UP_ACC: [ + Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com') + ] + } + ) + ], + [ServiceName.UP_ACC, ServiceName.UP], + 0 + ), + # tow region, tow service, retry service once, region once + ( + [ + Region.from_region_id( + 'z0', + extended_services={ + ServiceName.UP_ACC: [ + Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com') + ] + } + ), + Region.from_region_id('z1') + ], + [ServiceName.UP_ACC, ServiceName.UP], + 1 + ) + ] + ) + def test_on_change_region_option( + self, + regions, + service_names, + expect_change_region_times, + use_ref + ): + actual_change_region_times_ref = use_ref(0) + + def handle_change_region(_context): + actual_change_region_times_ref.value += 1 + + regions_retry_policy = RegionsRetryPolicy( + regions_provider=regions, + service_names=service_names, + on_change_region=handle_change_region + ) + + mocked_attempt = Attempt() + regions_retry_policy.init_context(mocked_attempt.context) + + while regions_retry_policy.should_retry(mocked_attempt): + regions_retry_policy.prepare_retry(mocked_attempt) + + assert actual_change_region_times_ref.value == expect_change_region_times + + def test_init_with_preferred_endpoints_option_new_temp_region(self, mocked_regions_provider): + preferred_endpoints = [ + Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com') + ] + regions_retry_policy = RegionsRetryPolicy( + regions_provider=mocked_regions_provider, + service_names=[ServiceName.UP], + preferred_endpoints_provider=preferred_endpoints + ) + + mocked_context = {} + regions_retry_policy.init_context(mocked_context) + + assert mocked_context['region'].region_id == 'preferred_region' + assert mocked_context['region'].services[ServiceName.UP] == preferred_endpoints + assert mocked_context['alternative_regions'] == list(mocked_regions_provider) + + def test_init_with_preferred_endpoints_option_reorder_regions(self, mocked_regions_provider): + mocked_regions = list(mocked_regions_provider) + preferred_region_index = 1 + preferred_endpoints = [ + mocked_regions[preferred_region_index].services[ServiceName.UP][0] + ] + 
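Read together with the EndpointsRetryPolicy cases earlier in this directory, these tests imply that the two policies share a single attempt context: the regions policy seeds the endpoint slots for the current region, and an endpoints policy built with skip_init_context=True walks them before the next region is tried. The sketch below drives that interplay by hand, exactly as the tests do; how the SDK composes the policies internally is not shown here and may differ:

```python
# A rough sketch of how the two retry policies can cooperate through one
# shared attempt context, driven by hand the same way these tests do.
from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy
from qiniu.http.region import Region, ServiceName
from qiniu.http.regions_retry_policy import RegionsRetryPolicy
from qiniu.retry import Attempt

regions_policy = RegionsRetryPolicy(
    regions_provider=[Region.from_region_id('z0'), Region.from_region_id('z1')],
    service_names=[ServiceName.UP]
)
# skip_init_context=True: the regions policy already seeds 'endpoint' and
# 'alternative_endpoints' for the current region, so this policy only consumes them.
endpoints_policy = EndpointsRetryPolicy(endpoints_provider=[], skip_init_context=True)

attempt = Attempt()
regions_policy.init_context(attempt.context)
endpoints_policy.init_context(attempt.context)

tried = [attempt.context['endpoint']]
while True:
    if endpoints_policy.should_retry(attempt):
        endpoints_policy.prepare_retry(attempt)   # next endpoint, same region
    elif regions_policy.should_retry(attempt):
        regions_policy.prepare_retry(attempt)     # first endpoint of the next region
    else:
        break
    tried.append(attempt.context['endpoint'])

# 'tried' should now list every UP endpoint of z0 followed by every UP endpoint of z1
```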
regions_retry_policy = RegionsRetryPolicy( + regions_provider=mocked_regions_provider, + service_names=[ServiceName.UP], + preferred_endpoints_provider=preferred_endpoints + ) + + mocked_context = {} + regions_retry_policy.init_context(mocked_context) + + assert mocked_context['region'] == mocked_regions[preferred_region_index] + mocked_regions.pop(preferred_region_index) + assert mocked_context['alternative_regions'] == mocked_regions diff --git a/tests/cases/test_http/test_resp.py b/tests/cases/test_http/test_resp.py new file mode 100644 index 00000000..ddfeadcf --- /dev/null +++ b/tests/cases/test_http/test_resp.py @@ -0,0 +1,64 @@ +import requests + +from qiniu.http import qn_http_client, __return_wrapper as return_wrapper + + +class TestResponse: + def test_response_need_retry(self, mock_server_addr): + def gen_case(code): + if 0 <= code < 500: + return code, False + if code in [ + 501, 509, 573, 579, 608, 612, 614, 616, 618, 630, 631, 632, 640, 701 + ]: + return code, False + return code, True + + cases = [ + gen_case(i) for i in range(-1, 800) + ] + + for test_code, should_retry in cases: + req_url = '{scheme}://{host}/echo?status={status}'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc, + status=test_code + ) + if test_code < 0: + req_url = 'http://fake.python-sdk.qiniu.com/' + _ret, resp_info = qn_http_client.get(req_url) + assert_msg = '{code} should{adv} retry'.format( + code=test_code, + adv='' if should_retry else ' NOT' + ) + assert resp_info.need_retry() == should_retry, assert_msg + + def test_json_decode_error(self, mock_server_addr): + req_url = '{scheme}://{host}/echo?status=200'.format( + scheme=mock_server_addr.scheme, + host=mock_server_addr.netloc + ) + ret, resp = qn_http_client.get(req_url) + assert resp.text_body is not None + assert ret == {} + + def test_old_json_decode_error(self): + """ + test old return_wrapper + """ + + def mock_res(): + r = requests.Response() + r.status_code = 200 + r.headers.__setitem__('X-Reqid', 'mockedReqid') + + def json_func(): + raise ValueError('%s: line %d column %d (char %d)' % ('Expecting value', 0, 0, 0)) + + r.json = json_func + + return r + + mocked_res = mock_res() + ret, _ = return_wrapper(mocked_res) + assert ret == {} diff --git a/tests/cases/test_http/test_single_flight.py b/tests/cases/test_http/test_single_flight.py new file mode 100644 index 00000000..48748ecd --- /dev/null +++ b/tests/cases/test_http/test_single_flight.py @@ -0,0 +1,59 @@ +import pytest +import time +from multiprocessing.pool import ThreadPool + +from qiniu.http.single_flight import SingleFlight + +class TestSingleFlight: + def test_single_flight_success(self): + sf = SingleFlight() + + def fn(): + return "result" + + result = sf.do("key1", fn) + assert result == "result" + + def test_single_flight_exception(self): + sf = SingleFlight() + + def fn(): + raise ValueError("error") + + with pytest.raises(ValueError, match="error"): + sf.do("key2", fn) + + def test_single_flight_concurrent(self): + sf = SingleFlight() + share_state = [] + results = [] + + def fn(): + time.sleep(1) + share_state.append('share_state') + return "result" + + def worker(_n): + result = sf.do("key3", fn) + results.append(result) + + ThreadPool(2).map(worker, range(5)) + + assert len(share_state) == 3 + assert all(result == "result" for result in results) + + def test_single_flight_different_keys(self): + sf = SingleFlight() + results = [] + + def fn(): + time.sleep(1) + return "result" + + def worker(n): + result = sf.do("key{}".format(n), fn) + 
results.append(result) + + ThreadPool(2).map(worker, range(2)) + assert len(results) == 2 + assert all(result == "result" for result in results) diff --git a/tests/cases/test_retry/__init__.py b/tests/cases/test_retry/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_retry/test_retrier.py b/tests/cases/test_retry/test_retrier.py new file mode 100644 index 00000000..c4d1cd9b --- /dev/null +++ b/tests/cases/test_retry/test_retrier.py @@ -0,0 +1,142 @@ +import qiniu.retry +import qiniu.retry.abc + + +class MaxRetryPolicy(qiniu.retry.abc.RetryPolicy): + def __init__(self, max_times): + super(MaxRetryPolicy, self).__init__() + self.max_times = max_times + + def is_important(self, attempt): + return attempt.context[self]['retriedTimes'] >= self.max_times + + def init_context(self, context): + context[self] = { + 'retriedTimes': 0 + } + + def should_retry(self, attempt): + if not attempt.exception: + return False + return attempt.context[self]['retriedTimes'] < self.max_times + + def prepare_retry(self, attempt): + pass + + def after_retry(self, attempt, policy): + attempt.context[self]['retriedTimes'] += 1 + + +class TestRetry: + def test_retrier_with_code_block(self, use_ref): + retried_times_ref = use_ref(0) + + def handle_before_retry(_attempt, _policy): + retried_times_ref.value += 1 + return True + + max_retry_times = 3 + retrier = qiniu.retry.Retrier( + policies=[ + MaxRetryPolicy(max_times=max_retry_times) + ], + before_retry=handle_before_retry + ) + + tried_times = 0 + try: + for attempt in retrier: + with attempt: + tried_times += 1 + raise Exception('mocked error') + except Exception as err: + assert str(err) == 'mocked error' + + assert tried_times == max_retry_times + 1 + assert retried_times_ref.value == max_retry_times + + def test_retrier_with_try_do(self, use_ref): + retried_times_ref = use_ref(0) + + def handle_before_retry(_attempt, _policy): + retried_times_ref.value += 1 + return True + + max_retry_times = 3 + retrier = qiniu.retry.Retrier( + policies=[ + MaxRetryPolicy(max_times=max_retry_times) + ], + before_retry=handle_before_retry + ) + + tried_times_ref = use_ref(0) + + def add_one(n): + tried_times_ref.value += 1 + if tried_times_ref.value <= 3: + raise Exception('mock error') + return n + 1 + + result = retrier.try_do(add_one, 1) + assert result == 2 + assert tried_times_ref.value == max_retry_times + 1 + assert retried_times_ref.value == max_retry_times + + def test_retrier_with_decorator(self, use_ref): + retried_times_ref = use_ref(0) + + def handle_before_retry(_attempt, _policy): + retried_times_ref.value += 1 + return True + + max_retry_times = 3 + retrier = qiniu.retry.Retrier( + policies=[ + MaxRetryPolicy(max_times=max_retry_times) + ], + before_retry=handle_before_retry + ) + + tried_times_ref = use_ref(0) + + @retrier.retry + def add_one(n): + tried_times_ref.value += 1 + if tried_times_ref.value <= 3: + raise Exception('mock error') + return n + 1 + + result = add_one(1) + assert result == 2 + assert tried_times_ref.value == max_retry_times + 1 + assert retried_times_ref.value == max_retry_times + + def test_retrier_with_no_need_retry_err(self, use_ref): + retried_times_ref = use_ref(0) + + def handle_before_retry(_attempt, _policy): + retried_times_ref.value += 1 + return True + + max_retry_times = 3 + retrier = qiniu.retry.Retrier( + policies=[ + MaxRetryPolicy(max_times=max_retry_times) + ], + before_retry=handle_before_retry + ) + + tried_times = 0 + try: + for attempt in retrier: + with attempt: + tried_times += 
1 + err = Exception('mocked error') + err.no_need_retry = True + raise err + except Exception as err: + assert str(err) == 'mocked error' + + assert tried_times == 1 + assert retried_times_ref.value == 0 diff --git a/tests/cases/test_services/__init__.py b/tests/cases/test_services/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_services/test_processing/__init__.py b/tests/cases/test_services/test_processing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_services/test_processing/test_pfop.py b/tests/cases/test_services/test_processing/test_pfop.py new file mode 100644 index 00000000..ebaf18f4 --- /dev/null +++ b/tests/cases/test_services/test_processing/test_pfop.py @@ -0,0 +1,97 @@ +import pytest + +from qiniu import PersistentFop, op_save + + +persistent_id = None + + +class TestPersistentFop: + def test_pfop_execute(self, qn_auth): + pfop = PersistentFop(qn_auth, 'testres', 'sdktest') + op = op_save('avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240', 'pythonsdk', 'pfoptest') + ops = [ + op + ] + ret, resp = pfop.execute('sintel_trailer.mp4', ops, 1) + assert resp.status_code == 200, resp + assert ret is not None, resp + assert ret['persistentId'] is not None, resp + global persistent_id + persistent_id = ret['persistentId'] + + def test_pfop_get_status(self, qn_auth): + assert persistent_id is not None + pfop = PersistentFop(qn_auth, 'testres', 'sdktest') + ret, resp = pfop.get_status(persistent_id) + assert resp.status_code == 200, resp + assert ret is not None, resp + + @pytest.mark.parametrize( + 'persistent_options', + ( + # already covered by test_pfop_execute above + # { + # 'persistent_type': None, + # }, + { + 'persistent_type': 0, + }, + { + 'persistent_type': 1, + }, + { + 'workflow_template_id': 'test-workflow', + }, + ) + ) + def test_pfop_idle_time_task( + self, + set_conf_default, + qn_auth, + bucket_name, + persistent_options, + ): + persistent_type = persistent_options.get('persistent_type') + workflow_template_id = persistent_options.get('workflow_template_id', None) + + execute_opts = {} + if workflow_template_id: + execute_opts['workflow_template_id'] = workflow_template_id + else: + persistent_key = '_'.join([ + 'test-pfop/test-pfop-by-api', + 'type', + str(persistent_type) + ]) + execute_opts['fops'] = [ + op_save( + op='avinfo', + bucket=bucket_name, + key=persistent_key + ) + ] + + if persistent_type is not None: + execute_opts['persistent_type'] = persistent_type + + pfop = PersistentFop(qn_auth, bucket_name) + key = 'qiniu.png' + ret, resp = pfop.execute( + key, + **execute_opts + ) + + assert resp.status_code == 200, resp + assert ret is not None + assert 'persistentId' in ret, resp + + ret, resp = pfop.get_status(ret['persistentId']) + assert resp.status_code == 200, resp + assert ret is not None + assert ret['creationDate'] is not None, resp + + if persistent_type == 1: + assert ret['type'] == 1, resp + elif workflow_template_id: + assert workflow_template_id in ret['taskFrom'], resp diff --git a/tests/cases/test_services/test_storage/__init__.py b/tests/cases/test_services/test_storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_services/test_storage/conftest.py b/tests/cases/test_services/test_storage/conftest.py new file mode 100644 index 00000000..64b81b20 --- /dev/null +++ b/tests/cases/test_services/test_storage/conftest.py @@ -0,0 +1,140 @@ +import os +from collections import namedtuple +from hashlib import new as hashlib_new +import 
tempfile + +import pytest + +import requests + +from qiniu import BucketManager +from qiniu.utils import io_md5 +from qiniu.config import QUERY_REGION_HOST, QUERY_REGION_BACKUP_HOSTS +from qiniu.http.endpoint import Endpoint +from qiniu.http.regions_provider import Region, ServiceName, get_default_regions_provider + + +@pytest.fixture(scope='session') +def bucket_manager(qn_auth): + yield BucketManager(qn_auth) + + +@pytest.fixture(scope='session') +def get_remote_object_headers_and_md5(download_domain): + def fetch_calc_md5(key=None, scheme=None, url=None): + if not key and not url: + raise TypeError('Must provide key or url') + + scheme = scheme if scheme is not None else 'http' + download_url = '{}://{}/{}'.format(scheme, download_domain, key) + if url: + download_url = url + + resp = requests.get(download_url, stream=True) + resp.raise_for_status() + + return resp.headers, io_md5(resp.iter_content(chunk_size=8192)) + + yield fetch_calc_md5 + + +@pytest.fixture(scope='session') +def get_real_regions(): + def _get_real_regions(access_key, bucket_name): + regions = list( + get_default_regions_provider( + query_endpoints_provider=[ + Endpoint.from_host(h) + for h in [QUERY_REGION_HOST] + QUERY_REGION_BACKUP_HOSTS + ], + access_key=access_key, + bucket_name=bucket_name + ) + ) + + if not regions: + raise RuntimeError('No regions found') + + return regions + + yield _get_real_regions + + +@pytest.fixture(scope='function') +def regions_with_real_endpoints(access_key, bucket_name, get_real_regions): + yield get_real_regions(access_key, bucket_name) + + +@pytest.fixture(scope='function') +def regions_with_fake_endpoints(regions_with_real_endpoints): + """ + Returns + ------- + list[Region] + The first element is the fake region with fake endpoints for every service. + The second element is the real region with first fake endpoint for every service. + The rest elements are real regions with real endpoints if exists. 
+ """ + regions = regions_with_real_endpoints + + regions[0].services = { + sn: [ + Endpoint('fake-{0}.python-sdk.qiniu.com'.format(sn.value)) + ] + endpoints + for sn, endpoints in regions[0].services.items() + } + + regions.insert(0, Region( + 'fake-id', + 'fake-s3-id', + services={ + sn: [ + Endpoint('fake-region-{0}.python-sdk.qiniu.com'.format(sn.value)) + ] + for sn in ServiceName + } + )) + + yield regions + + +TempFile = namedtuple( + 'TempFile', + [ + 'path', + 'md5', + 'name', + 'size' + ] +) + + +@pytest.fixture(scope='function') +def temp_file(request): + size = 4 * 1024 + if hasattr(request, 'param'): + size = request.param + + tmp_file_path = tempfile.mktemp() + chunk_size = 4 * 1024 + + md5_hasher = hashlib_new('md5') + with open(tmp_file_path, 'wb') as f: + remaining_bytes = size + while remaining_bytes > 0: + chunk = os.urandom(min(chunk_size, remaining_bytes)) + f.write(chunk) + md5_hasher.update(chunk) + remaining_bytes -= len(chunk) + + yield TempFile( + path=tmp_file_path, + md5=md5_hasher.hexdigest(), + name=os.path.basename(tmp_file_path), + size=size + ) + + try: + os.remove(tmp_file_path) + except Exception: + pass diff --git a/tests/cases/test_services/test_storage/test_bucket_manager.py b/tests/cases/test_services/test_storage/test_bucket_manager.py new file mode 100644 index 00000000..68455652 --- /dev/null +++ b/tests/cases/test_services/test_storage/test_bucket_manager.py @@ -0,0 +1,205 @@ +import pytest + +from qiniu.services.storage.bucket import BucketManager +from qiniu.region import LegacyRegion +from qiniu import build_batch_restore_ar + + +@pytest.fixture(scope='function') +def object_key(bucket_manager, bucket_name, rand_string): + key_to = 'copyto_' + rand_string(8) + bucket_manager.copy( + bucket=bucket_name, + key='copyfrom', + bucket_to=bucket_name, + key_to=key_to, + force='true' + ) + + yield key_to + + bucket_manager.delete(bucket_name, key_to) + + +class TestBucketManager: + # TODO(lihs): Move other test cases to here from test_qiniu.py + def test_restore_ar(self, bucket_manager, bucket_name, object_key): + ret, resp = bucket_manager.restore_ar(bucket_name, object_key, 7) + assert not resp.ok(), resp + ret, resp = bucket_manager.change_type(bucket_name, object_key, 2) + assert resp.ok(), resp + ret, resp = bucket_manager.restore_ar(bucket_name, object_key, 7) + assert resp.ok(), resp + + @pytest.mark.parametrize( + 'cond,expect_ok', + [ + ( + None, True + ), + ( + { + 'mime': 'text/plain' + }, + True + ), + ( + { + 'mime': 'application/json' + }, + False + ) + ] + ) + def test_change_status( + self, + bucket_manager, + bucket_name, + object_key, + cond, + expect_ok + ): + ret, resp = bucket_manager.change_status(bucket_name, object_key, 1, cond) + assert resp.ok() == expect_ok, resp + + def test_mkbucketv3(self, bucket_manager, rand_string): + # tested manually, no drop bucket API to auto cleanup + # ret, resp = bucket_manager.mkbucketv3('py-test-' + rand_string(8).lower(), 'z0') + # assert resp.ok(), resp + pass + + def test_list_bucket(self, bucket_manager, bucket_name): + ret, resp = bucket_manager.list_bucket('na0') + assert resp.ok(), resp + assert any(b.get('tbl') == bucket_name for b in ret) + + def test_bucket_info(self, bucket_manager, bucket_name): + ret, resp = bucket_manager.bucket_info(bucket_name) + assert resp.ok(), resp + for k in [ + 'protected', + 'private' + ]: + assert k in ret + + def test_change_bucket_permission(self, bucket_manager, bucket_name): + ret, resp = bucket_manager.bucket_info(bucket_name) + assert resp.ok(), 
resp + original_private = ret['private'] + ret, resp = bucket_manager.change_bucket_permission( + bucket_name, + 1 if original_private == 1 else 0 + ) + assert resp.ok(), resp + ret, resp = bucket_manager.change_bucket_permission( + bucket_name, + original_private + ) + assert resp.ok(), resp + + def test_batch_restore_ar( + self, + bucket_manager, + bucket_name, + object_key + ): + bucket_manager.change_type(bucket_name, object_key, 2) + ops = build_batch_restore_ar( + bucket_name, + { + object_key: 7 + } + ) + ret, resp = bucket_manager.batch(ops) + assert resp.status_code == 200, resp + assert len(ret) > 0 + assert ret[0].get('code') == 200, ret[0] + + def test_compatible_with_zone(self, qn_auth, bucket_name, regions_with_real_endpoints): + r = LegacyRegion( + io_host='https://fake-io.python-sdk.qiniu.com', + rs_host='https://fake-rs.python-sdk.qiniu.com', + rsf_host='https://fake-rsf.python-sdk.qiniu.com', + api_host='https://fake-api.python-sdk.qiniu.com' + ) + bucket_manager = BucketManager( + qn_auth, + zone=r + ) + + # rs host + ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html') + assert resp.status_code == -1 + assert ret is None + + # rsf host + ret, _eof, resp = bucket_manager.list(bucket_name, '', limit=10) + assert resp.status_code == -1 + assert ret is None + + # io host + ret, info = bucket_manager.prefetch(bucket_name, 'python-sdk.html') + assert resp.status_code == -1 + assert ret is None + + # api host + # no API method to test + + @pytest.mark.parametrize( + 'preferred_scheme', + [ + None, # default 'http' + 'http', + 'https' + ] + ) + def test_preferred_scheme( + self, + qn_auth, + bucket_name, + preferred_scheme + ): + bucket_manager = BucketManager( + auth=qn_auth, + preferred_scheme=preferred_scheme + ) + + ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html') + + assert ret is not None, resp + assert resp.ok(), resp + + expect_scheme = preferred_scheme if preferred_scheme else 'http' + assert resp.url.startswith(expect_scheme + '://'), resp.url + + def test_operation_with_regions_and_retrier( + self, + qn_auth, + bucket_name, + regions_with_fake_endpoints + ): + bucket_manager = BucketManager( + auth=qn_auth, + regions=regions_with_fake_endpoints, + ) + + ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html') + + assert ret is not None, resp + assert resp.ok(), resp + + def test_uc_service_with_retrier( + self, + qn_auth, + bucket_name, + regions_with_fake_endpoints + ): + bucket_manager = BucketManager( + auth=qn_auth, + regions=regions_with_fake_endpoints + ) + + ret, resp = bucket_manager.list_bucket('na0') + assert resp.ok(), resp + assert len(ret) > 0, resp + assert any(b.get('tbl') for b in ret), ret diff --git a/tests/cases/test_services/test_storage/test_upload_pfop.py b/tests/cases/test_services/test_storage/test_upload_pfop.py new file mode 100644 index 00000000..3effa9c7 --- /dev/null +++ b/tests/cases/test_services/test_storage/test_upload_pfop.py @@ -0,0 +1,94 @@ +import pytest + +import qiniu + + +KB = 1024 +MB = 1024 * KB +GB = 1024 * MB + + +# set a bucket lifecycle manually to delete prefix `test-pfop`! +# or this test will continue to occupy bucket space. 
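The class below covers the upload-policy route to persistent processing. For orientation, a condensed sketch of that flow, using only calls that already appear in this suite ('AK'/'SK', the bucket, the keys and the local path are placeholders; treating persistentType=1 as the idle-time queue follows the assertions in these cases):

```python
# Sketch: kick off idle-time persistent processing from the upload policy.
# 'AK'/'SK', 'my-bucket', the keys and the local path are placeholders.
import qiniu

auth = qiniu.Auth('AK', 'SK')
bucket = 'my-bucket'
key = 'test-pfop/upload-file'

policy = {
    # run `avinfo` on the uploaded object and save the result under another key
    'persistentOps': qiniu.op_save(op='avinfo', bucket=bucket, key='test-pfop/avinfo-result'),
    'persistentType': 1,  # per the cases below, 1 routes the job to the idle-time queue
}

token = auth.upload_token(bucket, key, policy=policy)
ret, info = qiniu.put_file(token, key, '/path/to/local/file', check_crc=True)

# the upload reply carries the job id, which can then be polled
status, resp = qiniu.PersistentFop(auth, bucket).get_status(ret['persistentId'])
```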
+class TestPersistentFopByUpload: + @pytest.mark.parametrize('temp_file', [10 * MB], indirect=True) + @pytest.mark.parametrize( + 'persistent_options', + ( + { + 'persistent_type': None, + }, + { + 'persistent_type': 0, + }, + { + 'persistent_type': 1, + }, + { + 'persistent_workflow_template_id': 'test-workflow', + }, + ) + ) + def test_pfop_with_upload( + self, + set_conf_default, + qn_auth, + bucket_name, + temp_file, + persistent_options, + ): + key = 'test-pfop/upload-file' + persistent_type = persistent_options.get('persistent_type') + persistent_workflow_template_id = persistent_options.get('persistent_workflow_template_id') + + upload_policy = {} + + # set pfops or tmplate id + if persistent_workflow_template_id: + upload_policy['persistentWorkflowTemplateID'] = persistent_workflow_template_id + else: + persistent_key = '_'.join([ + 'test-pfop/test-pfop-by-upload', + 'type', + str(persistent_type) + ]) + persistent_ops = ';'.join([ + qiniu.op_save( + op='avinfo', + bucket=bucket_name, + key=persistent_key + ) + ]) + upload_policy['persistentOps'] = persistent_ops + + # set persistent type + if persistent_type is not None: + upload_policy['persistentType'] = persistent_type + + # upload + token = qn_auth.upload_token( + bucket_name, + key, + policy=upload_policy + ) + ret, resp = qiniu.put_file( + token, + key, + temp_file.path, + check_crc=True + ) + + assert ret is not None, resp + assert ret['key'] == key, resp + assert 'persistentId' in ret, resp + + pfop = qiniu.PersistentFop(qn_auth, bucket_name) + ret, resp = pfop.get_status(ret['persistentId']) + assert resp.status_code == 200, resp + assert ret is not None, resp + assert ret['creationDate'] is not None, resp + + if persistent_type == 1: + assert ret['type'] == 1, resp + elif persistent_workflow_template_id: + assert persistent_workflow_template_id in ret['taskFrom'], resp diff --git a/tests/cases/test_services/test_storage/test_uploader.py b/tests/cases/test_services/test_storage/test_uploader.py new file mode 100644 index 00000000..158f111d --- /dev/null +++ b/tests/cases/test_services/test_storage/test_uploader.py @@ -0,0 +1,909 @@ +from collections import namedtuple + +import pytest + +from qiniu.compat import json, is_py2 +from qiniu import ( + Zone, + config as qn_config, + set_default, + put_file, + put_data, + put_stream, + build_batch_delete +) +from qiniu.http.endpoint import Endpoint +from qiniu.http.region import ServiceName +from qiniu.services.storage.uploader import _form_put +from qiniu.services.storage.uploaders.abc import UploaderBase + +KB = 1024 +MB = 1024 * KB +GB = 1024 * MB + + +@pytest.fixture(scope='session') +def valid_up_host(access_key, bucket_name): + zone = Zone() + try: + hosts = json.loads( + zone.bucket_hosts(access_key, bucket_name) + ).get('hosts') + up_host = 'https://' + hosts[0].get('up', {}).get('domains')[0] + except IndexError: + up_host = 'https://upload.qiniup.com' + return up_host + + +CommonlyOptions = namedtuple( + 'CommonlyOptions', + [ + 'mime_type', + 'params', + 'metadata' + ] +) + + +@pytest.fixture() +def commonly_options(request): + res = CommonlyOptions( + mime_type='text/plain', + params={'x:a': 'a'}, + metadata={ + 'x-qn-meta-name': 'qiniu', + 'x-qn-meta-age': '18' + } + ) + if hasattr(request, 'param'): + res = res._replace(**request.param) + yield res + + +@pytest.fixture(scope='class') +def auto_remove(bucket_manager): + grouped_keys_by_bucket_name = {} + + def _auto_remove(bucket_name, key): + if bucket_name not in grouped_keys_by_bucket_name: + 
grouped_keys_by_bucket_name[bucket_name] = [] + grouped_keys_by_bucket_name[bucket_name].append(key) + return key + + yield _auto_remove + + for bkt_name, keys in grouped_keys_by_bucket_name.items(): + try: + delete_ops = build_batch_delete(bkt_name, keys) + bucket_manager.batch(delete_ops) + except Exception as err: + print('Failed to delete {0} keys: {1} by {2}'.format(bkt_name, keys, err)) + + +@pytest.fixture(scope='class') +def get_key(bucket_name, rand_string, auto_remove): + def _get_key(key, no_rand_trail=False): + result = key + '-' + rand_string(8) + if no_rand_trail: + result = key + auto_remove(bucket_name, result) + return result + + yield _get_key + + +@pytest.fixture(scope='function') +def set_default_up_host_zone(request, valid_up_host): + zone_args = { + 'up_host': valid_up_host, + } + if hasattr(request, 'param') and request.param is not None: + zone_args = { + 'up_host': request.param, + 'up_host_backup': valid_up_host + } + set_default( + default_zone=Zone(**zone_args) + ) + yield + set_default(default_zone=Zone()) + qn_config._is_customized_default['default_zone'] = False + + +class TestUploadFuncs: + def test_put(self, qn_auth, bucket_name, get_key): + key = get_key('a\\b\\c"hello', no_rand_trail=True) + data = 'hello bubby!' + token = qn_auth.upload_token(bucket_name) + ret, info = put_data(token, key, data) + print(info) + assert ret['key'] == key + + def test_put_crc(self, qn_auth, bucket_name, get_key): + key = get_key('', no_rand_trail=True) + data = 'hello bubby!' + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_data(token, key, data, check_crc=True) + print(info) + assert ret['key'] == key + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_file( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + get_remote_object_headers_and_md5, + get_key + ): + key = get_key('test_file') + + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_file( + token, + key, + temp_file.path, + mime_type=commonly_options.mime_type, + check_crc=True + ) + + _, actual_md5 = get_remote_object_headers_and_md5(key=key) + + assert ret is not None, info + assert ret['key'] == key, info + assert actual_md5 == temp_file.md5 + + def test_put_with_invalid_crc(self, qn_auth, bucket_name, get_key): + key = get_key('test_invalid') + data = 'hello bubby!' + crc32 = 'wrong crc32' + token = qn_auth.upload_token(bucket_name) + ret, info = _form_put(token, key, data, None, None, crc=crc32) + assert ret is None, info + assert info.status_code == 400, info + + def test_put_without_key(self, qn_auth, bucket_name, get_key): + key = None + data = 'hello bubby!' + token = qn_auth.upload_token(bucket_name) + ret, info = put_data(token, key, data) + assert 'key' in ret, info + get_key(ret['key'], no_rand_trail=True) # auto remove the file + assert ret['hash'] == ret['key'], info + + data = 'hello bubby!' + token = qn_auth.upload_token(bucket_name, 'nokey2') + ret, info = put_data(token, None, data) + print(info) + assert ret is None + assert info.status_code == 403 # key not match + + @pytest.mark.parametrize( + 'set_default_up_host_zone', + [ + 'http://fake.qiniu.com', + None + ], + indirect=True + ) + def test_without_read_without_seek_retry(self, set_default_up_host_zone, qn_auth, bucket_name, get_key): + key = get_key('retry') + data = 'hello retry!' 
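The set_default_up_host_zone fixture above points the default zone at an unreachable primary up host, so the surrounding case only passes if the upload falls back to the backup host. A compact sketch of that configuration (credentials and bucket name are placeholders; the two hosts are the ones used by the fixture):

```python
# Sketch of the fixture's zone override: the primary up host is unreachable,
# so a successful upload proves the fallback to up_host_backup happened.
from qiniu import Auth, Zone, put_data, set_default

set_default(default_zone=Zone(
    up_host='http://fake.qiniu.com',            # never resolves
    up_host_backup='https://upload.qiniup.com'  # host that actually serves the upload
))

auth = Auth('AK', 'SK')                  # placeholder credentials
token = auth.upload_token('my-bucket')   # placeholder bucket
ret, info = put_data(token, 'retry-demo', 'hello retry!')

# restore the stock default zone afterwards, as the fixture teardown does
set_default(default_zone=Zone())
```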
+ token = qn_auth.upload_token(bucket_name) + ret, info = put_data(token, key, data) + print(info) + assert ret['key'] == key + assert ret['hash'] == 'FlYu0iBR1WpvYi4whKXiBuQpyLLk' + + @pytest.mark.parametrize('temp_file', [30 * MB], indirect=True) + def test_put_data_without_fname( + self, + qn_auth, + bucket_name, + is_travis, + temp_file, + get_key + ): + if is_travis: + return + key = get_key('test_putData_without_fname') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_data(token, key, input_stream) + print(info) + assert ret is not None + + @pytest.mark.parametrize('temp_file', [30 * MB], indirect=True) + def test_put_data_with_empty_fname( + self, + qn_auth, + bucket_name, + is_travis, + temp_file, + commonly_options, + get_key + ): + if is_travis: + return + key = get_key('test_putData_without_fname1') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_data( + token, + key, + input_stream, + commonly_options.params, + commonly_options.mime_type, + False, + None, + '' + ) + print(info) + assert ret is not None + + @pytest.mark.parametrize('temp_file', [30 * MB], indirect=True) + def test_put_data_with_space_only_fname( + self, + qn_auth, + bucket_name, + is_travis, + temp_file, + commonly_options, + get_key + ): + if is_travis: + return + key = get_key('test_putData_without_fname2') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_data( + token, + key, + input_stream, + commonly_options.params, + commonly_options.mime_type, + False, + None, + ' ' + ) + print(info) + assert ret is not None + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_file_with_metadata( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + get_remote_object_headers_and_md5, + get_key + ): + key = get_key('test_file_with_metadata') + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_file(token, key, temp_file.path, metadata=commonly_options.metadata) + _, actual_md5 = get_remote_object_headers_and_md5(key=key) + assert ret['key'] == key + assert actual_md5 == temp_file.md5 + + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + def test_put_data_with_metadata( + self, + qn_auth, + bucket_name, + commonly_options, + bucket_manager, + get_key + ): + key = get_key('put_data_with_metadata') + data = 'hello metadata!' 
+ token = qn_auth.upload_token(bucket_name, key) + ret, info = put_data(token, key, data, metadata=commonly_options.metadata) + assert ret['key'] == key + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_file_with_callback( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + upload_callback_url, + get_remote_object_headers_and_md5, + get_key + ): + key = get_key('test_file_with_callback') + policy = { + 'callbackUrl': upload_callback_url, + 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}', + 'callbackBodyType': 'application/json', + } + token = qn_auth.upload_token(bucket_name, key, policy=policy) + ret, info = put_file( + token, + key, + temp_file.path, + metadata=commonly_options.metadata, + params=commonly_options.params, + ) + _, actual_md5 = get_remote_object_headers_and_md5(key=key) + assert ret['key'] == key + assert actual_md5 == temp_file.md5 + assert ret['custom_vars']['a'] == 'a' + + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [64 * KB, 10 * MB], indirect=True) + def test_put_file_with_regions_retry( + self, + qn_auth, + bucket_name, + temp_file, + regions_with_fake_endpoints, + get_remote_object_headers_and_md5, + get_key + ): + key = get_key('test_file_with_form_regions_retry') + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_file( + token, + key, + temp_file.path, + regions=regions_with_fake_endpoints + ) + _, actual_md5 = get_remote_object_headers_and_md5(key=key) + assert ret['key'] == key + assert actual_md5 == temp_file.md5 + + def test_put_data_with_callback( + self, + qn_auth, + bucket_name, + commonly_options, + bucket_manager, + upload_callback_url, + get_key + ): + key = get_key('put_data_with_metadata') + data = 'hello metadata!' 
+ policy = { + 'callbackUrl': upload_callback_url, + 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}', + 'callbackBodyType': 'application/json', + } + token = qn_auth.upload_token(bucket_name, key, policy=policy) + ret, info = put_data( + token, + key, + data, + metadata=commonly_options.metadata, + params=commonly_options.params + ) + assert ret['key'] == key + assert ret['custom_vars']['a'] == 'a' + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + +class TestResumableUploader: + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_stream(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + key = get_key('test_file_r') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=None, + version=None, + bucket_name=None + ) + assert ret['key'] == key + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_stream_v2_without_bucket_name(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + key = get_key('test_file_r') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=1024 * 1024 * 10, + version='v2' + ) + assert ret['key'] == key + + @pytest.mark.parametrize( + 'temp_file', + [ + 2 * MB + 1, + 4 * MB, + 10 * MB + 1 + ], + ids=[ + '2MB+', + '4MB', + '10MB+' + ], + indirect=True + ) + def test_put_stream_v2(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + key = get_key('test_file_r') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=1024 * 1024 * 4, + version='v2', + bucket_name=bucket_name + ) + assert ret['key'] == key + + @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True) + def test_put_stream_v2_without_key(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + part_size = 4 * MB + key = None + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=part_size, + version='v2', + bucket_name=bucket_name + ) + assert 'key' in ret + get_key(ret['key'], no_rand_trail=True) # auto remove the file + assert ret['key'] == ret['hash'] + + @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True) + def test_put_stream_v2_with_empty_return_body(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + part_size = 4 * MB + key = get_key('test_file_empty_return_body') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key, policy={'returnBody': ' '}) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + 
part_size=part_size, + version='v2', + bucket_name=bucket_name + ) + assert info.status_code == 200 + assert ret == {} + + @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True) + def test_big_file(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + key = get_key('big') + token = qn_auth.upload_token(bucket_name, key) + + ret, info = put_file( + token, + key, + temp_file.path, + commonly_options.params, + commonly_options.mime_type, + progress_handler=lambda progress, total: progress + ) + print(info) + assert ret['key'] == key + + @pytest.mark.parametrize( + 'set_default_up_host_zone', + [ + 'http://fake.qiniu.com', + None + ], + indirect=True + ) + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_legacy_retry( + self, + set_default_up_host_zone, + qn_auth, + bucket_name, + temp_file, + commonly_options, + get_remote_object_headers_and_md5, + get_key + ): + key = get_key('test_file_r_retry') + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_file( + token, + key, + temp_file.path, + commonly_options.params, + commonly_options.mime_type + ) + _, actual_md5 = get_remote_object_headers_and_md5(key=key) + assert ret['key'] == key, info + assert actual_md5 == temp_file.md5 + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_stream_with_key_limits(self, qn_auth, bucket_name, temp_file, commonly_options, get_key): + key = get_key('test_file_r') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key, policy={'keylimit': ['test_file_d']}) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type + ) + assert info.status_code == 403 + token = qn_auth.upload_token( + bucket_name, + key, + policy={'keylimit': ['test_file_d', key]} + ) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type + ) + assert info.status_code == 200 + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_stream_with_metadata( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + get_key + ): + key = get_key('test_put_stream_with_metadata') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=None, + version=None, + bucket_name=None, + metadata=commonly_options.metadata + ) + assert ret['key'] == key + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True) + def test_put_stream_v2_with_metadata( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + get_key + ): + part_size = 4 * MB + key = get_key('test_put_stream_v2_with_metadata') + with open(temp_file.path, 'rb') as input_stream: + token = qn_auth.upload_token(bucket_name, key) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=part_size, + version='v2', + bucket_name=bucket_name, + metadata=commonly_options.metadata + ) + assert 
ret['key'] == key + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True) + def test_put_stream_with_callback( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + upload_callback_url, + get_key + ): + key = get_key('test_put_stream_with_callback') + with open(temp_file.path, 'rb') as input_stream: + policy = { + 'callbackUrl': upload_callback_url, + 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}', + 'callbackBodyType': 'application/json', + } + token = qn_auth.upload_token(bucket_name, key, policy=policy) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=None, + version=None, + bucket_name=None, + metadata=commonly_options.metadata + ) + assert ret['key'] == key + assert ret['custom_vars']['a'] == 'a' + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True) + def test_put_stream_v2_with_callback( + self, + qn_auth, + bucket_name, + temp_file, + commonly_options, + bucket_manager, + upload_callback_url, + get_key + ): + part_size = 4 * MB + key = get_key('test_put_stream_v2_with_metadata') + with open(temp_file.path, 'rb') as input_stream: + policy = { + 'callbackUrl': upload_callback_url, + 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}', + 'callbackBodyType': 'application/json', + } + token = qn_auth.upload_token(bucket_name, key, policy=policy) + ret, info = put_stream( + token, + key, + input_stream, + temp_file.name, + temp_file.size, + None, + commonly_options.params, + commonly_options.mime_type, + part_size=part_size, + version='v2', + bucket_name=bucket_name, + metadata=commonly_options.metadata + ) + assert ret['key'] == key + assert ret['custom_vars']['a'] == 'a' + ret, info = bucket_manager.stat(bucket_name, key) + assert 'x-qn-meta' in ret + assert ret['x-qn-meta']['name'] == 'qiniu' + assert ret['x-qn-meta']['age'] == '18' + + @pytest.mark.parametrize('temp_file', [30 * MB], indirect=True) + @pytest.mark.parametrize('version', ['v1', 'v2']) + def test_resume_upload(self, bucket_name, qn_auth, temp_file, version, get_key): + key = get_key('test_resume_upload_' + version) + part_size = 4 * MB + + def mock_fail(uploaded_size, _total_size): + if uploaded_size > 10 * MB: + raise Exception('Mock Fail') + + try: + token = qn_auth.upload_token(bucket_name, key) + try: + _ret, _into = put_file( + up_token=token, + key=key, + file_path=temp_file.path, + hostscache_dir=None, + part_size=part_size, + version=version, + bucket_name=bucket_name, + progress_handler=mock_fail + ) + except Exception as e: + if 'Mock Fail' not in str(e): + raise e + except IOError: + if is_py2: + # https://github.com/pytest-dev/pytest/issues/2370 + # https://github.com/pytest-dev/pytest/pull/3305 + pass + + def should_start_from_resume(uploaded_size, _total_size): + assert uploaded_size // part_size >= 3 + + token = qn_auth.upload_token(bucket_name, key) + ret, into = put_file( + up_token=token, + key=key, + file_path=temp_file.path, + hostscache_dir=None, + part_size=part_size, + version=version, + bucket_name=bucket_name, + 
progress_handler=should_start_from_resume + ) + assert ret['key'] == key + + @pytest.mark.parametrize('temp_file', [ + 64 * KB, # form + 10 * MB # resume + ], indirect=True) + @pytest.mark.parametrize('version', ['v1', 'v2']) + def test_upload_acc_normally(self, bucket_name, qn_auth, temp_file, version, get_key): + key = get_key('test_upload_acc_normally') + + token = qn_auth.upload_token(bucket_name, key) + ret, resp = put_file( + up_token=token, + key=key, + file_path=temp_file.path, + version=version, + accelerate_uploading=True + ) + + assert ret['key'] == key, resp + assert 'kodo-accelerate' in resp.url, resp + + @pytest.mark.parametrize('temp_file', [ + 64 * KB, # form + 10 * MB # resume + ], indirect=True) + @pytest.mark.parametrize('version', ['v1', 'v2']) + def test_upload_acc_fallback_src_by_network_err( + self, + bucket_name, + qn_auth, + temp_file, + version, + get_key, + get_real_regions + ): + regions = get_real_regions(qn_auth.get_access_key(), bucket_name) + r = regions[0] + r.services[ServiceName.UP_ACC] = [ + Endpoint('qiniu-acc.fake.qiniu.com') + ] + + key = get_key('test_upload_acc_fallback_src_by_network_err') + + token = qn_auth.upload_token(bucket_name, key) + ret, resp = put_file( + up_token=token, + key=key, + file_path=temp_file.path, + version=version, + regions=[r], + accelerate_uploading=True + ) + + assert ret['key'] == key, resp + + @pytest.mark.parametrize('temp_file', [ + 64 * KB, # form + 10 * MB # resume + ], indirect=True) + @pytest.mark.parametrize('version', ['v1', 'v2']) + def test_upload_acc_fallback_src_by_acc_unavailable( + self, + no_acc_bucket_name, + qn_auth, + temp_file, + version, + rand_string, + auto_remove, + get_real_regions + ): + regions = get_real_regions(qn_auth.get_access_key(), no_acc_bucket_name) + + region = regions[0] + region.services[ServiceName.UP_ACC] = [ + Endpoint('{0}.kodo-accelerate.{1}.qiniucs.com'.format(no_acc_bucket_name, region.s3_region_id)), + Endpoint('fake-acc.python-sdk.qiniu.com') + ] + + key = 'test_upload_acc_fallback_src_by_acc_unavailable-' + rand_string(8) + auto_remove(no_acc_bucket_name, key) + + token = qn_auth.upload_token(no_acc_bucket_name, key) + ret, resp = put_file( + up_token=token, + key=key, + file_path=temp_file.path, + version=version, + accelerate_uploading=True + ) + + assert ret['key'] == key, resp + + def test_uploader_base_compatible(self, qn_auth, bucket_name): + if is_py2: + class MockUploader(UploaderBase): + def upload( + self, + **kwargs + ): + pass + uploader = MockUploader( + bucket_name=bucket_name, + auth=qn_auth + ) + else: + uploader = UploaderBase( + bucket_name=bucket_name, + auth=qn_auth + ) + + up_hosts = uploader._get_up_hosts() + assert len(up_hosts) > 0 diff --git a/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py b/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py new file mode 100644 index 00000000..2aaa83ee --- /dev/null +++ b/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py @@ -0,0 +1,235 @@ +import pytest + +import os + +from qiniu.http.region import ServiceName, Region +from qiniu.retry import Attempt +from qiniu.services.storage.uploaders._default_retrier import ( + ProgressRecord, + TokenExpiredRetryPolicy, + AccUnavailableRetryPolicy +) + + +@pytest.fixture( + scope='function', + params=[ + {'api_version': 'v1'}, + {'api_version': 'v2'} + ] +) +def fake_progress_record(request): + api_version = request.param.get('api_version') + file_path = os.path.join(os.getcwd(), 'fake-progress-record') + 
+ with open(file_path, 'w'): + pass + + def _delete(): + try: + os.remove(file_path) + except OSError: + pass + + def _exists(): + return os.path.exists(file_path) + + yield ProgressRecord( + upload_api_version=api_version, + exists=_exists, + delete=_delete + ) + + _delete() + + +class MockResponse: + def __init__(self, status_code, text_body=None): + self.status_code = status_code + self.text_body = text_body + + +class TestTokenExpiredRetryPolicy: + def test_should_retry(self, fake_progress_record): + policy = TokenExpiredRetryPolicy( + upload_api_version=fake_progress_record.upload_api_version, + record_delete_handler=fake_progress_record.delete, + record_exists_handler=fake_progress_record.exists + ) + + attempt = Attempt() + policy.init_context(attempt.context) + + if fake_progress_record.upload_api_version == 'v1': + mocked_resp = MockResponse(status_code=701) + else: + mocked_resp = MockResponse(status_code=612) + attempt.result = (None, mocked_resp) + + assert policy.should_retry(attempt) + + def test_should_not_retry_by_no_result(self, fake_progress_record): + policy = TokenExpiredRetryPolicy( + upload_api_version=fake_progress_record.upload_api_version, + record_delete_handler=fake_progress_record.delete, + record_exists_handler=fake_progress_record.exists + ) + attempt = Attempt() + policy.init_context(attempt.context) + + assert not policy.should_retry(attempt) + + def test_should_not_retry_by_default_max_retried_times(self, fake_progress_record): + policy = TokenExpiredRetryPolicy( + upload_api_version=fake_progress_record.upload_api_version, + record_delete_handler=fake_progress_record.delete, + record_exists_handler=fake_progress_record.exists + ) + attempt = Attempt() + policy.init_context(attempt.context) + if fake_progress_record.upload_api_version == 'v1': + mocked_resp = MockResponse(status_code=701) + else: + mocked_resp = MockResponse(status_code=612) + attempt.result = (None, mocked_resp) + attempt.context[policy] = attempt.context[policy]._replace(retried_times=1) + + assert not policy.should_retry(attempt) + + def test_should_not_retry_by_file_no_exists(self, fake_progress_record): + policy = TokenExpiredRetryPolicy( + upload_api_version=fake_progress_record.upload_api_version, + record_delete_handler=fake_progress_record.delete, + record_exists_handler=fake_progress_record.exists + ) + + attempt = Attempt() + policy.init_context(attempt.context) + if fake_progress_record.upload_api_version == 'v1': + mocked_resp = MockResponse(status_code=701) + else: + mocked_resp = MockResponse(status_code=612) + attempt.result = (None, mocked_resp) + fake_progress_record.delete() + + assert not policy.should_retry(attempt) + + def test_prepare_retry(self, fake_progress_record): + policy = TokenExpiredRetryPolicy( + upload_api_version=fake_progress_record.upload_api_version, + record_delete_handler=fake_progress_record.delete, + record_exists_handler=fake_progress_record.exists + ) + + attempt = Attempt() + policy.init_context(attempt.context) + if fake_progress_record.upload_api_version == 'v1': + mocked_resp = MockResponse(status_code=701) + else: + mocked_resp = MockResponse(status_code=612) + attempt.result = (None, mocked_resp) + + policy.prepare_retry(attempt) + + assert not fake_progress_record.exists() + + +class TestAccUnavailableRetryPolicy: + def test_should_retry(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + + attempt.context['service_name'] = ServiceName.UP_ACC + attempt.context['alternative_service_names'] = [ServiceName.UP] + 
attempt.context['region'] = Region.from_region_id('z0') + + mocked_resp = MockResponse( + status_code=400, + text_body='{"error":"transfer acceleration is not configured on this bucket"}' + ) + attempt.result = (None, mocked_resp) + + assert policy.should_retry(attempt) + + def test_should_not_retry_by_no_result(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + + attempt.context['service_name'] = ServiceName.UP_ACC + attempt.context['alternative_service_names'] = [ServiceName.UP] + attempt.context['region'] = Region.from_region_id('z0') + + assert not policy.should_retry(attempt) + + def test_should_not_retry_by_no_alternative_services(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + + attempt.context['service_name'] = ServiceName.UP + attempt.context['alternative_service_names'] = [] + attempt.context['region'] = Region.from_region_id('z0') + + mocked_resp = MockResponse( + status_code=400, + text_body='{"error":"transfer acceleration is not configured on this bucket"}' + ) + attempt.result = (None, mocked_resp) + + assert not policy.should_retry(attempt) + + def test_should_not_retry_by_no_alternative_endpoints(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + + attempt.context['service_name'] = ServiceName.UP_ACC + attempt.context['alternative_service_names'] = [ServiceName.UP] + attempt.context['region'] = Region.from_region_id('z0') + attempt.context['region'].services[ServiceName.UP] = [] + + mocked_resp = MockResponse( + status_code=400, + text_body='{"error":"transfer acceleration is not configured on this bucket"}' + ) + attempt.result = (None, mocked_resp) + + assert not policy.should_retry(attempt) + + def test_should_not_retry_by_other_error(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + + attempt.context['service_name'] = ServiceName.UP_ACC + attempt.context['alternative_service_names'] = [ServiceName.UP] + attempt.context['region'] = Region.from_region_id('z0') + + mocked_resp = MockResponse( + status_code=400, + text_body='{"error":"Bad Request"}' + ) + attempt.result = (None, mocked_resp) + + assert not policy.should_retry(attempt) + + def test_prepare_retry(self): + policy = AccUnavailableRetryPolicy() + attempt = Attempt() + region = Region.from_region_id('z0') + + attempt.context['service_name'] = ServiceName.UP_ACC + attempt.context['alternative_service_names'] = [ServiceName.UP] + attempt.context['region'] = region + + mocked_resp = MockResponse( + status_code=400, + text_body='{"error":"transfer acceleration is not configured on this bucket"}' + ) + attempt.result = (None, mocked_resp) + + policy.prepare_retry(attempt) + + assert attempt.context['service_name'] == ServiceName.UP + assert ( + [attempt.context['endpoint']] + attempt.context['alternative_endpoints'] + == + region.services[ServiceName.UP] + ) diff --git a/tests/cases/test_utils.py b/tests/cases/test_utils.py new file mode 100644 index 00000000..11d9db77 --- /dev/null +++ b/tests/cases/test_utils.py @@ -0,0 +1,145 @@ +from datetime import datetime, timedelta, tzinfo + +from qiniu import utils, compat + + +class _CN_TZINFO(tzinfo): + def utcoffset(self, dt): + return timedelta(hours=8) + + def tzname(self, dt): + return "CST" + + def dst(self, dt): + return timedelta(0) + + +class TestUtils: + def test_urlsafe(self): + a = 'hello\x96' + u = utils.urlsafe_base64_encode(a) + assert compat.b(a) == utils.urlsafe_base64_decode(u) + + def test_canonical_mime_header_key(self): + field_names = [ + ":status", + ":x-test-1", + 
":x-Test-2", + "content-type", + "CONTENT-LENGTH", + "oRiGin", + "ReFer", + "Last-Modified", + "acCePt-ChArsEt", + "x-test-3", + "cache-control", + ] + expect_canonical_field_names = [ + ":status", + ":x-test-1", + ":x-Test-2", + "Content-Type", + "Content-Length", + "Origin", + "Refer", + "Last-Modified", + "Accept-Charset", + "X-Test-3", + "Cache-Control", + ] + assert len(field_names) == len(expect_canonical_field_names) + for i in range(len(field_names)): + assert utils.canonical_mime_header_key(field_names[i]) == expect_canonical_field_names[i] + + def test_entry(self): + case_list = [ + { + 'msg': 'normal', + 'bucket': 'qiniuphotos', + 'key': 'gogopher.jpg', + 'expect': 'cWluaXVwaG90b3M6Z29nb3BoZXIuanBn' + }, + { + 'msg': 'key empty', + 'bucket': 'qiniuphotos', + 'key': '', + 'expect': 'cWluaXVwaG90b3M6' + }, + { + 'msg': 'key undefined', + 'bucket': 'qiniuphotos', + 'key': None, + 'expect': 'cWluaXVwaG90b3M=' + }, + { + 'msg': 'key need replace plus symbol', + 'bucket': 'qiniuphotos', + 'key': '012ts>a', + 'expect': 'cWluaXVwaG90b3M6MDEydHM-YQ==' + }, + { + 'msg': 'key need replace slash symbol', + 'bucket': 'qiniuphotos', + 'key': '012ts?a', + 'expect': 'cWluaXVwaG90b3M6MDEydHM_YQ==' + } + ] + for c in case_list: + assert c.get('expect') == utils.entry(c.get('bucket'), c.get('key')), c.get('msg') + + def test_decode_entry(self): + case_list = [ + { + 'msg': 'normal', + 'expect': { + 'bucket': 'qiniuphotos', + 'key': 'gogopher.jpg' + }, + 'entry': 'cWluaXVwaG90b3M6Z29nb3BoZXIuanBn' + }, + { + 'msg': 'key empty', + 'expect': { + 'bucket': 'qiniuphotos', + 'key': '' + }, + 'entry': 'cWluaXVwaG90b3M6' + }, + { + 'msg': 'key undefined', + 'expect': { + 'bucket': 'qiniuphotos', + 'key': None + }, + 'entry': 'cWluaXVwaG90b3M=' + }, + { + 'msg': 'key need replace plus symbol', + 'expect': { + 'bucket': 'qiniuphotos', + 'key': '012ts>a' + }, + 'entry': 'cWluaXVwaG90b3M6MDEydHM-YQ==' + }, + { + 'msg': 'key need replace slash symbol', + 'expect': { + 'bucket': 'qiniuphotos', + 'key': '012ts?a' + }, + 'entry': 'cWluaXVwaG90b3M6MDEydHM_YQ==' + } + ] + for c in case_list: + bucket, key = utils.decode_entry(c.get('entry')) + assert bucket == c.get('expect', {}).get('bucket'), c.get('msg') + assert key == c.get('expect', {}).get('key'), c.get('msg') + + def test_dt2ts(self): + dt = datetime(year=2011, month=8, day=3, tzinfo=_CN_TZINFO()) + expect = 1312300800 + assert utils.dt2ts(dt) == expect + + base_dt = datetime(year=2011, month=8, day=3) + now_dt = datetime.now() + assert int((now_dt - base_dt).total_seconds()) == utils.dt2ts(now_dt) - utils.dt2ts(base_dt) diff --git a/tests/cases/test_zone/__init__.py b/tests/cases/test_zone/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/cases/test_zone/test_lagacy_region.py b/tests/cases/test_zone/test_lagacy_region.py new file mode 100644 index 00000000..0a8de93d --- /dev/null +++ b/tests/cases/test_zone/test_lagacy_region.py @@ -0,0 +1,111 @@ +import pytest + +from qiniu.http.region import Region, ServiceName +from qiniu.region import LegacyRegion +from qiniu.compat import json, is_py2 + + +@pytest.fixture +def mocked_hosts(): + mocked_hosts = { + ServiceName.UP: ['https://up.python-example.qiniu.com', 'https://up-2.python-example.qiniu.com'], + ServiceName.IO: ['https://io.python-example.qiniu.com'], + ServiceName.RS: ['https://rs.python-example.qiniu.com'], + ServiceName.RSF: ['https://rsf.python-example.qiniu.com'], + ServiceName.API: ['https://api.python-example.qiniu.com'] + } + yield mocked_hosts + + +@pytest.fixture 
+def mock_legacy_region(mocked_hosts): + region = LegacyRegion( + up_host=mocked_hosts[ServiceName.UP][0], + up_host_backup=mocked_hosts[ServiceName.UP][1], + io_host=mocked_hosts[ServiceName.IO][0], + rs_host=mocked_hosts[ServiceName.RS][0], + rsf_host=mocked_hosts[ServiceName.RSF][0], + api_host=mocked_hosts[ServiceName.API][0] + ) + yield region + + +class TestLegacyRegion: + def test_get_hosts_from_self(self, mocked_hosts, mock_legacy_region, qn_auth, bucket_name): + cases = [ + # up will always query from the old version, + # which version implements the `get_up_host_*` method + ( + mock_legacy_region.get_io_host(qn_auth.get_access_key(), None), + mocked_hosts[ServiceName.IO][0] + ), + ( + mock_legacy_region.get_rs_host(qn_auth.get_access_key(), None), + mocked_hosts[ServiceName.RS][0] + ), + ( + mock_legacy_region.get_rsf_host(qn_auth.get_access_key(), None), + mocked_hosts[ServiceName.RSF][0] + ), + ( + mock_legacy_region.get_api_host(qn_auth.get_access_key(), None), + mocked_hosts[ServiceName.API][0] + ) + ] + for actual, expect in cases: + assert actual == expect + + def test_get_hosts_from_query(self, qn_auth, bucket_name): + up_token = qn_auth.upload_token(bucket_name) + region = LegacyRegion() + up_host = region.get_up_host_by_token(up_token, None) + up_host_backup = region.get_up_host_backup_by_token(up_token, None) + if is_py2: + up_host = up_host.encode() + up_host_backup = up_host_backup.encode() + assert type(up_host) is str and len(up_host) > 0 + assert type(up_host_backup) is str and len(up_host_backup) > 0 + assert up_host != up_host_backup + + def test_compatible_with_http_region(self, mocked_hosts, mock_legacy_region): + assert isinstance(mock_legacy_region, Region) + assert mocked_hosts == { + k: [ + e.get_value() + for e in mock_legacy_region.services[k] + ] + for k in mocked_hosts + } + + def test_get_bucket_hosts(self, access_key, bucket_name): + region = LegacyRegion() + bucket_hosts = region.get_bucket_hosts(access_key, bucket_name) + for k in [ + 'upHosts', + 'ioHosts', + 'rsHosts', + 'rsfHosts', + 'apiHosts' + ]: + assert all(h.startswith('http') for h in bucket_hosts[k]), bucket_hosts[k] + + def test_bucket_hosts(self, access_key, bucket_name): + region = LegacyRegion() + bucket_hosts_str = region.bucket_hosts(access_key, bucket_name) + bucket_hosts = json.loads(bucket_hosts_str) + + region_hosts = bucket_hosts.get('hosts', []) + + assert len(region_hosts) > 0 + + for r in region_hosts: + for k in [ + 'up', + 'io', + 'rs', + 'rsf', + 'api' + ]: + service_hosts = r[k].get('domains') + assert len(service_hosts) > 0 + assert all(len(h) for h in service_hosts) diff --git a/tests/cases/test_zone/test_qiniu_conf.py b/tests/cases/test_zone/test_qiniu_conf.py new file mode 100644 index 00000000..0c05dfaf --- /dev/null +++ b/tests/cases/test_zone/test_qiniu_conf.py @@ -0,0 +1,99 @@ +import pytest + +from qiniu import Zone +from qiniu.config import get_default + +TEST_RS_HOST = 'rs.test.region.compatible.config.qiniu.com' +TEST_RSF_HOST = 'rsf.test.region.compatible.config.qiniu.com' +TEST_API_HOST = 'api.test.region.compatible.config.qiniu.com' + + +class TestQiniuConfWithZone: + """ + Test qiniu.conf with Zone(aka LegacyRegion) + """ + + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'default_uc_backup_hosts': [], + }, + { + 'default_uc_backup_hosts': [], + 'default_query_region_backup_hosts': [] + } + ], + indirect=True + ) + def test_disable_backup_hosts(self, set_conf_default): + assert get_default('default_uc_backup_hosts') == [] + assert 
get_default('default_query_region_backup_hosts') == [] + + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'default_rs_host': TEST_RS_HOST, + 'default_rsf_host': TEST_RSF_HOST, + 'default_api_host': TEST_API_HOST + } + ], + indirect=True + ) + def test_config_compatible(self, set_conf_default): + zone = Zone() + assert zone.get_rs_host("mock_ak", "mock_bucket") == TEST_RS_HOST + assert zone.get_rsf_host("mock_ak", "mock_bucket") == TEST_RSF_HOST + assert zone.get_api_host("mock_ak", "mock_bucket") == TEST_API_HOST + + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'default_query_region_host': 'https://fake-uc.pysdk.qiniu.com' + } + ], + indirect=True + ) + def test_query_region_with_custom_domain(self, access_key, bucket_name, set_conf_default): + with pytest.raises(Exception) as exc: + zone = Zone() + zone.bucket_hosts(access_key, bucket_name) + assert 'HTTP Status Code -1' in str(exc) + + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'default_query_region_host': 'https://fake-uc.pysdk.qiniu.com', + 'default_query_region_backup_hosts': [ + 'unavailable-uc.pysdk.qiniu.com', + 'uc.qbox.me' + ] + } + ], + indirect=True + ) + def test_query_region_with_backup_domains(self, access_key, bucket_name, set_conf_default): + zone = Zone() + data = zone.bucket_hosts(access_key, bucket_name) + assert data != 'null' and len(data) > 0 + + @pytest.mark.parametrize( + 'set_conf_default', + [ + { + 'default_uc_host': 'https://fake-uc.pysdk.qiniu.com', + 'default_query_region_backup_hosts': [ + 'unavailable-uc.phpsdk.qiniu.com', + 'uc.qbox.me' + ] + } + ], + indirect=True + ) + def test_query_region_with_uc_and_backup_domains(self, access_key, bucket_name, set_conf_default): + zone = Zone() + data = zone.bucket_hosts(access_key, bucket_name) + assert data != 'null' diff --git a/tests/mock_server/main.py b/tests/mock_server/main.py new file mode 100644 index 00000000..d85129ba --- /dev/null +++ b/tests/mock_server/main.py @@ -0,0 +1,65 @@ +import argparse +import http.server +import http.client +import logging +import sys +from urllib.parse import urlparse + +from routes import routes + + +class MockHandler(http.server.BaseHTTPRequestHandler): + def do_GET(self): + self.handle_request('GET') + + def do_POST(self): + self.handle_request('POST') + + def do_PUT(self): + self.handle_request('PUT') + + def do_DELETE(self): + self.handle_request('DELETE') + + def do_OPTIONS(self): + self.handle_request('OPTIONS') + + def do_HEAD(self): + self.handle_request('HEAD') + + def handle_request(self, method): + parsed_uri = urlparse(self.path) + handle = routes.get(parsed_uri.path) + if callable(handle): + try: + handle(method=method, parsed_uri=parsed_uri, request_handler=self) + except Exception: + logging.exception('Exception while handling.') + else: + self.send_response(404) + self.send_header('Content-type', 'text/html') + self.end_headers() + self.wfile.write(b'404 Not Found') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--port', + type=int, + default=8000, + ) + args = parser.parse_args() + + logging.basicConfig( + level=logging.INFO, + datefmt='%Y-%m-%d %H:%M:%S', + format='[%(asctime)s %(levelname)s] %(message)s', + handlers=[logging.StreamHandler(sys.stdout)], + ) + + server_address = ('', args.port) + httpd = http.server.HTTPServer(server_address, MockHandler) + logging.info('Mock Server running on port {}...'.format(args.port)) + + httpd.serve_forever() diff --git a/tests/mock_server/routes/__init__.py 
b/tests/mock_server/routes/__init__.py new file mode 100644 index 00000000..7eba32f1 --- /dev/null +++ b/tests/mock_server/routes/__init__.py @@ -0,0 +1,10 @@ +from .timeout import * +from .echo import * +from .retry_me import * + +routes = { + '/timeout': handle_timeout, + '/echo': handle_echo, + '/retry_me': handle_retry_me, + '/retry_me/__mgr__': handle_mgr_retry_me, +} diff --git a/tests/mock_server/routes/echo.py b/tests/mock_server/routes/echo.py new file mode 100644 index 00000000..30174c77 --- /dev/null +++ b/tests/mock_server/routes/echo.py @@ -0,0 +1,34 @@ +import http +import logging +from urllib.parse import parse_qs + + +def handle_echo(method, parsed_uri, request_handler): + """ + Parameters + ---------- + method: str + HTTP method + parsed_uri: urllib.parse.ParseResult + parsed URI + request_handler: http.server.BaseHTTPRequestHandler + request handler + """ + if method not in []: + # all method allowed + pass + echo_status = parse_qs(parsed_uri.query).get('status') + if not echo_status: + echo_status = http.HTTPStatus.BAD_REQUEST + logging.error('No echo status specified') + echo_body = f'param status is required' + else: + echo_status = int(echo_status[0]) + echo_body = f'Response echo status is {echo_status}' + + request_handler.send_response(echo_status) + request_handler.send_header('Content-Type', 'text/plain') + request_handler.send_header('X-Reqid', 'mocked-req-id') + request_handler.end_headers() + + request_handler.wfile.write(echo_body.encode('utf-8')) diff --git a/tests/mock_server/routes/retry_me.py b/tests/mock_server/routes/retry_me.py new file mode 100644 index 00000000..af4458f7 --- /dev/null +++ b/tests/mock_server/routes/retry_me.py @@ -0,0 +1,145 @@ +import http +import random +import string + +from urllib.parse import parse_qs + +__failure_record = {} + + +def should_fail_by_times(success_times=None, failure_times=None): + """ + Parameters + ---------- + success_times: list[int], default=[1] + failure_times: list[int], default=[0] + + Returns + ------- + Generator[bool, None, None] + + Examples + -------- + + should_fail_by_times([2], [3]) + will succeed 2 times and failed 3 times, and loop + + should_fail_by_times([2, 4], [3]) + will succeed 2 times and failed 3 times, + then succeeded 4 times and failed 3 time, and loop + """ + if not success_times: + success_times = [1] + if not failure_times: + failure_times = [0] + + def success_times_gen(): + while True: + for i in success_times: + yield i + + def failure_times_gen(): + while True: + for i in failure_times: + yield i + + success_times_iter = success_times_gen() + fail_times_iter = failure_times_gen() + + while True: + success = next(success_times_iter) + fail = next(fail_times_iter) + for _ in range(success): + yield False + for _ in range(fail): + yield True + + +def handle_mgr_retry_me(method, parsed_uri, request_handler): + """ + Parameters + ---------- + method: str + HTTP method + parsed_uri: urllib.parse.ParseResult + parsed URI + request_handler: http.server.BaseHTTPRequestHandler + request handler + """ + if method not in ['PUT', 'DELETE']: + request_handler.send_response(http.HTTPStatus.METHOD_NOT_ALLOWED) + return + match method: + case 'PUT': + # s for success + success_times = parse_qs(parsed_uri.query).get('s', []) + # f for failure + failure_times = parse_qs(parsed_uri.query).get('f', []) + + record_id = ''.join(random.choices(string.ascii_letters, k=16)) + + __failure_record[record_id] = should_fail_by_times( + success_times=[int(n) for n in success_times], + failure_times=[int(n) 
for n in failure_times] + ) + + request_handler.send_response(http.HTTPStatus.OK) + request_handler.send_header('Content-Type', 'text/plain') + request_handler.send_header('X-Reqid', record_id) + request_handler.end_headers() + + request_handler.wfile.write(record_id.encode('utf-8')) + case 'DELETE': + record_id = parse_qs(parsed_uri.query).get('id') + if not record_id or not record_id[0]: + request_handler.send_response(http.HTTPStatus.BAD_REQUEST) + return + record_id = record_id[0] + + if record_id in __failure_record: + del __failure_record[record_id] + + request_handler.send_response(http.HTTPStatus.NO_CONTENT) + request_handler.send_header('X-Reqid', record_id) + request_handler.end_headers() + + +def handle_retry_me(method, parsed_uri, request_handler): + """ + Parameters + ---------- + method: str + HTTP method + parsed_uri: urllib.parse.ParseResult + parsed URI + request_handler: http.server.BaseHTTPRequestHandler + request handler + """ + if method not in []: + # all method allowed + pass + record_id = parse_qs(parsed_uri.query).get('id') + if not record_id or not record_id[0]: + request_handler.send_response(http.HTTPStatus.BAD_REQUEST) + return + record_id = record_id[0] + + should_fail = next(__failure_record[record_id]) + + if should_fail: + request_handler.send_response(-1) + request_handler.send_header('Content-Type', 'text/plain') + request_handler.send_header('X-Reqid', record_id) + request_handler.end_headers() + + resp_body = 'service unavailable' + request_handler.wfile.write(resp_body.encode('utf-8')) + return + + request_handler.send_response(http.HTTPStatus.OK) + request_handler.send_header('Content-Type', 'text/plain') + request_handler.send_header('X-Reqid', record_id) + request_handler.end_headers() + + resp_body = 'ok' + request_handler.wfile.write(resp_body.encode('utf-8')) diff --git a/tests/mock_server/routes/timeout.py b/tests/mock_server/routes/timeout.py new file mode 100644 index 00000000..1cdaf70a --- /dev/null +++ b/tests/mock_server/routes/timeout.py @@ -0,0 +1,36 @@ +import http +import logging +import time + +from urllib.parse import parse_qs + + +def handle_timeout(method, parsed_uri, request_handler): + """ + Parameters + ---------- + method: str + HTTP method + parsed_uri: urllib.parse.ParseResult + parsed URI + request_handler: http.server.BaseHTTPRequestHandler + request handler + """ + if method not in []: + # all method allowed + pass + delay = parse_qs(parsed_uri.query).get('delay') + if not delay: + delay = 3 + logging.info('No delay specified. Fallback to %s seconds.', delay) + else: + delay = float(delay[0]) + + time.sleep(delay) + request_handler.send_response(http.HTTPStatus.OK) + request_handler.send_header('Content-Type', 'text/plain') + request_handler.send_header('X-Reqid', 'mocked-req-id') + request_handler.end_headers() + + resp_body = f'Response after {delay} seconds' + request_handler.wfile.write(resp_body.encode('utf-8'))
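
For orientation only, and not part of the patch itself: a minimal sketch of how a test could drive the mock server's /echo, /timeout and /retry_me routes over plain HTTP. The requests dependency and the MOCK_SERVER_ADDRESS environment variable are assumptions borrowed from the CI workflow; the endpoint behaviour follows handle_echo, handle_timeout and handle_mgr_retry_me/handle_retry_me above.

# Illustrative sketch, not part of this diff.
# Assumes `requests` is installed and MOCK_SERVER_ADDRESS points at
# tests/mock_server/main.py (e.g. http://127.0.0.1:9000).
import os

import requests

BASE = os.getenv('MOCK_SERVER_ADDRESS', 'http://127.0.0.1:9000')

# /echo answers with whatever status code is requested.
assert requests.get(BASE + '/echo', params={'status': 503}).status_code == 503

# /timeout answers 200 after the given delay in seconds.
assert requests.get(BASE + '/timeout', params={'delay': 0.1}).status_code == 200

# /retry_me/__mgr__ registers a success/failure pattern and returns a record id:
# here one success followed by two failures, repeating.
record_id = requests.put(BASE + '/retry_me/__mgr__', params={'s': 1, 'f': 2}).text


def call_ok(rid):
    # A registered failure is emitted as a deliberately broken status line, so
    # on the client side it may surface as a protocol/connection error rather
    # than a non-2xx HTTP status.
    try:
        return requests.get(BASE + '/retry_me', params={'id': rid}).status_code == 200
    except requests.exceptions.RequestException:
        return False


assert [call_ok(record_id) for _ in range(4)] == [True, False, False, True]

# Remove the record once the test is done.
requests.delete(BASE + '/retry_me/__mgr__', params={'id': record_id})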
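
New routes follow the same convention: a handler taking (method, parsed_uri, request_handler) plus an entry in the routes dict of tests/mock_server/routes/__init__.py. A hypothetical /headers route, shown only to illustrate the wiring (the file name and handler below are not part of this patch):

# tests/mock_server/routes/headers.py -- hypothetical example, not in this patch
import http


def handle_headers(method, parsed_uri, request_handler):
    """Echo the request headers back as a text/plain body."""
    body = '\n'.join(
        '{0}: {1}'.format(name, value)
        for name, value in request_handler.headers.items()
    )
    request_handler.send_response(http.HTTPStatus.OK)
    request_handler.send_header('Content-Type', 'text/plain')
    request_handler.send_header('X-Reqid', 'mocked-req-id')
    request_handler.end_headers()
    request_handler.wfile.write(body.encode('utf-8'))

# and in tests/mock_server/routes/__init__.py:
#     from .headers import *
#     routes['/headers'] = handle_headers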