-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathpyproject.toml
More file actions
109 lines (97 loc) · 2.52 KB
/
pyproject.toml
File metadata and controls
109 lines (97 loc) · 2.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
# Defining a build system lets us install the project as a dev package.
[build-system]
# PEP 639 license metadata below (SPDX `license` string + `license-files`)
# requires setuptools >= 77.0; an unpinned setuptools may be too old to
# understand these fields and fail the build.
# NOTE(review): setuptools >= 77 needs Python >= 3.9 — confirm this is
# acceptable given `requires-python = ">=3.8"`.
requires = ["setuptools>=77"]
build-backend = "setuptools.build_meta"

[project]
# PyPI name; the import name is "torchstream"
name = "torchstream-lib"
version = "1.0.2"
description = "A library for making PyTorch models streamable"
readme = "README.md"
license = "MIT"
license-files = ["LICENSE"]
requires-python = ">=3.8"
authors = [
    { name = "Corentin Jemine", email = "corentin.jemine@gmail.com" },
]
maintainers = [
    { name = "Corentin Jemine", email = "corentin.jemine@gmail.com" },
]
keywords = ["torch", "pytorch", "streaming", "deep learning", "machine learning", "neural networks", "numpy", "optimization"]
classifiers = [
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Software Development :: Libraries",
    # NOTE(review): this classifier is intended for OpenTelemetry
    # instrumentation packages; this project only depends on
    # opentelemetry-api — confirm it belongs here.
    "Framework :: OpenTelemetry :: Instrumentations",
]
dependencies = [
    "numpy>=1.23.0",
    "opentelemetry-api>=1.33.1",
    "scipy>=1.10.1",
    "torch>=1.11.0",
    "z3-solver>=4.14.0.0",
]

# The "cpu" and "cuda" extras select which PyTorch index torch is resolved
# from (see [tool.uv.sources]); they are mutually exclusive (see [tool.uv]).
[project.optional-dependencies]
cpu = [
    "torch>=1.11.0",
]
cuda = [
    "torch>=1.11.0",
]

[project.urls]
Repository = "https://github.com/CorentinJ/TorchStream"

# PEP 735 dependency groups: development-only dependencies, not part of the
# published package metadata.
[dependency-groups]
demos = [
    "colorama>=0.4.6",
    "huggingface_hub>=0.23.4,<=0.36",
    "kokoro>=0.9.2",
    "librosa>=0.11.0",
    "matplotlib>=3.7.5",
    "opentelemetry-sdk>=1.33.1",
    "pip>=25.0.1",
    "requests>=2.32.4",
    "resampy>=0.4.3",
    "soundfile",
    "streamlit>=1.40.1",
    "torch>=1.11.0",
    "torchaudio>=2.5.1",
]
dev = [
    "opentelemetry-sdk>=1.33.1",
    "pytest>=8.3.4",
    "pytest-profiling>=1.8.1",
]

[tool.setuptools.packages.find]
include = ["torchstream*"]

[tool.uv]
# The cpu/cuda extras point torch at different package indexes, so they can
# never be installed together.
conflicts = [
    [
        { extra = "cpu" },
        { extra = "cuda" },
    ],
]

[tool.uv.sources]
torch = [
    { index = "pytorch-cpu", extra = "cpu" },
    { index = "pytorch-cu118", extra = "cuda" },
]
# It's easier to set up torchaudio on CPU, regardless of whether CUDA is used
# for torch. We don't need CUDA-enabled torchaudio for the demos.
torchaudio = [
    { index = "pytorch-cpu", group = "demos" },
]

# `explicit = true` keeps these indexes from being used for anything other
# than the packages pinned to them in [tool.uv.sources].
[[tool.uv.index]]
name = "pytorch-cu118"
url = "https://download.pytorch.org/whl/cu118"
explicit = true

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

# The demos group needs a newer interpreter than the library itself.
[tool.uv.dependency-groups]
demos = { requires-python = ">=3.10" }

[tool.ruff]
line-length = 120