diff --git a/python/pyproject.toml b/python/pyproject.toml
index 680e451..5f43454 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -10,20 +10,26 @@ dependencies = [
     "datasets",
     "accelerate",
     "pyyaml",
-    "nvidia-cusparselt-cu12>=0.8.1",
-    "nvidia-nvshmem-cu12>=3.6.5",
+    "nvidia-nvshmem-cu13>=3.4.5",
+    "nvidia-cuda-cccl>=13.2.27",
+    "flash-attn",
+    "unsloth",
 ]
 
-[project.optional-dependencies]
-flash = ["flash-attn"]
-decoder = ["unsloth"]
-
 [project.scripts]
 sec-cybert = "main:main"
 
+[[tool.uv.index]]
+name = "pytorch-cu130"
+url = "https://download.pytorch.org/whl/cu130"
+explicit = true
+
 [[tool.uv.index]]
 url = "https://pypi.org/simple/"
 default = true
 
 [tool.uv.sources]
+torch = [
+    { index = "pytorch-cu130", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
+]
 flash-attn = { url = "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.9.4/flash_attn-2.6.3%2Bcu130torch2.11-cp313-cp313-linux_x86_64.whl" }