summaryrefslogtreecommitdiff
path: root/ci/upload_relnotes.py
blob: 1cc8901385403e39a7191e3631e42a3973c3cade (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
#!/usr/bin/env python3
"""
Upload CHANGES.rst to Tidelift as Markdown chunks

Requires pandoc installed.

Put your Tidelift API token in a file called tidelift.token alongside this
program, for example:

    user/n3IwOpxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc2ZwE4

Run with two arguments: the .rst file to parse, and the Tidelift package name:

	python upload_relnotes.py CHANGES.rst pypi/coverage

"""

import os.path
import re
import subprocess
import sys

import requests

class TextChunkBuffer:
    """Accumulate pieces of text, emitting them as one chunk on flush."""

    def __init__(self):
        self.buffer = []

    def append(self, text):
        """Add one piece of text to the pending buffer."""
        self.buffer.append(text)

    def clear(self):
        """Discard everything currently buffered."""
        self.buffer = []

    def flush(self):
        """Yield a single ("text", joined) chunk if anything non-blank is buffered, then reset."""
        joined = "".join(self.buffer).strip()
        if joined:
            yield ("text", joined)
        self.clear()


def parse_md(lines):
    """Parse markdown lines, producing (type, text) chunks.

    Header lines become ("hN", title) chunks, where N is the number of
    leading hashes; runs of other lines are joined into ("text", body)
    chunks by a TextChunkBuffer.
    """
    pending = TextChunkBuffer()

    for line in lines:
        match = re.search(r"^(#+) (.+)$", line)
        if match is not None:
            # A header terminates any buffered text before it.
            yield from pending.flush()
            hashes, title = match.groups()
            yield (f"h{len(hashes)}", title)
        else:
            pending.append(line)

    yield from pending.flush()


def sections(parsed_data):
    """Convert a stream of parsed tokens into sections with text.

    `parsed_data` is an iterable of (type, text) pairs as produced by
    `parse_md`: header tokens ("h1", "h2", ...) and "text" tokens.

    Yields a stream of:
        ('h-level', 'header text', 'text')

    Text appearing before the first header is discarded (matching the
    original behavior).  Raises Exception on an unknown token type.
    """
    header = None
    text = []
    for ttype, ttext in parsed_data:
        if ttype.startswith('h'):
            # A new header closes out the previous section, if any.
            if header:
                yield (*header, "\n".join(text))
            text = []
            header = (ttype, ttext)
        elif ttype == "text":
            text.append(ttext)
        else:
            raise Exception(f"Don't know ttype {ttype!r}")
    # Emit the final section.  Guarding on `header` makes a headerless
    # (or empty) stream yield nothing instead of raising TypeError from
    # unpacking None; also removes the unused `notes` local.
    if header:
        yield (*header, "\n".join(text))


def relnotes(mdlines):
    """Yield (version, text) pairs from markdown lines.

    Each pair describes one version mentioned in the release notes.

    A version is an h2 section whose header starts with "Version ".
    """
    parsed = parse_md(mdlines)
    for level, title, body in sections(parsed):
        if level != 'h2':
            continue
        if not title.startswith('Version '):
            continue
        # The version number is the second word of the header.
        yield title.split()[1], body

def convert_rst_file_to_markdown(rst_filename):
    """Run pandoc to convert an .rst file to Markdown with ATX (#) headers."""
    cmd = [
        "pandoc",
        "-frst",
        "-tmarkdown_strict",
        "--atx-headers",
        rst_filename,
    ]
    output = subprocess.check_output(cmd)
    return output.decode("utf8")

def update_release_note(package, version, text):
    """Upload one version's release-note text to Tidelift.

    POSTs the Markdown `text`; on a 409 (note already exists) falls back
    to PUT to overwrite it.  Reads the API token from `tidelift.token`
    next to this script.  Prints the version and final HTTP status.
    """
    url = f"https://api.tidelift.com/external-api/lifting/{package}/release-notes/{version}"
    token_file = os.path.join(os.path.dirname(__file__), "tidelift.token")
    with open(token_file) as ftoken:
        token = ftoken.read().strip()
    headers = {
        # RFC 6750 bearer scheme is "Bearer <token>" — no colon after
        # "Bearer".  The original sent "Bearer: <token>", which is not a
        # valid Authorization credential.
        "Authorization": f"Bearer {token}",
    }
    req_args = dict(url=url, data=text.encode('utf8'), headers=headers)
    result = requests.post(**req_args)
    if result.status_code == 409:
        result = requests.put(**req_args)
    print(f"{version}: {result.status_code}")

def convert_and_upload(rst_filename, package):
    """Convert an .rst changelog to Markdown and upload each version's notes."""
    md_text = convert_rst_file_to_markdown(rst_filename)
    # splitlines(True) keeps line endings, which relnotes/parse_md expect.
    md_lines = md_text.splitlines(True)
    for version, note_text in relnotes(md_lines):
        update_release_note(package, version, note_text)

if __name__ == "__main__":
    # Expects two CLI arguments: the .rst file and the Tidelift package
    # name; forwarded positionally to convert_and_upload.
    cli_args = sys.argv[1:]
    convert_and_upload(*cli_args)