#!/bin/sh
#
# Create or update a GitHub release for git-lfs and upload release assets.
# Requires curl, jq, ruby, gpg, and shasum.  Authenticates with either
# $GITHUB_TOKEN or the user's .netrc (see the curl wrapper below).

set -e

# GitHub repository to operate on, in "owner/name" form.
REPO="git-lfs/git-lfs"

# Scratch space for intermediate files; removed automatically on exit.
WORKDIR="$(mktemp -d)"

trap 'rm -fr "$WORKDIR"' EXIT
|
# Print a formatted message to standard error unless $QUIET is set.
#
# Arguments: $1 - printf format string; remaining arguments are passed to
# printf as format operands.
say () {
  [ -n "$QUIET" ] && return

  local format="$1"
  shift
  printf "$format\n" "$@" >&2
}
|
# Print a formatted message to standard error and exit with status 2.
#
# Arguments: $1 - printf format string; remaining arguments are passed to
# printf as format operands.
abort () {
  local format="$1"
  shift
  printf "$format\n" "$@" >&2
  exit 2
}
|
2020-09-30 18:03:12 +00:00
|
|
|
# URI-encode the argument and print it to standard output, percent-escaping
# every byte except unreserved characters (alphanumerics, "_", ".", "-").
uri_encode () {
  ruby -e 'print ARGV[0].gsub(/[^A-Za-z0-9_.-]/) { |x| "%%%02x" % x.ord }' "$1"
}
2019-02-05 15:50:56 +00:00
|
|
|
# Wrap the real curl binary with authentication and sane defaults.
#
# If $GITHUB_TOKEN is set, it is used for authentication; otherwise curl's
# -n option is used so credentials come from the user's .netrc.  In both
# cases we fail on HTTP errors (-f) and stay silent except for errors (-sS).
curl () {
  if [ -n "$GITHUB_TOKEN" ]
  then
    command curl -u "token:$GITHUB_TOKEN" -fSs "$@"
  else
    command curl -nfSs "$@"
  fi
}
|
# Map a lowercase OS name from a release filename to its human-readable form
# (e.g. "linux" -> "Linux") and print it to standard output.  "freebsd" is
# special-cased because simple capitalization would produce "Freebsd".
categorize_os () {
  local os="$1"

  if [ "$os" = "freebsd" ]
  then
    echo FreeBSD
  else
    ruby -e 'puts ARGV[0].capitalize' "$os"
  fi
}
|
# Map a lowercase architecture name from a release filename to its
# human-readable form and print it to standard output.  "ppc64le" gets a
# descriptive name; everything else is simply uppercased (e.g. "amd64" ->
# "AMD64").
categorize_arch () {
  local arch="$1"

  if [ "$arch" = "ppc64le" ]
  then
    echo "Little-endian 64-bit PowerPC"
  else
    echo "$arch" | tr a-z A-Z
  fi
}
|
# Categorize a release asset and print its human readable name to standard
# output.
#
# Known fixed names (source tarball, Windows installer, hash manifests) are
# matched directly; anything else is assumed to be a per-platform binary
# archive named like "git-lfs-OS-ARCH-vX.Y.Z.EXT", whose OS and architecture
# components are extracted and described.
categorize_asset () {
  local file="$1"

  case "$file" in
    git-lfs-v*.*.*.tar.gz)
      echo "Source";;
    git-lfs-windows-v*.*.*.exe)
      echo "Windows Installer";;
    sha256sums)
      echo "Unsigned SHA-256 Hashes";;
    sha256sums.asc)
      echo "Signed SHA-256 Hashes";;
    hashes)
      echo "Unsigned Hashes";;
    hashes.asc)
      echo "Signed Hashes";;
    *)
      # Only spawn sed/ruby for the per-platform archives that actually
      # need their name parsed.
      local os=$(echo "$file" | sed -e 's/^git-lfs-//' -e 's/[-.].*$//')
      local arch=$(echo "$file" | ruby -pe '$_.gsub!(/\Agit-lfs-[^-]+-([^-]+)[-.].*/, "\\1")')

      printf "%s %s\n" "$(categorize_os "$os")" "$(categorize_arch "$arch")";;
  esac
}
|
# Provide a content type for the asset based on its file name.
#
# Prints nothing for unrecognized names; callers pass the result directly to
# the upload's Content-Type header.
content_type () {
  local file="$1"

  case "$file" in
    *.zip)
      echo "application/zip";;
    *.tar.gz)
      echo "application/gzip";;
    *.exe)
      echo "application/octet-stream";;
    *.asc|sha256sums*|hashes*)
      echo "text/plain";;
  esac
}
|
# Format the JSON for creating the release and print it to standard output.
#
# Arguments: $1 - version tag (used as both tag_name and release name);
#            $2 - file containing the release body text.
# The release is always created as a draft.  Ruby's JSON generator handles
# all escaping of the body text.
format_release_json () {
  local version="$1"
  local bodyfile="$2"

  ruby -rjson -e 'puts JSON.generate({
    tag_name: ARGV[0],
    name: ARGV[0],
    draft: true,
    body: File.read(ARGV[1]),
  })' "$version" "$bodyfile"
}
|
# Create a draft release and print the upload URL for release assets to the
# standard output.  If a release with that version already exists, do nothing
# instead.
#
# Arguments: $1 - version tag; $2 - file containing the release body text.
create_release () {
  local version="$1"
  local bodyfile="$2"

  # Check to see if we already have such a release. If so, don't create it.
  curl https://api.github.com/repos/$REPO/releases | \
    jq -r '.[].name' | grep -qsF "$version" && {
      say "Found an existing release for this version."
      # Print the existing release's upload URL, stripping the
      # "{?name,label}" URI-template suffix GitHub appends.
      curl https://api.github.com/repos/$REPO/releases | \
        jq -r '.[] | select(.name == "'"$version"'") | .upload_url' | \
        sed -e 's/{.*}//g'
      return
  }

  # This can be large, so pass it in a file.
  format_release_json "$version" "$bodyfile" >> "$WORKDIR/release-json"

  curl -H'Content-Type: application/json' -d"@$WORKDIR/release-json" \
    https://api.github.com/repos/$REPO/releases | \
    jq -r '.upload_url' |
    sed -e 's/{.*}//g'
}
|
2019-09-06 14:20:50 +00:00
|
|
|
# Update the draft release with a new body and print the upload URL for release
# assets to the standard output. A release with the given version must already
# exist; abort otherwise.
#
# Arguments: $1 - version tag; $2 - file containing the release body text.
patch_release () {
  local version="$1"
  local bodyfile="$2"

  # Find the URL of this release.
  local url=$(curl https://api.github.com/repos/$REPO/releases | \
    jq -r '.[] | select(.name == "'"$version"'") | .url')

  [ -n "$url" ] || abort "No existing release found for version $version."
  say "Found the existing release for this version."

  # This can be large, so pass it in a file.
  format_release_json "$version" "$bodyfile" >> "$WORKDIR/release-json"

  # PATCH the release, then print its upload URL with the "{?name,label}"
  # URI-template suffix stripped.
  curl -XPATCH -H'Content-Type: application/json' -d"@$WORKDIR/release-json" \
    $url | \
    jq -r '.upload_url' |
    sed -e 's/{.*}//g'
}
|
2019-02-05 15:50:56 +00:00
|
|
|
# Find the release files for the given version.
#
# Arguments: $1 - version tag (required; returns 1 if empty);
#            $2 - directory holding the assets (default: bin/releases).
# Prints matching file paths, one per line, in bytewise sorted order.
release_files () {
  local version="$1"
  local assets="${2:-bin/releases}"

  [ -n "$version" ] || return 1

  # Pick up archives, installers, and the signed hash manifests, then keep
  # only entries matching this version (the manifests are unversioned, so
  # they are always kept), excluding any stray "assets" paths.
  find "$assets" -name '*.tar.gz' -o \
    -name '*386*.zip' -o \
    -name '*amd64*.zip' -o \
    -name '*arm64*.zip' -o \
    -name '*.exe' -o \
    -name 'sha256sums.asc' -o \
    -name 'hashes.asc' | \
    grep -E "$version|sha256sums.asc|hashes.asc" | \
    grep -v "assets" | \
    LC_ALL=C sort
}
|
# Format the body message and print the file which contains it to the standard
# output.
#
# Arguments: $1 - version tag (leading "v" is stripped for package URLs);
#            $2 - file containing the changelog text;
#            $3 - directory holding the release assets.
# The body is the changelog, a list of package download links, and a SHA-256
# table for each asset.  Every literal "VERSION" in the template is replaced
# with the bare version number.
finalize_body_message () {
  local version="$1"
  local changelog="$2"
  local assets="$3"

  version=$(echo "$version" | sed -e 's/^v//')

  # If you change the list of distributions here, change docker/run_dockers.bsh
  # as well.
  cat "$changelog" > "$WORKDIR/body-template"
  cat <<EOM >> "$WORKDIR/body-template"

## Packages

Up to date packages are available on [PackageCloud](https://packagecloud.io/github/git-lfs) and [Homebrew](http://brew.sh/).

[RPM RHEL 7/CentOS 7](https://packagecloud.io/github/git-lfs/packages/el/7/git-lfs-VERSION-1.el7.x86_64.rpm/download)
[RPM RHEL 8/Rocky Linux 8](https://packagecloud.io/github/git-lfs/packages/el/8/git-lfs-VERSION-1.el8.x86_64.rpm/download)
[RPM RHEL 9/Rocky Linux 9](https://packagecloud.io/github/git-lfs/packages/el/9/git-lfs-VERSION-1.el9.x86_64.rpm/download)
[Debian 9](https://packagecloud.io/github/git-lfs/packages/debian/stretch/git-lfs_VERSION_amd64.deb/download)
[Debian 10](https://packagecloud.io/github/git-lfs/packages/debian/buster/git-lfs_VERSION_amd64.deb/download)
[Debian 11](https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_VERSION_amd64.deb/download)

## SHA-256 hashes:
EOM

  # Render each "HASH  path/to/FILE" line from shasum as a Markdown entry
  # ("**FILE**" followed by the hash), then squeeze trailing newlines.
  shasum -a256 $(release_files "$version" "$assets") | \
    ruby -pe '$_.chomp!' \
      -e '$_.gsub!(/^([0-9a-f]+)\s+.*\/([^\/]+)$/, "**\\2**\n\\1\n\n")' | \
    ruby -0777 -pe '$_.gsub!(/\n+\z/, "\n")' >> "$WORKDIR/body-template"

  sed -e "s/VERSION/$version/g" < "$WORKDIR/body-template" > "$WORKDIR/body"
  echo "$WORKDIR/body"
}
|
# Filter a list of files from standard input, removing entries found in the
# file provided.
#
# Arguments: $1 - file containing the entries to remove (fixed strings, one
# per line).
filter_files () {
  local filter="$1"

  # If the filter file is empty (that is, no assets have been uploaded), grep
  # will produce no output, and therefore nothing will be uploaded. That's not
  # what we want, so handle this case specially.
  if [ -s "$filter" ]
  then
    grep -vF -f "$filter"
  else
    cat
  fi
}
|
# Upload assets from the release directory to GitHub. Only assets that are not
# already existing should be uploaded.
#
# Arguments: $1 - version tag; $2 - the release's asset upload URL;
#            $3 - directory holding the assets to upload.
upload_assets () {
  local version="$1"
  local upload_url="$2"
  local src="$3"
  local file desc base ct encdesc encbase

  # List the assets already attached to this release so we can skip them.
  curl https://api.github.com/repos/$REPO/releases | \
    jq -r '.[] | select(.name == "'"$version"'") | .assets | .[] | .name' \
    > "$WORKDIR/existing-assets"

  for file in $(release_files "$version" "$src" | filter_files "$WORKDIR/existing-assets")
  do
    base=$(basename "$file")
    desc=$(categorize_asset "$base")
    ct=$(content_type "$base")
    # The name and label land in a query string, so they must be URI-encoded.
    encbase=$(uri_encode "$base")
    encdesc=$(uri_encode "$desc")

    say "\tUploading %s as \"%s\" (Content-Type %s)..." "$base" "$desc" "$ct"
    curl --data-binary "@$file" -H'Accept: application/vnd.github.v3+json' \
      -H"Content-Type: $ct" "$upload_url?name=$encbase&label=$encdesc" \
      >"$WORKDIR/response"
    download=$(jq -r '.url' "$WORKDIR/response")
  done

  say "Assets uploaded."
}
|
# Download assets from GitHub to the specified directory.
#
# Arguments: $1 - version tag; $2 - destination directory (must exist).
download_assets () {
  local version="$1"
  local dir="$2"

  # Produce "NAME URL" pairs, one per line, for every asset of this release.
  curl https://api.github.com/repos/$REPO/releases | \
    jq -rc '.[] | select(.name == "'"$version"'") | .assets | .[] | [.name,.url]' | \
    ruby -rjson -ne 'puts JSON.parse($_).join(" ")' \
    > "$WORKDIR/assets"

  cat "$WORKDIR/assets" | (while read base url
  do
    say "\tDownloading %s..." "$base"
    # Use a subshell so the cd doesn't leak out of each iteration.
    (
      cd "$dir" &&
      curl -Lo "$base" -H"Accept: application/octet-stream" "$url"
    )
  done)
}
|
# Download the assets and verify the signature made on them.
#
# Arguments: $1 - version tag.
# Verifies the GPG signatures on sha256sums.asc and hashes.asc and checks
# every downloaded asset against the signed hashes.  SHA-3 and BLAKE2b
# verification only run when the corresponding tools are installed.
verify_assets () {
  local version="$1"
  local dir="$WORKDIR/verify"
  mkdir "$dir"
  download_assets "$version" "$dir"

  # If the OpenPGP data is not valid, gpg -d will output nothing to stdout, and
  # shasum will then fail.
  say "Checking assets for integrity with SHA-256..."
  (cd "$dir" && gpg -d sha256sums.asc | shasum -a 256 -c)
  say "Checking assets for integrity with SHA-2..."
  (cd "$dir" && gpg -d hashes.asc | grep 'SHA[0-9][^-]' | shasum -c)
  if command -v sha3sum >/dev/null 2>&1
  then
    say "Checking assets for integrity with SHA-3..."
    (cd "$dir" && gpg -d hashes.asc | grep 'SHA3-' | sha3sum -c)
  fi
  if command -v b2sum >/dev/null 2>&1
  then
    say "Checking assets for integrity with BLAKE2b..."
    # b2sum on Linux does not handle BLAKE2s, only BLAKE2b.
    (cd "$dir" && gpg -d hashes.asc | grep 'BLAKE2b' | b2sum -c)
  fi

  say "\nAssets look good!"
}
|
2019-09-05 14:00:21 +00:00
|
|
|
# Extract the changelog for the given version from the history and save it in a
# file. Print the filename of the changelog to standard output.
#
# Arguments: $1 - version tag.  Reads CHANGELOG.md from the tagged commit and
# copies the section starting at "## <version-without-v>" up to (but not
# including) the next "## " heading.
extract_changelog () {
  local version="$1"

  git cat-file blob "$version:CHANGELOG.md" | \
    ruby -ne "version=%Q($version)[1..-1]; state ||= :silent; text ||= [];" \
      -e 'if state == :print && $_.start_with?("## "); puts text.join.strip; exit; end;' \
      -e 'text << $_ if state == :print;' \
      -e 'state = :print if $_.start_with?("## #{version}")' \
    > "$WORKDIR/changelog"
  echo "$WORKDIR/changelog"
}
|
2019-09-06 14:20:50 +00:00
|
|
|
# Perform the final steps to verify a release
finalize () {
  local version="$1"
  local inspect="$2"
  # Assets are downloaded here for signing; only the signed manifests are
  # copied into the separate uploads directory, so nothing else the developer
  # may have lying around gets re-uploaded to the release.
  local downloads="$WORKDIR/finalize"
  local uploads="$WORKDIR/finalize-uploads"

  say "Finalizing the release process..."
  say "Downloading assets..."

  mkdir "$downloads"
  mkdir "$uploads"
  download_assets "$version" "$downloads"

  if [ -n "$inspect" ]
  then
    say "Dropping you to a shell to inspect the assets."
    say "Type 'exit 0' to continue, or 'exit 1' to abort."
    # A non-zero exit status from the interactive shell aborts the whole
    # script via set -e.
    (cd "$downloads" && $SHELL)
  fi

  say "Signing asset manifest..."
  (
    # script/hash-files lives in the repository, so resolve the repository
    # root before changing into the downloads directory.  The grep filters
    # keep the manifest files themselves out of the hashed file list.
    root="$(git rev-parse --show-toplevel)" &&
    cd "$downloads" && \
    shasum -a256 -b * | grep -vE '(assets|sha256sums|hashes)' | \
      gpg --digest-algo SHA256 --clearsign >sha256sums.asc &&
    "$root/script/hash-files" * | grep -vE '(assets|sha256sums|hashes)' | \
      gpg --digest-algo SHA512 --clearsign >hashes.asc
  )

  say "Formatting the final body of the GitHub release now..."

  # Declare and assign separately: "local var=$(cmd)" would mask cmd's exit
  # status with local's own (successful) status, defeating set -e.
  local changelog
  local bodyfile
  changelog=$(extract_changelog "$version")
  bodyfile=$(finalize_body_message "$version" "$changelog" "$downloads")

  say "Uploading final release body..."

  local upload_url
  upload_url=$(patch_release "$version" "$bodyfile")

  say "Uploading final versions of assets..."
  cp "$downloads/sha256sums.asc" "$downloads/hashes.asc" "$uploads"
  upload_assets "$version" "$upload_url" "$uploads"

  # Verification occurs in caller below.
}
|
|
|
|
|
2019-02-05 15:50:56 +00:00
|
|
|
# Provide a helpful usage message and exit.
usage () {
  # Exit status to use: callers pass 0 for --help and non-zero for errors.
  local status="$1"
  cat <<EOM
Usage: $0 VERSION

Create a draft GitHub release for Git LFS using the tag specified by VERSION and
the changelog specified in the file CHANGELOG. Before running this script, the
release assets should be built and ready for upload, including the signed
sha256sums.asc and hashes.asc files.

This script requires ruby, gpg, curl, shasum, and jq. sha3sum and b2sum will be
used if available, but are optional.

This command must be run from within the repository.
EOM
  # Quoted so an accidental empty argument fails loudly instead of silently
  # turning into a bare "exit".
  exit "$status"
}
|
|
|
|
|
2019-09-05 14:22:03 +00:00
|
|
|
# Check that this script has the prerequisites to continue.
#
# Verifies that release artifacts exist (unless we're finalizing, where the
# assets are downloaded from GitHub instead) and that credentials for the
# GitHub API are available, either via GITHUB_TOKEN or ~/.netrc.
sanity_check () {
  local version="$1"
  local finalize="$2"

  say "Checking that you've got some release artifacts..."
  # When finalizing, missing local artifacts are expected and not fatal.
  if [ -z "$(release_files "$version")" ]
  then
    [ -n "$finalize" ] || abort "I couldn't find any release files for $version."
  fi

  if [ -z "$GITHUB_TOKEN" ]
  then
    say "Looking for the necessary entries in .netrc..."
    grep -qsF api.github.com "$HOME/.netrc" || \
      abort "I couldn't find api.github.com in your .netrc."
    grep -qsF uploads.github.com "$HOME/.netrc" || \
      abort "I couldn't find uploads.github.com in your .netrc."
  else
    say "Found a token in the GITHUB_TOKEN environment variable."
  fi

  say "Okay, everything looks good."
}
|
|
|
|
|
2019-02-05 15:50:56 +00:00
|
|
|
# The main program.
main () {
  local inspect=""

  # Parse options.  Anything unrecognized, or anything after "--", is left in
  # place and treated as the VERSION argument.
  while [ -n "$1" ]
  do
    case "$1" in
      --help)
        usage 0;;
      --inspect)
        inspect=1
        shift;;
      --skip-verify)
        SKIP_VERIFY=1
        shift;;
      --finalize)
        FINALIZE=1
        shift;;
      --)
        shift
        break;;
      *)
        break;;
    esac
  done

  local version="$1"

  [ -z "$version" ] && usage 1 >&2

  sanity_check "$version" "$FINALIZE"

  if [ -n "$FINALIZE" ]
  then
    finalize "$version" "$inspect"
  else
    say "Formatting the body of the GitHub release now..."

    # Declare and assign separately: "local var=$(cmd)" would mask cmd's
    # exit status with local's own (successful) status, defeating set -e.
    local changelog
    local bodyfile
    changelog=$(extract_changelog "$version")
    bodyfile=$(finalize_body_message "$version" "$changelog")

    say "Creating a GitHub release for %s..." "$version"

    local upload_url
    upload_url=$(create_release "$version" "$bodyfile")

    say "Uploading assets to GitHub..."
    upload_assets "$version" "$upload_url" bin/releases
  fi

  if [ -z "$SKIP_VERIFY" ]
  then
    say "Verifying assets..."
    verify_assets "$version"
  fi

  say "Okay, done. Sanity-check the release and publish it."
}
|
|
|
|
|
|
|
|
# Entry point: forward all command-line arguments to main.
main "$@"
|