diff --git a/.github/workflows/arm-build.yml b/.github/workflows/arm-build.yml deleted file mode 100644 index 7bd48d754..000000000 --- a/.github/workflows/arm-build.yml +++ /dev/null @@ -1,101 +0,0 @@ -name: ARM builds - -on: - push: - branches: - - arm-build - schedule: - - cron: '0 4 * * 2' # Tuesday at 4:00 UTC - workflow_dispatch: - -jobs: - Build: - if: github.repository == 'CESNET/UltraGrid' || github.event.schedule == null - runs-on: ubuntu-22.04 # TODO: see the commit message why 22.04 is explicit - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - arch: [armhf, arm64] - include: - - arch: armhf - repo: http://mirrordirector.raspbian.org/raspbian/ - keyring: /etc/apt/trusted.gpg - qemu_bin_arch: arm - deb_release: bullseye - - arch: arm64 - repo: https://deb.debian.org/debian - keyring: /usr/share/keyrings/debian-archive-keyring.gpg - qemu_bin_arch: aarch64 - deb_release: bullseye - - steps: - - uses: actions/checkout@v4 - - name: Init environment variables - run: . .github/scripts/environment.sh - - name: Prepare - run: | - sudo apt update - sudo apt install binfmt-support curl qemu-user-static - - - name: Fetch SDKs ETags - id: etags - run: | - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ - https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz >> $GITHUB_OUTPUT - - name: Run actions/cache for NDI - id: cache-ndi - uses: actions/cache@main - with: - path: /var/tmp/Install_NDI_SDK_Linux.tar.gz - key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} - - name: Download NDI - if: steps.cache-ndi.outputs.cache-hit != 'true' - run: "curl -Lf https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz -o /var/tmp/Install_NDI_SDK_Linux.tar.gz" - - - name: Run actions/cache for ARM Chroot - id: cache-bootstrap - uses: actions/cache@main - with: - path: '~/chroot.tar' - key: cache-bootstrap-${{ env.ImageOS }}-${{ matrix.arch }}-${{ - hashFiles('.github/scripts/Linux/arm/bootstrap.sh', - '.github/scripts/Linux/install_others.sh') }} - - name: Debootstrap - if: steps.cache-bootstrap.outputs.cache-hit != 'true' - run: | - sudo apt install debootstrap debian-archive-keyring - sudo rm -rf ~/chroot - sudo mkdir -p ~/chroot/var/tmp - sudo mv /var/tmp/*NDI* ~/chroot/var/tmp - wget http://archive.raspbian.org/raspbian.public.key -O - | sudo apt-key add -q - sudo debootstrap --keyring=${{ matrix.keyring }} --arch ${{ matrix.arch }} ${{ matrix.deb_release }} ~/chroot ${{ matrix.repo }} - sudo cp -r $GITHUB_WORKSPACE/.github ~/chroot/ - sudo cp /usr/bin/qemu-${{ matrix.qemu_bin_arch }}-static ~/chroot/usr/bin/ - sudo -EH chroot ~/chroot /.github/scripts/Linux/arm/bootstrap.sh ${{ matrix.arch }} - cd ~ && sudo tar cf chroot.tar chroot # Create archive for caching. Needs to be created as root, therefore created manually. 
-    - name: Extract Cached Chroot
-      if: steps.cache-bootstrap.outputs.cache-hit == 'true'
-      run: |
-        cd ~
-        sudo tar xf chroot.tar
-    - name: Build
-      run: |
-        sudo cp -ar $GITHUB_WORKSPACE ~/chroot/UltraGrid
-        sudo -EH chroot ~/chroot /bin/sh -ec "cd UltraGrid; .github/scripts/Linux/arm/build.sh"
-    - name: Upload Release Asset
-      id: upload-release-asset
-      if: github.repository == 'CESNET/UltraGrid'
-      run: |
-        sudo apt install jq
-        sudo .github/scripts/replace-asset.sh GITHUB_REPOSITORY=$GITHUB_REPOSITORY GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} continuous ~/chroot/UltraGrid/UltraGrid-latest-${{ matrix.arch }}.AppImage application/x-appimage Linux%20${{ matrix.arch }}%20build
-    - name: Upload Build
-      if: steps.upload-release-asset.conclusion == 'skipped'
-      uses: actions/upload-artifact@main
-      with:
-        name: UltraGrid CI ${{ matrix.arch }} build
-        path: '~/chroot/UltraGrid/UltraGrid-latest-${{ matrix.arch }}.AppImage'
-
-# vi: set expandtab sw=2:
diff --git a/.github/workflows/auto-rebase-and-build.yml b/.github/workflows/auto-rebase-and-build.yml
new file mode 100644
index 000000000..f53b01bad
--- /dev/null
+++ b/.github/workflows/auto-rebase-and-build.yml
@@ -0,0 +1,54 @@
+name: Auto Rebase and Build
+on:
+  schedule:
+    - cron: '0 6 * * *' # 6 AM daily
+  workflow_dispatch:
+
+jobs:
+  rebase-and-build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Rebase on upstream
+        id: rebase
+        run: |
+          git config user.name "Auto Rebase Bot"
+          git config user.email "noreply@github.com"
+          git remote add upstream https://github.com/CESNET/UltraGrid.git
+          git fetch upstream
+
+          BEHIND=$(git rev-list --count HEAD..upstream/master)
+          if [ "$BEHIND" -eq 0 ]; then
+            echo "rebase_needed=false" >> $GITHUB_OUTPUT
+            exit 0
+          fi
+
+          # Create backup and rebase
+          git checkout -b backup-$(date +%Y%m%d)
+          git push origin backup-$(date +%Y%m%d)
+          git checkout master
+
+          if git rebase upstream/master; then
+            git push --force-with-lease origin master
+            echo "rebase_needed=true" >> $GITHUB_OUTPUT
+            echo "rebase_success=true" >> $GITHUB_OUTPUT
+          else
+            git rebase --abort
+            echo "rebase_needed=true" >> $GITHUB_OUTPUT
+            echo "rebase_success=false" >> $GITHUB_OUTPUT
+            exit 1
+          fi
+
+      - name: Trigger AppImage build
+        if: steps.rebase.outputs.rebase_success == 'true'
+        run: |
+          curl -X POST \
+            -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+            -H "Accept: application/vnd.github.v3+json" \
+            "${{ github.api_url }}/repos/${{ github.repository }}/actions/workflows/build-patched-appimage.yml/dispatches" \
+            -d '{"ref":"master"}'
diff --git a/.github/workflows/build-patched-appimage.yml b/.github/workflows/build-patched-appimage.yml
new file mode 100644
index 000000000..005c48967
--- /dev/null
+++ b/.github/workflows/build-patched-appimage.yml
@@ -0,0 +1,133 @@
+name: C/C++ CI
+
+# This is cut down from the UG ccpp.yml file, as we don't need to run builds for anything other than Linux.
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+    tags:
+      - v[0-9]+.*
+    paths:
+      - '.github/scripts/**'
+      - '.github/workflows/build-patched-appimage.yml'
+      - '**.c'
+      - '**.cpp'
+      - '**.cu'
+      - '**.h'
+      - '**.hpp'
+      - '**.m'
+      - '**.mm'
+      - 'autogen.sh'
+      - 'configure.ac'
+      - 'data/**'
+      - 'Makefile.in'
+  workflow_dispatch:
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - uses: actions/checkout@v4
+        id: checkout
+        with:
+          persist-credentials: true
+      - name: Init environment variables
+        run: . 
.github/scripts/environment.sh + - name: Retag continuous + if: github.repository == 'armelvil/UltraGrid' && github.ref == 'refs/heads/master' + run: | + git fetch --prune --unshallow --tags + git tag -f $TAG + git push -f origin refs/tags/$TAG:refs/tags/$TAG + - name: Create continuous release # ensure continuous release is present for AppImage zsync + if: startsWith(github.ref, 'refs/tags/') + run: .github/scripts/create_continuous_release.sh + + Ubuntu: + name: run Ubuntu + needs: prepare + runs-on: ubuntu-22.04 + env: + appimage_key: ${{ secrets.appimage_key }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SDK_URL: ${{ secrets.SDK_URL }} + + steps: + - uses: actions/checkout@v4 + - name: Fetch SDKs ETags + id: etags + run: | + $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ + https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ + Install_NDI_SDK_v6_Linux.tar.gz >> $GITHUB_OUTPUT + - name: Run actions/cache for NDI + id: cache-ndi + uses: actions/cache@main + with: + path: /var/tmp/Install_NDI_SDK_Linux.tar.gz + key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} + - name: Download NDI + if: steps.cache-ndi.outputs.cache-hit != 'true' + run: "curl -Lf https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ + Install_NDI_SDK_v6_Linux.tar.gz -o /var/tmp/Install_NDI_SDK_Linux.tar.gz" + - name: Cache FFmpeg + uses: actions/cache@main + with: + path: '/var/tmp/ffmpeg' + key: cache-ffmpeg-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_ffmpeg.sh', '.github/scripts/Linux/install_other.sh', '.github/scripts/Linux/ffmpeg-patches/*') }} + - name: Cache SDL + uses: actions/cache@main + with: + path: '/var/tmp/sdl' + key: cache-sdl-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_sdl.sh' ) }} + - name: Cache GLFW + uses: actions/cache@main + with: + path: '/var/tmp/glfw' + key: cache-glfw-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_glfw.sh' ) }} + - name: bootstrap + run: | + . 
.github/scripts/environment.sh + .github/scripts/Linux/prepare.sh + - name: configure + run: "./autogen.sh $FEATURES || { RC=$?; cat config.log; exit $RC; }" + - name: make + run: make -j4 + - name: make check + run: make check + - name: make distcheck + run: make distcheck + - name: check libc/libstdc++ ABI + run: .github/scripts/Linux/check_abi.sh 2.35 3.4.30 1.3.13 bin/* lib/ultragrid/* + - name: Create AppImage + run: data/scripts/Linux-AppImage/create-appimage.sh https://github.com/$GITHUB_REPOSITORY/releases/download/continuous/UltraGrid-$CHANNEL-x86_64.AppImage.zsync + - name: Check AppImage + run: | + .github/scripts/Linux/docker_appimage_tests.sh + curl -LSf -O https://raw.githubusercontent.com/AppImage/pkg2appimage/\ + master/appdir-lint.sh -O https://raw.githubusercontent.com/probonopd/\ + AppImages/master/excludelist + sudo apt install desktop-file-utils libfile-mimeinfo-perl # desktop-file-validate, mimetype + bash appdir-lint.sh squashfs-root + - name: Compute checksum + run: sha256sum UltraGrid-$VERSION-x86_64.AppImage + - name: Upload Release Asset + id: upload-release + if: (github.repository == 'armelvil/UltraGrid' && github.ref == 'refs/heads/master') || startsWith(github.ref, 'refs/tags/') + run: | + sudo apt install jq zsync + zsyncmake -C -u https://github.com/$GITHUB_REPOSITORY/releases/download/$TAG/UltraGrid-$VERSION-x86_64.AppImage -o UltraGrid-$CHANNEL-x86_64.AppImage.zsync UltraGrid-$VERSION-x86_64.AppImage + .github/scripts/replace-asset.sh continuous UltraGrid-$CHANNEL-x86_64.AppImage.zsync application/x-zsync AppImage%20${CHANNEL}%20zsync + .github/scripts/replace-asset.sh $TAG UltraGrid-$VERSION-x86_64.AppImage application/x-appimage Linux%20build + - name: Upload Build + if: steps.upload-release.conclusion == 'skipped' + uses: actions/upload-artifact@main + with: + name: UltraGrid-Linux + path: UltraGrid-${{ env.VERSION }}-x86_64.AppImage + +# vi: set expandtab sw=2: \ No newline at end of file diff --git a/.github/workflows/ccpp.yml b/.github/workflows/ccpp.yml deleted file mode 100644 index 7c4a198e6..000000000 --- a/.github/workflows/ccpp.yml +++ /dev/null @@ -1,373 +0,0 @@ -name: C/C++ CI - -on: - pull_request: - push: - branches: - - master - tags: - - v[0-9]+.* - paths: - - '.github/scripts/**' - - '.github/workflows/ccpp.yml' - - '**.c' - - '**.cpp' - - '**.cu' - - '**.h' - - '**.hpp' - - '**.m' - - '**.mm' - - 'autogen.sh' - - 'configure.ac' - - 'data/**' - - 'Makefile.in' - workflow_dispatch: - -jobs: - prepare: - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, we do not need to create own token. - steps: - - uses: actions/checkout@v4 - id: checkout - with: - persist-credentials: true - - name: Init environment variables - run: . 
.github/scripts/environment.sh - - name: Retag continuous - if: github.repository == 'CESNET/UltraGrid' && github.ref == 'refs/heads/master' - run: | - git fetch --prune --unshallow --tags - git tag -f $TAG - git push -f origin refs/tags/$TAG:refs/tags/$TAG - - name: Update Release - if: (github.repository == 'CESNET/UltraGrid' && github.ref == 'refs/heads/master') || startsWith(github.ref, 'refs/tags/') - run: .github/scripts/create_release.sh - - name: Create continuous release # ensure continuous release is present for AppImage zsync - if: startsWith(github.ref, 'refs/tags/') - run: .github/scripts/create_continuous_release.sh - - Ubuntu: - name: run Ubuntu - needs: prepare - runs-on: ubuntu-22.04 - env: - appimage_key: ${{ secrets.appimage_key }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SDK_URL: ${{ secrets.SDK_URL }} - - steps: - - uses: actions/checkout@v4 - - name: Fetch SDKs ETags - id: etags - run: | - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ - https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz >> $GITHUB_OUTPUT - - name: Run actions/cache for NDI - id: cache-ndi - uses: actions/cache@main - with: - path: /var/tmp/Install_NDI_SDK_Linux.tar.gz - key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} - - name: Download NDI - if: steps.cache-ndi.outputs.cache-hit != 'true' - run: "curl -Lf https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz -o /var/tmp/Install_NDI_SDK_Linux.tar.gz" - - name: Cache FFmpeg - uses: actions/cache@main - with: - path: '/var/tmp/ffmpeg' - key: cache-ffmpeg-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_ffmpeg.sh', '.github/scripts/Linux/install_other.sh', '.github/scripts/Linux/ffmpeg-patches/*') }} - - name: Cache SDL - uses: actions/cache@main - with: - path: '/var/tmp/sdl' - key: cache-sdl-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_sdl.sh' ) }} - - name: Cache GLFW - uses: actions/cache@main - with: - path: '/var/tmp/glfw' - key: cache-glfw-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_glfw.sh' ) }} - - name: bootstrap - run: | - . 
.github/scripts/environment.sh - .github/scripts/Linux/prepare.sh - - name: configure - run: "./autogen.sh $FEATURES || { RC=$?; cat config.log; exit $RC; }" - - name: make - run: make -j4 - - name: make check - run: make check - - name: make distcheck - run: make distcheck - - name: check libc/libstdc++ ABI - run: .github/scripts/Linux/check_abi.sh 2.35 3.4.30 1.3.13 bin/* lib/ultragrid/* - - name: Create AppImage - run: data/scripts/Linux-AppImage/create-appimage.sh https://github.com/$GITHUB_REPOSITORY/releases/download/continuous/UltraGrid-$CHANNEL-x86_64.AppImage.zsync - - name: Check AppImage - run: | - .github/scripts/Linux/docker_appimage_tests.sh - curl -LSf -O https://raw.githubusercontent.com/AppImage/pkg2appimage/\ - master/appdir-lint.sh -O https://raw.githubusercontent.com/probonopd/\ - AppImages/master/excludelist - sudo apt install desktop-file-utils libfile-mimeinfo-perl # desktop-file-validate, mimetype - bash appdir-lint.sh squashfs-root - - name: Compute checksum - run: sha256sum UltraGrid-$VERSION-x86_64.AppImage - - name: Upload Release Asset - id: upload-release - if: (github.repository == 'CESNET/UltraGrid' && github.ref == 'refs/heads/master') || startsWith(github.ref, 'refs/tags/') - run: | - sudo apt install jq zsync - zsyncmake -C -u https://github.com/$GITHUB_REPOSITORY/releases/download/$TAG/UltraGrid-$VERSION-x86_64.AppImage -o UltraGrid-$CHANNEL-x86_64.AppImage.zsync UltraGrid-$VERSION-x86_64.AppImage - .github/scripts/replace-asset.sh continuous UltraGrid-$CHANNEL-x86_64.AppImage.zsync application/x-zsync AppImage%20${CHANNEL}%20zsync - .github/scripts/replace-asset.sh $TAG UltraGrid-$VERSION-x86_64.AppImage application/x-appimage Linux%20build - - name: Upload Build - if: steps.upload-release.conclusion == 'skipped' - uses: actions/upload-artifact@main - with: - name: UltraGrid-Linux - path: UltraGrid-${{ env.VERSION }}-x86_64.AppImage - - macOS: - name: run macOS - needs: prepare - strategy: - matrix: - os: [macos-15-intel, macos-14] - fail-fast: false - runs-on: ${{ matrix.os }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SDK_NONFREE_PATH: /private/var/tmp/sdks-nonfree - notarytool_credentials: ${{ secrets.notarytool_credentials }} - apple_key_p12_b64: ${{ secrets.apple_key_p12_b64 }} - SDK_URL: ${{ secrets.SDK_URL }} - - steps: - - uses: actions/checkout@v4 - - name: Set environment - run: . 
.github/scripts/environment.sh - - name: Fetch SDKs ETags - id: etags - run: | - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh nonfree\ - "$SDK_URL/$DELTA_MAC_ARCHIVE" optional >> $GITHUB_OUTPUT - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ - https://downloads.ndi.tv/SDK/NDI_SDK_Mac/Install_NDI_SDK_v6_Apple.pkg\ - >> $GITHUB_OUTPUT - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ximea\ - "$XIMEA_DOWNLOAD_URL" >> $GITHUB_OUTPUT - - name: Run actions/cache for Non-Free SDKs - id: cache-macos-nonfree-sdks - uses: actions/cache@main - with: - path: ${{ env.SDK_NONFREE_PATH }} - key: cache-nonfree-sdks-${{ runner.os }}-${{ steps.etags.outputs.nonfree }} - - name: Download Non-Free SDKs - if: steps.cache-macos-nonfree-sdks.outputs.cache-hit != 'true' && env.SDK_URL != null - run: | - rm -rf ${{ env.SDK_NONFREE_PATH }} - mkdir -p ${{ env.SDK_NONFREE_PATH }} - cd ${{ env.SDK_NONFREE_PATH }} - curl -S -f "$SDK_URL/$DELTA_MAC_ARCHIVE" -o "$DELTA_MAC_ARCHIVE" - - name: Run actions/cache for XIMEA - id: cache-macos-ximea - uses: actions/cache@main - with: - path: /var/tmp/XIMEA_OSX_SP.dmg - key: cache-ximea-${{ matrix.os }}-${{ steps.etags.outputs.ximea }} - - name: Download XIMEA - if: steps.cache-macos-ximea.outputs.cache-hit != 'true' - run: curl -LSf "$XIMEA_DOWNLOAD_URL" -o /private/var/tmp/XIMEA_OSX_SP.dmg - - name: Run actions/cache for NDI - id: cache-ndi - uses: actions/cache@main - with: - path: /private/var/tmp/Install_NDI_SDK_Apple.pkg - key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} - - name: Download NDI - if: steps.cache-ndi.outputs.cache-hit != 'true' - run: "curl -Lf https://downloads.ndi.tv/SDK/NDI_SDK_Mac/Install_\ - NDI_SDK_v6_Apple.pkg -o /private/var/tmp/Install_NDI_SDK_Apple.pkg" - - name: bootstrap - run: .github/scripts/macOS/prepare.sh - - name: configure - run: "ARCH=$UG_ARCH ./autogen.sh $FEATURES || { RC=$?; cat config.log; exit $RC; }" - - name: make bundle - run: make -j4 gui-bundle - - name: make check - run: make check - - name: make distcheck - run: | - local_lib_list="/opt/homebrew /usr/local/lib /usr/local/opt \ - $(xcrun --show-sdk-path)/System/Library/Frameworks" - for n in $local_lib_list; do if [ -d "$n" ] - then sudo mv -- "$n" "$n-"; fi; done # hide local libs - make distcheck TARGET=uv-qt.app/Contents/MacOS/uv REFLECTOR_TARGET=uv-qt.app/Contents/MacOS/hd-rum-transcode GUI_EXE=uv-qt.app/Contents/MacOS/uv-qt - for n in $local_lib_list; do if [ -d "$n-" ] - then sudo mv -- "$n-" "$n"; fi; done # return back - - name: sign+notarize - if: env.KEY_CHAIN != null - run: .github/scripts/macOS/sign.sh uv-qt.app - - name: make dmg - run: | - for n in `seq 5`; do # do more attempts - if make osx-gui-dmg; then break; fi - if [ $n -eq 5 ]; then false; fi - sleep $((n * 10)) - done - mv UltraGrid.dmg UltraGrid-$VERSION.dmg - - name: Compute checksum - run: sha2 -256 UltraGrid-$VERSION.dmg || true - - name: Upload Release Asset - id: upload-release - if: (github.repository == 'CESNET/UltraGrid' && github.ref == 'refs/heads/master') || startsWith(github.ref, 'refs/tags/') - run: | - brew list jq >/dev/null 2>&1 || brew install jq - .github/scripts/replace-asset.sh $TAG UltraGrid-$VERSION.dmg application/x-apple-diskimage macOS%20$(uname -m)%20build - - name: Upload Build - if: steps.upload-release.conclusion == 'skipped' - uses: actions/upload-artifact@main - with: - name: UltraGrid-${{ env.VERSION }}-macOS - path: UltraGrid-${{ env.VERSION }}.dmg - - Windows: - name: run Windows - needs: prepare - runs-on: windows-latest - defaults: - run: - shell: 
C:\shells\msys2bash.cmd {0} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - MSYS2_PATH_TYPE: inherit - SDK_URL: ${{ secrets.SDK_URL }} - - steps: - - uses: actions/checkout@v4 - - name: Set environment - run: .github/scripts/environment.sh - - name: Fetch SDKs ETags - id: etags - run: | - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ - https://downloads.ndi.tv/SDK/NDI_SDK/NDI%206%20SDK.exe\ - >> $GITHUB_OUTPUT - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ximea\ - "$XIMEA_DOWNLOAD_URL" >> $GITHUB_OUTPUT - - name: Find MSVC - run: .github/scripts/Windows/find_msvc.ps1 - shell: pwsh -command ". '{0}'" - - name: Run actions/cache for NDI - id: cache-ndi - uses: actions/cache@main - with: - path: 'C:\ndi.exe' - key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} - - name: Download NDI - if: steps.cache-ndi.outputs.cache-hit != 'true' - run: curl -f 'https://downloads.ndi.tv/SDK/NDI_SDK/NDI%206%20SDK.exe' - -o 'C:\ndi.exe' - - name: Cache XIMEA - id: cache-macos-ximea - uses: actions/cache@main - with: - path: 'C:\XIMEA_API_Installer.exe' - key: cache-ximea-${{ runner.os }}-${{ steps.etags.outputs.ximea }} - - name: Download XIMEA - if: steps.cache-macos-ximea.outputs.cache-hit != 'true' - run: curl -f "$XIMEA_DOWNLOAD_URL" -o 'C:\XIMEA_API_Installer.exe' - - - name: Run actions/cache for libajantv2 build - id: cache-aja - uses: actions/cache@main - with: - path: 'libajantv2' - key: cache-aja-${{ runner.os }}-${{ hashFiles('.github/scripts/install-common-deps.sh') }} - - name: Cache live555 - id: cache-live555 - uses: actions/cache@main - with: - path: 'live555' - key: cache-live555-${{ runner.os }}-${{ hashFiles('.github/scripts/install-common-deps.sh') }} - - - name: bootsrap - run: .github/scripts/Windows/prepare.ps1 - shell: pwsh -command ". '{0}'" - - name: Run actions/cache for JACK - id: cache-jack - uses: actions/cache@main - with: - path: 'C:\Program Files\JACK2' - key: cache-jack-${{ runner.os }}-${{ hashFiles('.github/scripts/Windows/install_jack.ps1') }} - - name: Install JACK - if: steps.cache-jack.outputs.cache-hit != 'true' - run: .github/scripts/Windows/install_jack.ps1 - shell: pwsh -command ". 
'{0}'" - - name: bootsrap MSYS2 - run: $GITHUB_WORKSPACE/.github/scripts/Windows/prepare_msys.sh - - - name: Install Spout - run: $GITHUB_WORKSPACE/.github/scripts/Windows/install_spout.sh - - - name: Run actions/cache for CineForm build - id: cache-cineform - uses: actions/cache@main - with: - path: 'C:\cineform-sdk' - key: cache-cineform-${{ runner.os }}-${{ hashFiles('.github/scripts/Windows/install_cineform.sh') }} - - name: Build CineForm - if: steps.cache-cineform.outputs.cache-hit != 'true' - run: $GITHUB_WORKSPACE/.github/scripts/Windows/install_cineform.sh build - - name: Install CineForm - run: $GITHUB_WORKSPACE/.github/scripts/Windows/install_cineform.sh install - - - name: configure - run: ./autogen.sh --prefix=/ --bindir=/ --docdir=/doc $FEATURES || { RC=$?; cat config.log; exit $RC; } - - name: make - run: make -j4 - - name: make check - run: make check - - name: make bundle - run: | - export DESTDIR=build/UltraGrid-$VERSION-win64 - make install - for exe in "$DESTDIR"/*.exe; do - data/scripts/get_dll_depends.sh "$exe" | - while read -r n; do cp "$n" "$DESTDIR"; done - done - if command -v windeployqt-qt6 >/dev/null; then - windeployqt-qt6 "$DESTDIR/uv-qt.exe" - else - windeployqt "$DESTDIR/uv-qt.exe" - fi - cp -r data/Windows/* "$DESTDIR" - data/scripts/get_dll_depends.sh\ - "$DESTDIR/screen-capture-recorder-x64.dll" | - while read -r n; do cp "$n" "$DESTDIR"; done - - name: make dist-check - run: PATH= /usr/bin/make distcheck - TARGET=build/UltraGrid-$VERSION-win64/uv.exe - REFLECTOR_TARGET=build/UltraGrid-$VERSION-win64/hd-rum-transcode.exe - GUI_EXE=build/UltraGrid-$VERSION-win64/uv-qt.exe - - name: Upload Release Asset - id: upload-release - if: (github.repository == 'CESNET/UltraGrid' && github.ref == 'refs/heads/master') || startsWith(github.ref, 'refs/tags/') - run: | - cd build; zip -9 -r UltraGrid-$VERSION-win64.zip UltraGrid-$VERSION-win64 - $GITHUB_WORKSPACE/.github/scripts/replace-asset.sh $TAG UltraGrid-$VERSION-win64.zip application/zip Windows%20build - - name: Compute checksum - if: steps.upload-release.conclusion != 'skipped' - run: sha256sum build/UltraGrid-$VERSION-win64.zip - - name: Upload Build - if: steps.upload-release.conclusion == 'skipped' - uses: actions/upload-artifact@main - with: - name: UltraGrid-Windows - path: build - -# vi: set expandtab sw=2: diff --git a/.github/workflows/coverity-scan.yml b/.github/workflows/coverity-scan.yml deleted file mode 100644 index 4b94e3b0d..000000000 --- a/.github/workflows/coverity-scan.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: coverity-scan - -on: - push: - branches: - - coverity_scan - schedule: - - cron: '0 4 * * 2' # Tuesday at 4:00 UTC - -jobs: - Coverity: - if: github.repository == 'CESNET/UltraGrid' || github.event.schedule == null - runs-on: ubuntu-latest - env: - SDK_URL: ${{ secrets.SDK_URL }} - coverity_token: ${{ secrets.coverity_token }} - - steps: - - name: Check Coverity token presence - if: env.coverity_token == '' - run: | - echo "secrects.coverity_token not present, skipping the analysis!" 
- exit 1 - - - name: Get Coverity tool name # the file name contains version and is used as the cache key - id: tool - run: | - FILENAME=$(curl -LIf "https://scan.coverity.com/download/linux64\ - ?token=$coverity_token&project=UltraGrid" | - sed -n '/content-disposition/s/.*\"\(.*\)\"/\1/p') - echo "filename=$FILENAME" >> $GITHUB_OUTPUT - - name: Run actions/cache for Coverity build tool - id: cache-coverity-tool - uses: actions/cache@main - with: - path: ~/coverity_tool.tgz - key: cache-coverity-tool-${{ steps.tool.outputs.filename }} - - name: Download Coverity build tool - if: steps.cache-coverity-tool.outputs.cache-hit != 'true' - run: | - wget --no-verbose https://scan.coverity.com/download/linux64 --post-data "token=$coverity_token&project=UltraGrid" -O ~/coverity_tool.tgz - - name: Extract Coverity build tool - run: | - tar xaf ~/coverity_tool.tgz - mv cov-analysis* /tmp/cov-analysis - - - uses: actions/checkout@v4 - - - name: Fetch SDKs ETags - id: etags - run: | - $GITHUB_WORKSPACE/.github/scripts/get-etag.sh ndi\ - https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz >> $GITHUB_OUTPUT - - name: Run actions/cache for NDI - id: cache-ndi - uses: actions/cache@main - with: - path: /var/tmp/Install_NDI_SDK_Linux.tar.gz - key: cache-ndi-${{ runner.os }}-${{ steps.etags.outputs.ndi }} - - name: Download NDI - if: steps.cache-ndi.outputs.cache-hit != 'true' - run: "curl -Lf https://downloads.ndi.tv/SDK/NDI_SDK_Linux/\ - Install_NDI_SDK_v6_Linux.tar.gz -o /var/tmp/Install_NDI_SDK_Linux.tar.gz" - - name: Cache FFmpeg - uses: actions/cache@main - with: - path: '/var/tmp/ffmpeg' - key: cache-${{ github.workflow }}-ffmpeg-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_ffmpeg.sh', '.github/scripts/Linux/install_other.sh', '.github/scripts/Linux/ffmpeg-patches/*') }} - - name: Cache SDL - uses: actions/cache@main - with: - path: '/var/tmp/sdl' - key: cache-${{ github.workflow }}-sdl-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_sdl.sh' ) }} - - name: Cache GLFW - uses: actions/cache@main - with: - path: '/var/tmp/glfw' - key: cache-${{ github.workflow }}-glfw-${{ runner.os }}-${{ hashFiles( '.github/scripts/Linux/install_glfw.sh' ) }} - - name: bootstrap - run: | - . .github/scripts/environment.sh - .github/scripts/Linux/prepare.sh - - - name: configure - run: ./autogen.sh $FEATURES - - name: Build with cov-build - run: | - /tmp/cov-analysis/bin/cov-build --dir cov-int make -j2 - - name: Submit the result to Coverity Scan - run: | - tar caf ultragrid.tar.xz cov-int - result=$(curl -Sf --form token=$coverity_token \ - --form email=pulec@cesnet.cz \ - --form file=@ultragrid.tar.xz \ - --form version="$(date +%F)" \ - --form description="master build" \ - https://scan.coverity.com/builds?project=UltraGrid) - echo "$result" - if ! expr "$result" : 'Build successfully submitted.' 
>/dev/null; then
-            exit 1
-          fi
-
-# vi: set expandtab sw=2:
diff --git a/src/control_socket.cpp b/src/control_socket.cpp
index 8b8595ea6..3e0507b34 100644
--- a/src/control_socket.cpp
+++ b/src/control_socket.cpp
@@ -88,7 +88,7 @@ using namespace std;
 
 struct client {
         fd_t fd;
-        char buff[1024];
+        char buff[2048];
         int buff_len;
 
         struct client *prev;
@@ -655,17 +655,25 @@ static int process_msg(struct control_state *s, fd_t client_fd, char *message, s
                 dump_tree(s->root_module, 0);
                 resp = new_response(RESPONSE_OK, NULL);
         } else { // assume message in format "path message"
                 struct msg_universal *msg = (struct msg_universal *)
                         new_message(sizeof(struct msg_universal));
 
                 if (strchr(message, ' ')) {
-                        memcpy(path, message, strchr(message, ' ') - message);
-                        strncpy(msg->text, strchr(message, ' ') + 1, sizeof(path) - 1);
+                        size_t path_len = strchr(message, ' ') - message;
+                        if (path_len >= sizeof(path)) {
+                                path_len = sizeof(path) - 1; // do not overflow path
+                        }
+                        memcpy(path, message, path_len);
+                        path[path_len] = '\0';
+                        strncpy(msg->text, strchr(message, ' ') + 1, sizeof(msg->text) - 1);
+
+                        // if the path is "root", send directly to the root module
+                        if (strcmp(path, "root") == 0) {
+                                path[0] = '\0';
+                        }
                 } else {
-                        strncpy(path, message, sizeof(path) - 1); // empty message ??
+                        path[0] = '\0';
+                        strncpy(msg->text, message, sizeof(msg->text) - 1);
                 }
 
-                resp = send_message(s->root_module, path, (struct message *) msg);
+                resp = send_message_sync(s->root_module, path, (struct message *) msg, 100, 0);
         }
 
         if(!resp) {
@@ -695,8 +703,12 @@
         if (ret < 0) {
                 socket_error("Unable to write response");
         }
+#ifndef _WIN32
+        // best-effort flush on POSIX systems (note that fsync() has no effect
+        // on sockets and may simply fail with EINVAL)
+        fsync(fd);
+#endif
 
         free_response(resp);
 }
 
 static bool parse_msg(char *buffer, int buffer_len, /* out */ char *message, int *new_buffer_len)
@@ -852,7 +864,6 @@
                         socket_error("[control socket] accept");
                         continue;
                 }
-
                 // all remote sockets are written sequentially so
                 // we don't want to block if one gets stuck
                 set_socket_nonblock(fd);
@@ -1081,10 +1093,14 @@ static void print_control_help() {
                      " - (un)mutes audio sender or receiver\n"
                      TBOLD("\tpostprocess | flush") "\n"
                      TBOLD("\tdump-tree")"\n");
+        color_printf("\nHD-RUM Translator commands:\n"
+                     TBOLD("\tcreate-port <host>:<port> [<compression>]") " - create new output port\n"
+                     TBOLD("\tdelete-port <index | host>") " - remove output port\n"
+                     TBOLD("\tlist-ports") " - show all configured output ports and their IP addresses\n");
         color_printf("\nOther commands can be issued directly to individual "
                      "modules (see \"" TBOLD("dump-tree") "\"), eg.:\n"
                      "\t" TBOLD("capture.filter mirror") "\n"
                      "\nSometimes those modules support help (eg. \"" TBOLD("capture.filter help") "\")\n\n");
         color_printf(TBOLD(u8"¹") " audio commands applying to receiver\n\n");
 }
diff --git a/src/control_socket.h b/src/control_socket.h
index e94520943..434b1b0a3 100644
--- a/src/control_socket.h
+++ b/src/control_socket.h
@@ -51,6 +51,7 @@ struct module;
  * @retval 0 if success
  */
 int control_init(int port, int connection_type, struct control_state **state, struct module *root_module, int force_ip_version);
+int control_get_port(struct control_state *state);
 struct control_state *get_control_state(struct module *mod);
 void control_start(struct control_state *state);
 void control_done(struct control_state *s);
diff --git a/src/hd-rum-translator/hd-rum-translator.cpp b/src/hd-rum-translator/hd-rum-translator.cpp
index 56649e7dc..cf7c454ef 100644
--- a/src/hd-rum-translator/hd-rum-translator.cpp
+++ b/src/hd-rum-translator/hd-rum-translator.cpp
@@ -106,6 +107,7 @@ struct replica {
     replica(const char *addr, uint16_t rx_port, uint16_t tx_port, int bufsize, struct module *parent, int force_ip_version) {
         magic = REPLICA_MAGIC;
         host = addr;
+        ip_address = addr; // Store IP address for identification
         m_tx_port = tx_port;
         sock = std::shared_ptr<socket_udp>(udp_init(addr, rx_port, tx_port, 255, force_ip_version, false), udp_exit);
         int mode = 0;
@@ -133,6 +134,7 @@ struct replica {
     struct module mod;
     uint32_t magic;
     string host;
+    string ip_address;
     int m_tx_port;
 
     enum type_t {
@@ -146,9 +148,13 @@
     socklen_t sockaddr_len;
 };
 
+void writer_new_message_callback(struct module *m);
+
 struct hd_rum_translator_state {
     hd_rum_translator_state() {
         init_root_module(&mod);
+        mod.priv_data = this;
+        mod.new_message = writer_new_message_callback;
         pthread_mutex_init(&qempty_mtx, NULL);
         pthread_mutex_init(&qfull_mtx, NULL);
         pthread_cond_init(&qempty_cond, NULL);
@@ -331,8 +337,18 @@ static int create_output_port(struct hd_rum_translator_state *s,
 {
     struct replica *rep;
     try {
-        rep = new replica(addr, rx_port, tx_port, bufsize, &s->mod,
+        // strip square brackets from a bracketed IPv6 address
+        char *processed_addr = strdup(addr);
+        size_t addr_len = strlen(processed_addr);
+        if (addr_len >= 2 && processed_addr[0] == '[' && processed_addr[addr_len - 1] == ']') {
+            memmove(processed_addr, processed_addr + 1, addr_len - 2);
+            processed_addr[addr_len - 2] = '\0';
+        }
+
+        rep = new replica(processed_addr, rx_port, tx_port, bufsize, &s->mod,
                 common->force_ip_version);
+        free(processed_addr);
+
         if(use_server_sock){
             rep->sock = s->server_socket;
         }
@@ -360,11 +376,24 @@
     }
 
     assert((unsigned) idx == s->replicas.size() - 1);
 
-    recompress_port_set_active(s->recompress, idx, compression != nullptr);
+    recompress_port_set_active(s->recompress, idx,
+            rep->type == replica::type_t::RECOMPRESS);
 
     return idx;
 }
 
+void writer_new_message_callback(struct module *m) {
+    // callback attached to the root module; invoked whenever a control
+    // message is posted, so the writer thread is woken up promptly
+    struct hd_rum_translator_state *s = (struct hd_rum_translator_state *) m->priv_data;
+    if (s) {
+        log_msg(LOG_LEVEL_DEBUG, "Message callback triggered, waking up writer thread\n");
+        pthread_mutex_lock(&s->qempty_mtx);
+        pthread_cond_signal(&s->qempty_cond);
+        pthread_mutex_unlock(&s->qempty_mtx);
+    }
+}
+
 static void *writer(void *arg)
 {
     struct hd_rum_translator_state *s =
@@ -384,88 +413,161 @@
     while ((msg = (struct msg_universal *) check_message(&s->mod))) {
         struct response *r = NULL;
         if (strncasecmp(msg->text, "delete-port ", strlen("delete-port ")) == 0) {
+            char buffer[2048];
             char *port_spec = msg->text + strlen("delete-port ");
             int index = -1;
-            if (isdigit(port_spec[0])) {
+            bool is_all_digits = true;
+            for (int j = 0; port_spec[j] != '\0'; j++) {
+                if (!isdigit(port_spec[j])) {
+                    is_all_digits = false;
+                    break;
+                }
+            }
+            if (is_all_digits && strlen(port_spec) > 0) {
                 int i = stoi(port_spec);
                 if (i >= 0 && i < (int) s->replicas.size()) {
                     index = i;
                 } else {
                     log_msg(LOG_LEVEL_WARNING, "Invalid port index: %d. Not removing.\n", i);
+                    snprintf(buffer, sizeof(buffer), "Invalid port index: %d. Not removing.\n", i);
                 }
             } else {
+                // not all digits, so treat it as an IP address or a port name
                 int i = 0;
+                // check for an IP address match first
                 for (auto r : s->replicas) {
-                    if (strcmp(r->mod.name, port_spec) == 0) {
+                    if (!r->ip_address.empty() && r->ip_address == port_spec) {
                         index = i;
                         break;
                    }
                    i++;
                }
+                // if not found by IP address, check by port name
+                if (index == -1) {
+                    i = 0;
+                    for (auto r : s->replicas) {
+                        if (strcmp(r->mod.name, port_spec) == 0) {
+                            index = i;
+                            break;
+                        }
+                        i++;
+                    }
+                }
+                // log if neither the IP address nor the name matches
                 if (index == -1) {
-                    log_msg(LOG_LEVEL_WARNING, "Unknown port name: %s. Not removing.\n", port_spec);
+                    log_msg(LOG_LEVEL_WARNING, "Unknown port (IP or name): %s. Not removing.\n", port_spec);
+                    snprintf(buffer, sizeof(buffer), "Unknown port (IP or name): %s. Not removing.\n", port_spec);
                }
            }
            if (index >= 0) {
                recompress_remove_port(s->recompress, index);
                delete s->replicas[index];
                s->replicas.erase(s->replicas.begin() + index);
-                log_msg(LOG_LEVEL_NOTICE, "Deleted output port %d.\n", index);
+                snprintf(buffer, sizeof(buffer), "Deleted output port %d.\n", index);
+                log_msg(LOG_LEVEL_NOTICE, "%s", buffer);
+                r = new_response(RESPONSE_OK, buffer);
+            } else {
+                r = new_response(RESPONSE_NOT_FOUND, "Port not found");
            }
+            free_message((struct message *) msg, r);
+            continue;
-        } else if (strncasecmp(msg->text, "create-port", strlen("create-port")) == 0) {
-            // format of parameters is either:
-            //     <host>:<port> [<compression>]
-            // or (for compat with older CoUniverse version)
-            //     <host> <port> [<compression>]
-            char *host_port, *port_str = NULL, *save_ptr;
-            char *host;
-            int tx_port;
-            strtok_r(msg->text, " ", &save_ptr);
-            host_port = strtok_r(NULL, " ", &save_ptr);
-            if (host_port && (strchr(host_port, ':') != NULL || (port_str = strtok_r(NULL, " ", &save_ptr)) != NULL)) {
-                if (port_str) {
-                    host = host_port;
-                    tx_port = stoi(port_str);
-                } else {
-                    tx_port = stoi(strrchr(host_port, ':') + 1);
-                    host = host_port;
-                    *strrchr(host_port, ':') = '\0';
-                }
-                // handle square brackets around an IPv6 address
-                if (host[0] == '[' && host[strlen(host) - 1] == ']') {
-                    host += 1;
-                    host[strlen(host) - 1] = '\0';
-                }
-            } else {
-                const char *err_msg = "wrong format";
-                log_msg(LOG_LEVEL_ERROR, "%s\n", err_msg);
-                free_message((struct message *) msg, new_response(RESPONSE_BAD_REQUEST, err_msg));
-                continue;
-            }
-            char *compress = strtok_r(NULL, " ", &save_ptr);
-
-            struct common_opts opts = { COMMON_OPTS_INIT };
-            int idx = create_output_port(s,
-                    host, 0, tx_port, s->bufsize, &opts,
-                    compress, nullptr, RATE_UNLIMITED, s->server_socket != nullptr);
-
-            if(idx < 0) {
-                free_message((struct message *) msg, new_response(RESPONSE_INT_SERV_ERR, "Cannot create output port."));
-                continue;
-            }
-
-            if(compress)
-                log_msg(LOG_LEVEL_NOTICE, "Created new transcoding output port %s:%d:0x%08" PRIx32 ".\n", host, tx_port, recompress_get_port_ssrc(s->recompress, idx));
-            else
-                log_msg(LOG_LEVEL_NOTICE, "Created new forwarding output port %s:%d.\n", host, tx_port);
-        } else {
-            r = new_response(RESPONSE_BAD_REQUEST, NULL);
-        }
-
-        free_message((struct message *) msg, r ? r : new_response(RESPONSE_OK, NULL));
+        } else if (strncasecmp(msg->text, "list-ports", strlen("list-ports")) == 0 ||
+                strncasecmp(msg->text, "query-ports", strlen("query-ports")) == 0) {
+            char buffer[2048];
+            // list all configured output ports and their IP addresses
+            string port_list = "\n";
+            if (s->replicas.empty()) {
+                port_list += "  No ports configured.\n";
+            } else {
+                for (size_t i = 0; i < s->replicas.size(); i++) {
+                    const auto& replica = s->replicas[i];
+                    const char* type_str = (replica->type == replica::type_t::USE_SOCK) ? "forwarding" :
+                            (replica->type == replica::type_t::RECOMPRESS) ? "transcoding" : "none";
+                    char port_info[512];
+                    snprintf(port_info, sizeof(port_info), "[%zu] %s:%d (%s) - %s\n",
+                            i, replica->ip_address.c_str(), replica->m_tx_port,
+                            replica->mod.name, type_str);
+                    port_list += port_info;
+                }
+            }
+            snprintf(buffer, sizeof(buffer), "Ports: %s\n", port_list.c_str());
+            log_msg(LOG_LEVEL_NOTICE, "%s", buffer);
+            r = new_response(RESPONSE_OK, buffer);
+            free_message((struct message *) msg, r);
+            continue;
+        } else if (strncasecmp(msg->text, "create-port", strlen("create-port")) == 0) {
+            // format of parameters is either:
+            //     <host>:<port> [<compression>]
+            // or (for compat with older CoUniverse version)
+            //     <host> <port> [<compression>]
+            char buffer[2048];
+            char *host_port, *port_str = NULL, *save_ptr;
+            char *host;
+            int tx_port;
+            strtok_r(msg->text, " ", &save_ptr);
+            host_port = strtok_r(NULL, " ", &save_ptr);
+            if (host_port && (strchr(host_port, ':') != NULL || (port_str = strtok_r(NULL, " ", &save_ptr)) != NULL)) {
+                if (port_str) {
+                    host = host_port;
+                    tx_port = stoi(port_str);
+                } else {
+                    tx_port = stoi(strrchr(host_port, ':') + 1);
+                    host = host_port;
+                    *strrchr(host_port, ':') = '\0';
+                }
+                // handle square brackets around an IPv6 address
+                if (host[0] == '[' && host[strlen(host) - 1] == ']') {
+                    host += 1;
+                    host[strlen(host) - 1] = '\0';
+                }
+            } else {
+                const char *err_msg = "wrong format";
+                log_msg(LOG_LEVEL_ERROR, "%s\n", err_msg);
+                free_message((struct message *) msg, new_response(RESPONSE_BAD_REQUEST, err_msg));
+                continue;
+            }
+            char *compress = strtok_r(NULL, " ", &save_ptr);
+
+            // refuse duplicates: check whether a replica with the same host
+            // and TX port already exists
+            bool exists = false;
+            for (auto r : s->replicas) {
+                if (r->ip_address == host && r->m_tx_port == tx_port) {
+                    exists = true;
+                    break;
+                }
+            }
+
+            if (exists) {
+                log_msg(LOG_LEVEL_ERROR, "Output port %s:%d already exists.\n", host, tx_port);
+                r = new_response(RESPONSE_CONFLICT, "Port already exists");
+                free_message((struct message *) msg, r);
+                continue;
+            }
+
+            struct common_opts opts = { COMMON_OPTS_INIT };
+            int idx = create_output_port(s,
+                    host, 0, tx_port, s->bufsize, &opts,
+                    compress, nullptr, RATE_UNLIMITED, s->server_socket != nullptr);
+
+            if(idx < 0) {
+                free_message((struct message *) msg, new_response(RESPONSE_INT_SERV_ERR, "Cannot create output port."));
+                continue;
+            }
+
+            if(compress) {
+                snprintf(buffer, sizeof(buffer), "Created new transcoding output port %s:%d:0x%08" PRIx32 ".\n", host, tx_port, recompress_get_port_ssrc(s->recompress, idx));
+            } else {
+                snprintf(buffer, sizeof(buffer), "Created new forwarding output port %s:%d.\n", host, tx_port);
+            }
+            log_msg(LOG_LEVEL_NOTICE, "%s", buffer);
+            r = new_response(RESPONSE_OK, buffer);
+            free_message((struct message *) msg, r);
+            continue;
+        } else {
+            // unknown command: still consume the message and respond
+            free_message((struct message *) msg, new_response(RESPONSE_BAD_REQUEST, NULL));
+            continue;
+        }
     }
 
-        // then process incoming packets
         while (s->qhead != s->qtail) {
             if(s->qhead->size == 0) { // poisoned pill
@@ -482,7 +584,7 @@
 
         // distribute it to output ports that don't need transcoding
 #ifdef _WIN32
-        // send it asynchronously in MSW (performance optimalization)
+        // send it asynchronously in MSW (performance optimization)
         SleepEx(0, true); // allow system to call our completion routines in APC
         int ref = 0;
         for (unsigned int i = 0; i < s->replicas.size(); i++) {
@@ -525,14 +627,14 @@
             pthread_cond_signal(&s->qfull_cond);
             pthread_mutex_unlock(&s->qfull_mtx);
         }
         pthread_mutex_lock(&s->qempty_mtx);
-        if (s->qempty)
+        if (s->qempty) {
+            // wait until woken up by new packets or by a control message
             pthread_cond_wait(&s->qempty_cond, &s->qempty_mtx);
+        }
         s->qempty = 1;
         pthread_mutex_unlock(&s->qempty_mtx);
     }
-
     return NULL;
 }
diff --git a/src/messaging.h b/src/messaging.h
index 8e93c1f4a..8b8e320e4 100644
--- a/src/messaging.h
+++ b/src/messaging.h
@@ -62,6 +62,7 @@ struct response;
 #define RESPONSE_BAD_REQUEST  400
 #define RESPONSE_NOT_FOUND    404
 #define RESPONSE_REQ_TIMEOUT  408
+#define RESPONSE_CONFLICT     409
 #define RESPONSE_INT_SERV_ERR 500
 #define RESPONSE_NOT_IMPL     501
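
Two short, self-contained sketches follow for reviewers who want to exercise the changes above. Both are illustrative only: the helper names, the port number 5054, and the single-read response handling are assumptions, not part of the UltraGrid API or of this patch.

First, the wakeup idiom that the writer()/writer_new_message_callback() changes rely on: the flag is tested and pthread_cond_wait() entered under the same mutex the notifier takes before signalling, so a wakeup cannot be lost between the test and the wait (which is why the lock around the wait is kept in the hunk above).

#include <pthread.h>
#include <cstdio>

// Sketch of the wakeup pattern used by writer()/writer_new_message_callback():
// the waiter holds the mutex across the flag test and pthread_cond_wait()
// (which atomically releases it while sleeping); the notifier takes the same
// mutex before signalling, so the wakeup cannot fall into the gap.
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending = false;

static void *waiter(void *) {
        pthread_mutex_lock(&mtx);
        while (!work_pending) { // loop also guards against spurious wakeups
                pthread_cond_wait(&cond, &mtx);
        }
        work_pending = false;
        pthread_mutex_unlock(&mtx);
        printf("woken up\n");
        return NULL;
}

static void notify(void) {
        pthread_mutex_lock(&mtx);
        work_pending = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&mtx);
}

int main() {
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        notify();
        pthread_join(t, NULL);
        return 0;
}

Second, a minimal control-socket client driving the new commands end-to-end. It assumes the translator's control port listens on plain TCP at 127.0.0.1:5054 and that one read() is enough to fetch a reply - both simplifications for a smoke test. Replies carry the numeric codes from messaging.h, so the duplicate create-port should come back as 409 (RESPONSE_CONFLICT) and the second delete-port as 404 (RESPONSE_NOT_FOUND).

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <string>

// command() writes one command line and does a single read() for the reply -
// good enough for a smoke test, not a full client (replies may arrive split).
static std::string command(int fd, const std::string &cmd) {
        std::string line = cmd + "\r\n";
        if (write(fd, line.c_str(), line.size()) < 0) {
                perror("write");
                return "";
        }
        char buf[2048];
        ssize_t n = read(fd, buf, sizeof buf - 1);
        return n > 0 ? std::string(buf, n) : "";
}

int main() {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        sockaddr_in addr = {};
        addr.sin_family = AF_INET;
        addr.sin_port = htons(5054); // hypothetical control port
        inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
        if (connect(fd, (sockaddr *) &addr, sizeof addr) < 0) {
                perror("connect");
                return 1;
        }
        printf("%s", command(fd, "create-port 10.0.0.2:5004").c_str()); // expect 200
        printf("%s", command(fd, "create-port 10.0.0.2:5004").c_str()); // expect 409
        printf("%s", command(fd, "list-ports").c_str());
        printf("%s", command(fd, "delete-port 0").c_str());
        printf("%s", command(fd, "delete-port 0").c_str());             // expect 404
        close(fd);
        return 0;
}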