From 036a58735e582a1622e42786777a8a0726378824 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 05:50:16 -0500 Subject: [PATCH 1/9] Upgrade trunk (#8229) Co-authored-by: vidplace7 <1779290+vidplace7@users.noreply.github.com> --- .trunk/trunk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.trunk/trunk.yaml b/.trunk/trunk.yaml index 74b850b64..8c981850d 100644 --- a/.trunk/trunk.yaml +++ b/.trunk/trunk.yaml @@ -16,7 +16,7 @@ lint: - bandit@1.8.6 - trivy@0.67.0 - taplo@0.10.0 - - ruff@0.13.2 + - ruff@0.13.3 - isort@6.1.0 - markdownlint@0.45.0 - oxipng@9.1.5 From 627c0145e7df9f52109a822a73c35f08a2951a43 Mon Sep 17 00:00:00 2001 From: Ben Meadors Date: Mon, 6 Oct 2025 07:56:27 -0500 Subject: [PATCH 2/9] Centralize getNodeId and fix references to owner.id (#8230) --- src/mesh/NodeDB.cpp | 7 ++++ src/mesh/NodeDB.h | 4 ++ src/mesh/PhoneAPI.cpp | 2 + src/mesh/wifi/WiFiAPClient.cpp | 4 +- src/mqtt/MQTT.cpp | 38 ++++++++++++++----- src/serialization/MeshPacketSerializer.cpp | 2 +- .../MeshPacketSerializer_nRF52.cpp | 2 +- test/test_mqtt/MQTT.cpp | 4 +- 8 files changed, 47 insertions(+), 16 deletions(-) diff --git a/src/mesh/NodeDB.cpp b/src/mesh/NodeDB.cpp index e3240462d..dec8411fe 100644 --- a/src/mesh/NodeDB.cpp +++ b/src/mesh/NodeDB.cpp @@ -1874,6 +1874,13 @@ uint8_t NodeDB::getMeshNodeChannel(NodeNum n) return info->channel; } +std::string NodeDB::getNodeId() const +{ + char nodeId[16]; + snprintf(nodeId, sizeof(nodeId), "!%08x", myNodeInfo.my_node_num); + return std::string(nodeId); +} + /// Find a node in our DB, return null for missing /// NOTE: This function might be called from an ISR meshtastic_NodeInfoLite *NodeDB::getMeshNode(NodeNum n) diff --git a/src/mesh/NodeDB.h b/src/mesh/NodeDB.h index f73f64f92..e8724f2c9 100644 --- a/src/mesh/NodeDB.h +++ b/src/mesh/NodeDB.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "MeshTypes.h" @@ -203,6 +204,9 @@ class NodeDB /// @return our node number NodeNum getNodeNum() { return myNodeInfo.my_node_num; } + /// @return our node ID as a string in the format "!xxxxxxxx" + std::string getNodeId() const; + // @return last byte of a NodeNum, 0xFF if it ended at 0x00 uint8_t getLastByteOfNodeNum(NodeNum num) { return (uint8_t)((num & 0xFF) ? 
(num & 0xFF) : 0xFF); } diff --git a/src/mesh/PhoneAPI.cpp b/src/mesh/PhoneAPI.cpp index 07f314415..210e0ba06 100644 --- a/src/mesh/PhoneAPI.cpp +++ b/src/mesh/PhoneAPI.cpp @@ -433,6 +433,8 @@ size_t PhoneAPI::getFromRadio(uint8_t *buf) case STATE_SEND_OTHER_NODEINFOS: { LOG_DEBUG("Send known nodes"); if (nodeInfoForPhone.num != 0) { + // Just in case we stored a different user.id in the past, but should never happen going forward + sprintf(nodeInfoForPhone.user.id, "!%08x", nodeInfoForPhone.num); LOG_INFO("nodeinfo: num=0x%x, lastseen=%u, id=%s, name=%s", nodeInfoForPhone.num, nodeInfoForPhone.last_heard, nodeInfoForPhone.user.id, nodeInfoForPhone.user.long_name); fromRadioScratch.which_payload_variant = meshtastic_FromRadio_node_info_tag; diff --git a/src/mesh/wifi/WiFiAPClient.cpp b/src/mesh/wifi/WiFiAPClient.cpp index 1133ad424..7d210dd33 100644 --- a/src/mesh/wifi/WiFiAPClient.cpp +++ b/src/mesh/wifi/WiFiAPClient.cpp @@ -94,11 +94,11 @@ static void onNetworkConnected() // ESPmDNS (ESP32) and SimpleMDNS (RP2040) have slightly different APIs for adding TXT records #ifdef ARCH_ESP32 MDNS.addServiceTxt("meshtastic", "tcp", "shortname", String(owner.short_name)); - MDNS.addServiceTxt("meshtastic", "tcp", "id", String(owner.id)); + MDNS.addServiceTxt("meshtastic", "tcp", "id", String(nodeDB->getNodeId().c_str())); // ESP32 prints obtained IP address in WiFiEvent #elif defined(ARCH_RP2040) MDNS.addServiceTxt("meshtastic", "shortname", owner.short_name); - MDNS.addServiceTxt("meshtastic", "id", owner.id); + MDNS.addServiceTxt("meshtastic", "id", nodeDB->getNodeId().c_str()); LOG_INFO("Obtained IP address: %s", WiFi.localIP().toString().c_str()); #endif } diff --git a/src/mqtt/MQTT.cpp b/src/mqtt/MQTT.cpp index 7f7a9d511..8ce352f14 100644 --- a/src/mqtt/MQTT.cpp +++ b/src/mqtt/MQTT.cpp @@ -60,7 +60,9 @@ inline void onReceiveProto(char *topic, byte *payload, size_t length) return; } const meshtastic_Channel &ch = channels.getByName(e.channel_id); - if (strcmp(e.gateway_id, owner.id) == 0) { + // Generate node ID from nodenum for comparison + std::string nodeId = nodeDB->getNodeId(); + if (strcmp(e.gateway_id, nodeId.c_str()) == 0) { // Generate an implicit ACK towards ourselves (handled and processed only locally!) for this message. // We do this because packets are not rebroadcasted back into MQTT anymore and we assume that at least one node // receives it when we get our own packet back. Then we'll stop our retransmissions. @@ -128,8 +130,10 @@ inline void onReceiveProto(char *topic, byte *payload, size_t length) // returns true if this is a valid JSON envelope which we accept on downlink inline bool isValidJsonEnvelope(JSONObject &json) { + // Generate node ID from nodenum for comparison + std::string nodeId = nodeDB->getNodeId(); // if "sender" is provided, avoid processing packets we uplinked - return (json.find("sender") != json.end() ? (json["sender"]->AsString().compare(owner.id) != 0) : true) && + return (json.find("sender") != json.end() ? (json["sender"]->AsString().compare(nodeId) != 0) : true) && (json.find("hopLimit") != json.end() ? 
json["hopLimit"]->IsNumber() : true) && // hop limit should be a number (json.find("from") != json.end()) && json["from"]->IsNumber() && (json["from"]->AsNumber() == nodeDB->getNodeNum()) && // only accept message if the "from" is us @@ -297,7 +301,9 @@ bool connectPubSub(const PubSubConfig &config, PubSubClient &pubSub, Client &cli LOG_INFO("Connecting directly to MQTT server %s, port: %d, username: %s, password: %s", config.serverAddr.c_str(), config.serverPort, config.mqttUsername, config.mqttPassword); - const bool connected = pubSub.connect(owner.id, config.mqttUsername, config.mqttPassword); + // Generate node ID from nodenum for client identification + std::string nodeId = nodeDB->getNodeId(); + const bool connected = pubSub.connect(nodeId.c_str(), config.mqttUsername, config.mqttPassword); if (connected) { LOG_INFO("MQTT connected"); } else { @@ -687,11 +693,14 @@ void MQTT::publishQueuedMessages() if (jsonString.length() == 0) return; + // Generate node ID from nodenum for topic + std::string nodeId = nodeDB->getNodeId(); + std::string topicJson; if (env.packet->pki_encrypted) { - topicJson = jsonTopic + "PKI/" + owner.id; + topicJson = jsonTopic + "PKI/" + nodeId; } else { - topicJson = jsonTopic + env.channel_id + "/" + owner.id; + topicJson = jsonTopic + env.channel_id + "/" + nodeId; } LOG_INFO("JSON publish message to %s, %u bytes: %s", topicJson.c_str(), jsonString.length(), jsonString.c_str()); publish(topicJson.c_str(), jsonString.c_str(), false); @@ -749,10 +758,14 @@ void MQTT::onSend(const meshtastic_MeshPacket &mp_encrypted, const meshtastic_Me return; // Don't upload a still-encrypted PKI packet if not encryption_enabled } - const meshtastic_ServiceEnvelope env = { - .packet = const_cast(p), .channel_id = const_cast(channelId), .gateway_id = owner.id}; + // Generate node ID from nodenum for service envelope + std::string nodeId = nodeDB->getNodeId(); + + const meshtastic_ServiceEnvelope env = {.packet = const_cast(p), + .channel_id = const_cast(channelId), + .gateway_id = const_cast(nodeId.c_str())}; size_t numBytes = pb_encode_to_bytes(bytes, sizeof(bytes), &meshtastic_ServiceEnvelope_msg, &env); - std::string topic = cryptTopic + channelId + "/" + owner.id; + std::string topic = cryptTopic + channelId + "/" + nodeId; if (moduleConfig.mqtt.proxy_to_client_enabled || this->isConnectedDirectly()) { LOG_DEBUG("MQTT Publish %s, %u bytes", topic.c_str(), numBytes); @@ -766,7 +779,9 @@ void MQTT::onSend(const meshtastic_MeshPacket &mp_encrypted, const meshtastic_Me auto jsonString = MeshPacketSerializer::JsonSerialize(&mp_decoded); if (jsonString.length() == 0) return; - std::string topicJson = jsonTopic + channelId + "/" + owner.id; + // Generate node ID from nodenum for JSON topic + std::string nodeIdForJson = nodeDB->getNodeId(); + std::string topicJson = jsonTopic + channelId + "/" + nodeIdForJson; LOG_INFO("JSON publish message to %s, %u bytes: %s", topicJson.c_str(), jsonString.length(), jsonString.c_str()); publish(topicJson.c_str(), jsonString.c_str(), false); #endif // ARCH_NRF52 NRF52_USE_JSON @@ -845,11 +860,14 @@ void MQTT::perhapsReportToMap() mp->decoded.payload.size = pb_encode_to_bytes(mp->decoded.payload.bytes, sizeof(mp->decoded.payload.bytes), &meshtastic_MapReport_msg, &mapReport); + // Generate node ID from nodenum for service envelope + std::string nodeId = nodeDB->getNodeId(); + // Encode the MeshPacket into a binary ServiceEnvelope and publish const meshtastic_ServiceEnvelope se = { .packet = mp, .channel_id = (char 
*)channels.getGlobalId(channels.getPrimaryIndex()), // Use primary channel as the channel_id - .gateway_id = owner.id}; + .gateway_id = const_cast(nodeId.c_str())}; size_t numBytes = pb_encode_to_bytes(bytes, sizeof(bytes), &meshtastic_ServiceEnvelope_msg, &se); LOG_INFO("MQTT Publish map report to %s", mapTopic.c_str()); diff --git a/src/serialization/MeshPacketSerializer.cpp b/src/serialization/MeshPacketSerializer.cpp index 5a1f8ed7e..b31d2dc2e 100644 --- a/src/serialization/MeshPacketSerializer.cpp +++ b/src/serialization/MeshPacketSerializer.cpp @@ -413,7 +413,7 @@ std::string MeshPacketSerializer::JsonSerialize(const meshtastic_MeshPacket *mp, jsonObj["from"] = new JSONValue((unsigned int)mp->from); jsonObj["channel"] = new JSONValue((unsigned int)mp->channel); jsonObj["type"] = new JSONValue(msgType.c_str()); - jsonObj["sender"] = new JSONValue(owner.id); + jsonObj["sender"] = new JSONValue(nodeDB->getNodeId().c_str()); if (mp->rx_rssi != 0) jsonObj["rssi"] = new JSONValue((int)mp->rx_rssi); if (mp->rx_snr != 0) diff --git a/src/serialization/MeshPacketSerializer_nRF52.cpp b/src/serialization/MeshPacketSerializer_nRF52.cpp index e0daa1a88..353c710a1 100644 --- a/src/serialization/MeshPacketSerializer_nRF52.cpp +++ b/src/serialization/MeshPacketSerializer_nRF52.cpp @@ -353,7 +353,7 @@ std::string MeshPacketSerializer::JsonSerialize(const meshtastic_MeshPacket *mp, jsonObj["from"] = (unsigned int)mp->from; jsonObj["channel"] = (unsigned int)mp->channel; jsonObj["type"] = msgType.c_str(); - jsonObj["sender"] = owner.id; + jsonObj["sender"] = nodeDB->getNodeId().c_str(); if (mp->rx_rssi != 0) jsonObj["rssi"] = (int)mp->rx_rssi; if (mp->rx_snr != 0) diff --git a/test/test_mqtt/MQTT.cpp b/test/test_mqtt/MQTT.cpp index ede3d22b7..8726d1ccb 100644 --- a/test/test_mqtt/MQTT.cpp +++ b/test/test_mqtt/MQTT.cpp @@ -591,7 +591,7 @@ void test_receiveEncryptedPKITopicToUs(void) // Should ignore messages published to MQTT by this gateway. 
void test_receiveIgnoresOwnPublishedMessages(void) { - unitTest->publish(&decoded, owner.id); + unitTest->publish(&decoded, nodeDB->getNodeId().c_str()); TEST_ASSERT_TRUE(mockRouter->packets_.empty()); TEST_ASSERT_TRUE(mockRoutingModule->ackNacks_.empty()); @@ -603,7 +603,7 @@ void test_receiveAcksOwnSentMessages(void) meshtastic_MeshPacket p = decoded; p.from = myNodeInfo.my_node_num; - unitTest->publish(&p, owner.id); + unitTest->publish(&p, nodeDB->getNodeId().c_str()); TEST_ASSERT_TRUE(mockRouter->packets_.empty()); TEST_ASSERT_EQUAL(1, mockRoutingModule->ackNacks_.size()); From 329a494ce2e35b4c13c726602f4be1fad24d54c7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 12:59:40 -0500 Subject: [PATCH 3/9] Update meshtastic-ArduinoThread digest to b841b04 (#8233) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- platformio.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platformio.ini b/platformio.ini index b6d6733e3..2e6f851df 100644 --- a/platformio.ini +++ b/platformio.ini @@ -70,7 +70,7 @@ lib_deps = # renovate: datasource=git-refs depName=meshtastic-TinyGPSPlus packageName=https://github.com/meshtastic/TinyGPSPlus gitBranch=master https://github.com/meshtastic/TinyGPSPlus/archive/71a82db35f3b973440044c476d4bcdc673b104f4.zip # renovate: datasource=git-refs depName=meshtastic-ArduinoThread packageName=https://github.com/meshtastic/ArduinoThread gitBranch=master - https://github.com/meshtastic/ArduinoThread/archive/7c3ee9e1951551b949763b1f5280f8db1fa4068d.zip + https://github.com/meshtastic/ArduinoThread/archive/b841b0415721f1341ea41cccfb4adccfaf951567.zip # renovate: datasource=custom.pio depName=Nanopb packageName=nanopb/library/Nanopb nanopb/Nanopb@0.4.91 # renovate: datasource=custom.pio depName=ErriezCRC32 packageName=erriez/library/ErriezCRC32 From 87e3540f48eb17cffd6cda2cbebef274ced29885 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 12:59:50 -0500 Subject: [PATCH 4/9] Update meshtastic/device-ui digest to f920b12 (#8234) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- platformio.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platformio.ini b/platformio.ini index 2e6f851df..4ae2da54f 100644 --- a/platformio.ini +++ b/platformio.ini @@ -120,7 +120,7 @@ lib_deps = [device-ui_base] lib_deps = # renovate: datasource=git-refs depName=meshtastic/device-ui packageName=https://github.com/meshtastic/device-ui gitBranch=master - https://github.com/meshtastic/device-ui/archive/505ffadaa7a931df5dc8153229b719a07bbb028c.zip + https://github.com/meshtastic/device-ui/archive/f920b1273df750c2e3e01385d3ba30553b913afa.zip ; Common libs for environmental measurements in telemetry module [environmental_base] From 518680514f6249866ade68b7b11c5906ee1599ca Mon Sep 17 00:00:00 2001 From: Tom Fifield Date: Tue, 7 Oct 2025 13:37:13 +1100 Subject: [PATCH 5/9] Actions: Simplify matrices, cleanup build_one_* (#8218) (#8239) Co-authored-by: Austin --- .github/workflows/build_firmware.yml | 3 + .github/workflows/build_one_arch.yml | 302 +----------------- .github/workflows/build_one_target.yml | 173 +--------- .github/workflows/main_matrix.yml | 135 ++------ .github/workflows/merge_queue.yml | 134 +------- bin/generate_ci_matrix.py | 55 ++-- .../native/portduino-buildroot/platformio.ini | 1 + variants/native/portduino/platformio.ini | 3 +- 8 files changed, 92 
insertions(+), 714 deletions(-) diff --git a/.github/workflows/build_firmware.yml b/.github/workflows/build_firmware.yml index 2ef67405a..b62729332 100644 --- a/.github/workflows/build_firmware.yml +++ b/.github/workflows/build_firmware.yml @@ -19,6 +19,8 @@ jobs: pio-build: name: build-${{ inputs.platform }} runs-on: ubuntu-24.04 + outputs: + artifact-id: ${{ steps.upload.outputs.artifact-id }} steps: - uses: actions/checkout@v5 with: @@ -55,6 +57,7 @@ jobs: - name: Store binaries as an artifact uses: actions/upload-artifact@v4 + id: upload with: name: firmware-${{ inputs.platform }}-${{ inputs.pio_env }}-${{ inputs.version }}.zip overwrite: true diff --git a/.github/workflows/build_one_arch.yml b/.github/workflows/build_one_arch.yml index f5352b3c4..6d9134941 100644 --- a/.github/workflows/build_one_arch.yml +++ b/.github/workflows/build_one_arch.yml @@ -3,6 +3,7 @@ name: Build One Arch on: workflow_dispatch: inputs: + # trunk-ignore(checkov/CKV_GHA_7) arch: type: choice options: @@ -16,10 +17,13 @@ on: - stm32 - native +permissions: read-all + +env: + INPUT_ARCH: ${{ github.event.inputs.arch }} + jobs: setup: - strategy: - fail-fast: false runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 @@ -31,23 +35,11 @@ jobs: - name: Generate matrix id: jsonStep run: | - if [[ "$GITHUB_HEAD_REF" == "" ]]; then - TARGETS=$(./bin/generate_ci_matrix.py ${{inputs.arch}} extra) - else - TARGETS=$(./bin/generate_ci_matrix.py ${{inputs.arch}} pr) - fi - echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF Targets: $TARGETS" - echo "${{inputs.arch}}=$(jq -cn --argjson environments "$TARGETS" '{board: $environments}')" >> $GITHUB_OUTPUT + TARGETS=$(./bin/generate_ci_matrix.py $INPUT_ARCH --level extra) + echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF" + echo "selected_arch=$TARGETS" >> $GITHUB_OUTPUT outputs: - esp32: ${{ steps.jsonStep.outputs.esp32 }} - esp32s3: ${{ steps.jsonStep.outputs.esp32s3 }} - esp32c3: ${{ steps.jsonStep.outputs.esp32c3 }} - esp32c6: ${{ steps.jsonStep.outputs.esp32c6 }} - nrf52840: ${{ steps.jsonStep.outputs.nrf52840 }} - rp2040: ${{ steps.jsonStep.outputs.rp2040 }} - rp2350: ${{ steps.jsonStep.outputs.rp2350 }} - stm32: ${{ steps.jsonStep.outputs.stm32 }} - check: ${{ steps.jsonStep.outputs.check }} + selected_arch: ${{ steps.jsonStep.outputs.selected_arch }} version: runs-on: ubuntu-latest @@ -64,101 +56,18 @@ jobs: long: ${{ steps.version.outputs.long }} deb: ${{ steps.version.outputs.deb }} - build-esp32: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'esp32'}} + build: + if: ${{ github.event_name != 'workflow_dispatch' }} needs: [setup, version] strategy: fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32) }} + matrix: + build: ${{ fromJson(needs.setup.outputs.selected_arch) }} uses: ./.github/workflows/build_firmware.yml with: version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32 - - build-esp32s3: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'esp32s3'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32s3) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32s3 - - build-esp32c3: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'esp32c3'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32c3) }} - uses: 
./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c3 - - build-esp32c6: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'esp32c6'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32c6) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c6 - - build-nrf52840: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'nrf52840'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.nrf52840) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: nrf52840 - - build-rp2040: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'rp2040'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.rp2040) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2040 - - build-rp2350: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'rp2350'}} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.rp2350) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2350 - - build-stm32: - if: ${{ github.event_name != 'workflow_dispatch' || inputs.arch == 'stm32' }} - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.stm32) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: stm32 + pio_env: ${{ matrix.build.board }} + platform: ${{ matrix.build.arch }} build-debian-src: if: ${{ github.repository == 'meshtastic/firmware' && github.event_name != 'workflow_dispatch' || inputs.arch == 'native' }} @@ -252,18 +161,7 @@ jobs: - rp2350 - stm32 runs-on: ubuntu-latest - needs: - [ - version, - build-esp32, - build-esp32s3, - build-esp32c3, - build-esp32c6, - build-nrf52840, - build-rp2040, - build-rp2350, - build-stm32, - ] + needs: [version, build] steps: - name: Checkout code uses: actions/checkout@v5 @@ -332,169 +230,3 @@ jobs: name: firmware-${{inputs.arch}}-${{ needs.version.outputs.long }} description: "Download firmware-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip. This artifact will be available for 90 days from creation" github-token: ${{ secrets.GITHUB_TOKEN }} - - release-artifacts: - runs-on: ubuntu-latest - if: ${{ github.event_name == 'workflow_dispatch' }} - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - needs: - - version - - gather-artifacts - - build-debian-src - - package-pio-deps-native-tft - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - name: Create release - uses: softprops/action-gh-release@v2 - id: create_release - with: - draft: true - prerelease: true - name: Meshtastic Firmware ${{ needs.version.outputs.long }} Alpha - tag_name: v${{ needs.version.outputs.long }} - body: | - Autogenerated by github action, developer should edit as required before publishing... 
- - - name: Download source deb - uses: actions/download-artifact@v5 - with: - pattern: firmware-debian-${{ needs.version.outputs.deb }}~UNRELEASED-src - merge-multiple: true - path: ./output/debian-src - - - name: Download `native-tft` pio deps - uses: actions/download-artifact@v5 - with: - pattern: platformio-deps-native-tft-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./output/pio-deps-native-tft - - - name: Zip Linux sources - working-directory: output - run: | - zip -j -9 -r ./meshtasticd-${{ needs.version.outputs.deb }}-src.zip ./debian-src - zip -9 -r ./platformio-deps-native-tft-${{ needs.version.outputs.long }}.zip ./pio-deps-native-tft - - # For diagnostics - - name: Display structure of downloaded files - run: ls -lR - - - name: Add Linux sources to GtiHub Release - # Only run when targeting master branch with workflow_dispatch - if: ${{ github.ref_name == 'master' }} - run: | - gh release upload v${{ needs.version.outputs.long }} ./output/meshtasticd-${{ needs.version.outputs.deb }}-src.zip - gh release upload v${{ needs.version.outputs.long }} ./output/platformio-deps-native-tft-${{ needs.version.outputs.long }}.zip - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - release-firmware: - strategy: - fail-fast: false - matrix: - arch: - - esp32 - - esp32s3 - - esp32c3 - - esp32c6 - - nrf52840 - - rp2040 - - rp2350 - - stm32 - runs-on: ubuntu-latest - if: ${{ github.event_name == 'workflow_dispatch' }} - needs: [release-artifacts, version] - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - uses: actions/download-artifact@v5 - with: - pattern: firmware-${{inputs.arch}}-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./output - - - name: Display structure of downloaded files - run: ls -lR - - - name: Device scripts permissions - run: | - chmod +x ./output/device-install.sh - chmod +x ./output/device-update.sh - - - name: Zip firmware - run: zip -j -9 -r ./firmware-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip ./output - - - uses: actions/download-artifact@v5 - with: - name: debug-elfs-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip - merge-multiple: true - path: ./elfs - - - name: Zip debug elfs - run: zip -j -9 -r ./debug-elfs-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip ./elfs - - # For diagnostics - - name: Display structure of downloaded files - run: ls -lR - - - name: Add bins and debug elfs to GitHub Release - # Only run when targeting master branch with workflow_dispatch - if: ${{ github.ref_name == 'master' }} - run: | - gh release upload v${{ needs.version.outputs.long }} ./firmware-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip - gh release upload v${{ needs.version.outputs.long }} ./debug-elfs-${{inputs.arch}}-${{ needs.version.outputs.long }}.zip - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - publish-firmware: - runs-on: ubuntu-24.04 - if: ${{ github.event_name == 'workflow_dispatch' }} - needs: [release-firmware, version] - env: - targets: |- - esp32,esp32s3,esp32c3,esp32c6,nrf52840,rp2040,rp2350,stm32 - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - uses: actions/download-artifact@v5 - with: - pattern: firmware-{${{ env.targets }}}-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./publish - - - name: Publish firmware to meshtastic.github.io - uses: 
peaceiris/actions-gh-pages@v4 - env: - # On event/* branches, use the event name as the destination prefix - DEST_PREFIX: ${{ contains(github.ref_name, 'event/') && format('{0}/', github.ref_name) || '' }} - with: - deploy_key: ${{ secrets.DIST_PAGES_DEPLOY_KEY }} - external_repository: meshtastic/meshtastic.github.io - publish_branch: master - publish_dir: ./publish - destination_dir: ${{ env.DEST_PREFIX }}firmware-${{ needs.version.outputs.long }} - keep_files: true - user_name: github-actions[bot] - user_email: github-actions[bot]@users.noreply.github.com - commit_message: ${{ needs.version.outputs.long }} - enable_jekyll: true diff --git a/.github/workflows/build_one_target.yml b/.github/workflows/build_one_target.yml index 3c83ce960..ba1d5080f 100644 --- a/.github/workflows/build_one_target.yml +++ b/.github/workflows/build_one_target.yml @@ -3,6 +3,7 @@ name: Build One Target on: workflow_dispatch: inputs: + # trunk-ignore(checkov/CKV_GHA_7) arch: type: choice options: @@ -19,11 +20,13 @@ on: type: string required: false description: Choose the target board, e.g. nrf52_promicro_diy_tcxo. If blank, will find available targets. - # find-target: # type: boolean # default: true # description: 'Find the available targets' + +permissions: read-all + jobs: find-targets: if: ${{ inputs.target == '' }} @@ -51,13 +54,13 @@ jobs: - name: Generate matrix id: jsonStep run: | - TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} extra) + TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} --level extra) echo "Name: $GITHUB_REF_NAME" >> $GITHUB_STEP_SUMMARY echo "Base: $GITHUB_BASE_REF" >> $GITHUB_STEP_SUMMARY echo "Arch: ${{matrix.arch}}" >> $GITHUB_STEP_SUMMARY echo "Ref: $GITHUB_REF" >> $GITHUB_STEP_SUMMARY echo "Targets:" >> $GITHUB_STEP_SUMMARY - echo $TARGETS | sed 's/[][]//g; s/", "/\n- /g; s/"//g; s/^/- /' >> $GITHUB_STEP_SUMMARY + echo $TARGETS >> $GITHUB_STEP_SUMMARY version: if: ${{ inputs.target != '' }} @@ -75,11 +78,9 @@ jobs: long: ${{ steps.version.outputs.long }} deb: ${{ steps.version.outputs.deb }} - build-arch: + build: if: ${{ inputs.target != '' && inputs.arch != 'native' }} needs: [version] - strategy: - fail-fast: false uses: ./.github/workflows/build_firmware.yml with: version: ${{ needs.version.outputs.long }} @@ -165,10 +166,8 @@ jobs: permissions: contents: write pull-requests: write - strategy: - fail-fast: false runs-on: ubuntu-latest - needs: [version, build-arch] + needs: [version, build] steps: - name: Checkout code uses: actions/checkout@v5 @@ -237,159 +236,3 @@ jobs: name: firmware-${{inputs.target}}-${{ needs.version.outputs.long }} description: "Download firmware-${{inputs.target}}-${{ needs.version.outputs.long }}.zip. 
This artifact will be available for 90 days from creation" github-token: ${{ secrets.GITHUB_TOKEN }} - - release-artifacts: - runs-on: ubuntu-latest - if: ${{ github.event_name == 'workflow_dispatch' && inputs.target != ''}} - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - needs: - - version - - gather-artifacts - - build-debian-src - - package-pio-deps-native-tft - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - name: Create release - uses: softprops/action-gh-release@v2 - id: create_release - with: - draft: true - prerelease: true - name: Meshtastic Firmware ${{ needs.version.outputs.long }} Alpha - tag_name: v${{ needs.version.outputs.long }} - body: | - Autogenerated by github action, developer should edit as required before publishing... - - - name: Download source deb - uses: actions/download-artifact@v5 - with: - pattern: firmware-debian-${{ needs.version.outputs.deb }}~UNRELEASED-src - merge-multiple: true - path: ./output/debian-src - - - name: Download `native-tft` pio deps - uses: actions/download-artifact@v5 - with: - pattern: platformio-deps-native-tft-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./output/pio-deps-native-tft - - - name: Zip Linux sources - working-directory: output - run: | - zip -j -9 -r ./meshtasticd-${{ needs.version.outputs.deb }}-src.zip ./debian-src - zip -9 -r ./platformio-deps-native-tft-${{ needs.version.outputs.long }}.zip ./pio-deps-native-tft - - # For diagnostics - - name: Display structure of downloaded files - run: ls -lR - - - name: Add Linux sources to GtiHub Release - # Only run when targeting master branch with workflow_dispatch - if: ${{ github.ref_name == 'master' }} - run: | - gh release upload v${{ needs.version.outputs.long }} ./output/meshtasticd-${{ needs.version.outputs.deb }}-src.zip - gh release upload v${{ needs.version.outputs.long }} ./output/platformio-deps-native-tft-${{ needs.version.outputs.long }}.zip - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - release-firmware: - strategy: - fail-fast: false - runs-on: ubuntu-latest - if: ${{ github.event_name == 'workflow_dispatch' && inputs.target != ''}} - needs: [release-artifacts, version] - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - uses: actions/download-artifact@v5 - with: - pattern: firmware-*-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./output - - - name: Display structure of downloaded files - run: ls -lR - - - name: Device scripts permissions - run: | - chmod +x ./output/device-install.sh - chmod +x ./output/device-update.sh - - - name: Zip firmware - run: zip -j -9 -r ./firmware-${{inputs.target}}-${{ needs.version.outputs.long }}.zip ./output - - - uses: actions/download-artifact@v5 - with: - pattern: debug-elfs-*-${{ needs.version.outputs.long }}.zip - merge-multiple: true - path: ./elfs - - - name: Zip debug elfs - run: zip -j -9 -r ./debug-elfs-${{inputs.target}}-${{ needs.version.outputs.long }}.zip ./elfs - - # For diagnostics - - name: Display structure of downloaded files - run: ls -lR - - - name: Add bins and debug elfs to GitHub Release - # Only run when targeting master branch with workflow_dispatch - if: ${{ github.ref_name == 'master' }} - run: | - gh release upload v${{ needs.version.outputs.long }} ./firmware-${{inputs.target}}-${{ needs.version.outputs.long }}.zip - gh release upload 
v${{ needs.version.outputs.long }} ./debug-elfs-${{inputs.target}}-${{ needs.version.outputs.long }}.zip - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - publish-firmware: - runs-on: ubuntu-24.04 - if: ${{ github.event_name == 'workflow_dispatch' && github.repository == 'meshtastic/firmware' && inputs.target != '' }} - needs: [release-firmware, version] - env: - targets: |- - esp32,esp32s3,esp32c3,esp32c6,nrf52840,rp2040,rp2350,stm32 - steps: - - name: Checkout - uses: actions/checkout@v5 - - - name: Setup Python - uses: actions/setup-python@v6 - with: - python-version: 3.x - - - uses: actions/download-artifact@v5 - with: - pattern: firmware-{${{ env.targets }}}-${{ needs.version.outputs.long }} - merge-multiple: true - path: ./publish - - - name: Publish firmware to meshtastic.github.io - uses: peaceiris/actions-gh-pages@v4 - env: - # On event/* branches, use the event name as the destination prefix - DEST_PREFIX: ${{ contains(github.ref_name, 'event/') && format('{0}/', github.ref_name) || '' }} - with: - deploy_key: ${{ secrets.DIST_PAGES_DEPLOY_KEY }} - external_repository: meshtastic/meshtastic.github.io - publish_branch: master - publish_dir: ./publish - destination_dir: ${{ env.DEST_PREFIX }}firmware-${{ needs.version.outputs.long }} - keep_files: true - user_name: github-actions[bot] - user_email: github-actions[bot]@users.noreply.github.com - commit_message: ${{ needs.version.outputs.long }} - enable_jekyll: true diff --git a/.github/workflows/main_matrix.yml b/.github/workflows/main_matrix.yml index f61e314a7..812990eca 100644 --- a/.github/workflows/main_matrix.yml +++ b/.github/workflows/main_matrix.yml @@ -29,17 +29,10 @@ jobs: setup: if: github.repository == 'meshtastic/firmware' strategy: - fail-fast: false + fail-fast: true matrix: arch: - - esp32 - - esp32s3 - - esp32c3 - - esp32c6 - - nrf52840 - - rp2040 - - rp2350 - - stm32 + - all - check runs-on: ubuntu-24.04 steps: @@ -59,19 +52,13 @@ jobs: if [[ "$GITHUB_HEAD_REF" == "" ]]; then TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}}) else - TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} pr) + TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} --level pr) fi - echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF Targets: $TARGETS" - echo "${{matrix.arch}}=$(jq -cn --argjson environments "$TARGETS" '{board: $environments}')" >> $GITHUB_OUTPUT + echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF" + echo "${{matrix.arch}}=$TARGETS" >> $GITHUB_OUTPUT + echo "$TARGETS" >> $GITHUB_STEP_SUMMARY outputs: - esp32: ${{ steps.jsonStep.outputs.esp32 }} - esp32s3: ${{ steps.jsonStep.outputs.esp32s3 }} - esp32c3: ${{ steps.jsonStep.outputs.esp32c3 }} - esp32c6: ${{ steps.jsonStep.outputs.esp32c6 }} - nrf52840: ${{ steps.jsonStep.outputs.nrf52840 }} - rp2040: ${{ steps.jsonStep.outputs.rp2040 }} - rp2350: ${{ steps.jsonStep.outputs.rp2350 }} - stm32: ${{ steps.jsonStep.outputs.stm32 }} + all: ${{ steps.jsonStep.outputs.all }} check: ${{ steps.jsonStep.outputs.check }} version: @@ -94,7 +81,8 @@ jobs: needs: setup strategy: fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.check) }} + matrix: + check: ${{ fromJson(needs.setup.outputs.check) }} runs-on: ubuntu-latest if: ${{ github.event_name != 'workflow_dispatch' && github.repository == 'meshtastic/firmware' }} @@ -103,96 +91,20 @@ jobs: - name: Build base id: base uses: ./.github/actions/setup-base - - name: Check ${{ matrix.board }} - run: bin/check-all.sh ${{ matrix.board }} + - name: Check ${{ matrix.check.board }} + 
run: bin/check-all.sh ${{ matrix.check.board }} - build-esp32: + build: needs: [setup, version] strategy: fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32) }} + matrix: + build: ${{ fromJson(needs.setup.outputs.all) }} uses: ./.github/workflows/build_firmware.yml with: version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32 - - build-esp32s3: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32s3) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32s3 - - build-esp32c3: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32c3) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c3 - - build-esp32c6: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.esp32c6) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c6 - - build-nrf52840: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.nrf52840) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: nrf52840 - - build-rp2040: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.rp2040) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2040 - - build-rp2350: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.rp2350) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2350 - - build-stm32: - needs: [setup, version] - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.stm32) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: stm32 + pio_env: ${{ matrix.build.board }} + platform: ${{ matrix.build.platform }} build-debian-src: if: github.repository == 'meshtastic/firmware' @@ -203,7 +115,7 @@ jobs: secrets: inherit package-pio-deps-native-tft: - if: ${{ github.event_name == 'workflow_dispatch' }} + if: ${{ github.repository == 'meshtastic/firmware' && github.event_name == 'workflow_dispatch' }} uses: ./.github/workflows/package_pio_deps.yml with: pio_env: native-tft @@ -288,18 +200,7 @@ jobs: - rp2350 - stm32 runs-on: ubuntu-latest - needs: - [ - version, - build-esp32, - build-esp32s3, - build-esp32c3, - build-esp32c6, - build-nrf52840, - build-rp2040, - build-rp2350, - build-stm32, - ] + needs: [version, build] steps: - name: Checkout code uses: actions/checkout@v5 diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index 7f397ce18..79e8b0803 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -7,23 +7,13 @@ on: # Merge group is a special trigger that is used to trigger the workflow when a merge group is created. 
merge_group: -env: - FAIL_FAST_PER_ARCH: true - jobs: setup: strategy: fail-fast: true matrix: arch: - - esp32 - - esp32s3 - - esp32c3 - - esp32c6 - - nrf52840 - - rp2040 - - rp2350 - - stm32 + - all - check runs-on: ubuntu-24.04 steps: @@ -39,19 +29,12 @@ jobs: if [[ "$GITHUB_HEAD_REF" == "" ]]; then TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}}) else - TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} pr) + TARGETS=$(./bin/generate_ci_matrix.py ${{matrix.arch}} --level pr) fi - echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF Targets: $TARGETS" - echo "${{matrix.arch}}=$(jq -cn --argjson environments "$TARGETS" '{board: $environments}')" >> $GITHUB_OUTPUT + echo "Name: $GITHUB_REF_NAME Base: $GITHUB_BASE_REF Ref: $GITHUB_REF" + echo "${{matrix.arch}}=$TARGETS" >> $GITHUB_OUTPUT outputs: - esp32: ${{ steps.jsonStep.outputs.esp32 }} - esp32s3: ${{ steps.jsonStep.outputs.esp32s3 }} - esp32c3: ${{ steps.jsonStep.outputs.esp32c3 }} - esp32c6: ${{ steps.jsonStep.outputs.esp32c6 }} - nrf52840: ${{ steps.jsonStep.outputs.nrf52840 }} - rp2040: ${{ steps.jsonStep.outputs.rp2040 }} - rp2350: ${{ steps.jsonStep.outputs.rp2350 }} - stm32: ${{ steps.jsonStep.outputs.stm32 }} + all: ${{ steps.jsonStep.outputs.all }} check: ${{ steps.jsonStep.outputs.check }} version: @@ -73,7 +56,8 @@ jobs: needs: setup strategy: fail-fast: true - matrix: ${{ fromJson(needs.setup.outputs.check) }} + matrix: + check: ${{ fromJson(needs.setup.outputs.check) }} runs-on: ubuntu-latest if: ${{ github.event_name != 'workflow_dispatch' }} @@ -82,96 +66,19 @@ jobs: - name: Build base id: base uses: ./.github/actions/setup-base - - name: Check ${{ matrix.board }} - run: bin/check-all.sh ${{ matrix.board }} + - name: Check ${{ matrix.check.board }} + run: bin/check-all.sh ${{ matrix.check.board }} - build-esp32: + build: needs: [setup, version] strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.esp32) }} + matrix: + build: ${{ fromJson(needs.setup.outputs.all) }} uses: ./.github/workflows/build_firmware.yml with: version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32 - - build-esp32s3: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.esp32s3) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32s3 - - build-esp32c3: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.esp32c3) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c3 - - build-esp32c6: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.esp32c6) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: esp32c6 - - build-nrf52840: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.nrf52840) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: nrf52840 - - build-rp2040: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == 
true }} - matrix: ${{ fromJson(needs.setup.outputs.rp2040) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2040 - - build-rp2350: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.rp2350) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: rp2350 - - build-stm32: - needs: [setup, version] - strategy: - fail-fast: ${{ vars.FAIL_FAST_PER_ARCH == true }} - matrix: ${{ fromJson(needs.setup.outputs.stm32) }} - uses: ./.github/workflows/build_firmware.yml - with: - version: ${{ needs.version.outputs.long }} - pio_env: ${{ matrix.board }} - platform: stm32 + pio_env: ${{ matrix.build.board }} + platform: ${{ matrix.build.platform }} build-debian-src: if: github.repository == 'meshtastic/firmware' @@ -260,18 +167,7 @@ jobs: - rp2350 - stm32 runs-on: ubuntu-latest - needs: - [ - version, - build-esp32, - build-esp32s3, - build-esp32c3, - build-esp32c6, - build-nrf52840, - build-rp2040, - build-rp2350, - build-stm32, - ] + needs: [version, build] steps: - name: Checkout code uses: actions/checkout@v5 diff --git a/bin/generate_ci_matrix.py b/bin/generate_ci_matrix.py index aaa76aa45..b4c18c05b 100755 --- a/bin/generate_ci_matrix.py +++ b/bin/generate_ci_matrix.py @@ -1,28 +1,32 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Generate the CI matrix.""" +import argparse import json -import sys -import random import re from platformio.project.config import ProjectConfig -options = sys.argv[1:] +parser = argparse.ArgumentParser(description="Generate the CI matrix") +parser.add_argument("platform", help="Platform to build for") +parser.add_argument( + "--level", + choices=["extra", "pr"], + nargs="*", + default=[], + help="Board level to build for (omit for full release boards)", +) +args = parser.parse_args() outlist = [] -if len(options) < 1: - print(json.dumps(outlist)) - exit(1) - cfg = ProjectConfig.get_instance() pio_envs = cfg.envs() # Gather all PlatformIO environments for filtering later all_envs = [] for pio_env in pio_envs: - env_build_flags = cfg.get(f"env:{pio_env}", 'build_flags') + env_build_flags = cfg.get(f"env:{pio_env}", "build_flags") env_platform = None for flag in env_build_flags: # Extract the platform from the build flags @@ -37,36 +41,35 @@ for pio_env in pio_envs: exit(1) # Store env details as a dictionary, and add to 'all_envs' list env = { - 'name': pio_env, - 'platform': env_platform, - 'board_level': cfg.get(f"env:{pio_env}", 'board_level', default=None), - 'board_check': bool(cfg.get(f"env:{pio_env}", 'board_check', default=False)) + "ci": {"board": pio_env, "platform": env_platform}, + "board_level": cfg.get(f"env:{pio_env}", "board_level", default=None), + "board_check": bool(cfg.get(f"env:{pio_env}", "board_check", default=False)), } all_envs.append(env) # Filter outputs based on options # Check is mutually exclusive with other options (except 'pr') -if "check" in options: +if "check" in args.platform: for env in all_envs: - if env['board_check']: - if "pr" in options: - if env['board_level'] == 'pr': - outlist.append(env['name']) + if env["board_check"]: + if "pr" in args.level: + if env["board_level"] == "pr": + outlist.append(env["ci"]) else: - outlist.append(env['name']) + outlist.append(env["ci"]) # Filter (non-check) builds by platform else: for env in all_envs: - if options[0] == 
env['platform']: + if args.platform == env["ci"]["platform"] or args.platform == "all": # Always include board_level = 'pr' - if env['board_level'] == 'pr': - outlist.append(env['name']) + if env["board_level"] == "pr": + outlist.append(env["ci"]) # Include board_level = 'extra' when requested - elif "extra" in options and env['board_level'] == "extra": - outlist.append(env['name']) + elif "extra" in args.level and env["board_level"] == "extra": + outlist.append(env["ci"]) # If no board level is specified, include in release builds (not PR) - elif "pr" not in options and not env['board_level']: - outlist.append(env['name']) + elif "pr" not in args.level and not env["board_level"]: + outlist.append(env["ci"]) # Return as a JSON list print(json.dumps(outlist)) diff --git a/variants/native/portduino-buildroot/platformio.ini b/variants/native/portduino-buildroot/platformio.ini index d1bd39e10..a3d0f4639 100644 --- a/variants/native/portduino-buildroot/platformio.ini +++ b/variants/native/portduino-buildroot/platformio.ini @@ -4,5 +4,6 @@ extends = portduino_base ; environment variable in the buildroot environment. build_flags = ${portduino_base.build_flags} -O0 -I variants/native/portduino-buildroot board = buildroot +board_level = extra lib_deps = ${portduino_base.lib_deps} build_src_filter = ${portduino_base.build_src_filter} \ No newline at end of file diff --git a/variants/native/portduino/platformio.ini b/variants/native/portduino/platformio.ini index 61eadb459..2ccdfbbfc 100644 --- a/variants/native/portduino/platformio.ini +++ b/variants/native/portduino/platformio.ini @@ -3,6 +3,7 @@ extends = portduino_base build_flags = ${portduino_base.build_flags} -I variants/native/portduino -I /usr/include board = cross_platform +board_level = extra lib_deps = ${portduino_base.lib_deps} melopero/Melopero RV3028@^1.1.0 @@ -50,7 +51,6 @@ build_type = release lib_deps = ${native_base.lib_deps} ${device-ui_base.lib_deps} -board_level = extra build_flags = ${native_base.build_flags} -Os -ffunction-sections -fdata-sections -Wl,--gc-sections -D RAM_SIZE=8192 -D USE_FRAMEBUFFER=1 @@ -79,7 +79,6 @@ build_type = debug lib_deps = ${native_base.lib_deps} ${device-ui_base.lib_deps} -board_level = extra build_flags = ${native_base.build_flags} -O0 -fsanitize=address -lX11 -linput -lxkbcommon -D DEBUG_HEAP -D RAM_SIZE=16384 From 68a2c4adda4236bcc3e260751617b5ca2106200d Mon Sep 17 00:00:00 2001 From: Tom Fifield Date: Tue, 7 Oct 2025 16:11:36 +1100 Subject: [PATCH 6/9] Run Integration test in simulator mode (#8232) (#8242) Co-authored-by: Jonathan Bennett --- .github/workflows/test_native.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_native.yml b/.github/workflows/test_native.yml index 6b788f4c7..9cf1c9e53 100644 --- a/.github/workflows/test_native.yml +++ b/.github/workflows/test_native.yml @@ -40,7 +40,7 @@ jobs: - name: Integration test run: | - .pio/build/coverage/program & + .pio/build/coverage/program -s & PID=$! timeout 20 bash -c "until ls -al /proc/$PID/fd | grep socket; do sleep 1; done" echo "Simulator started, launching python test..." 
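
Note on the CI matrix refactor in PATCH 5/9 above: the rewritten bin/generate_ci_matrix.py now prints a JSON list of board/platform objects instead of a bare list of environment names, and the workflows feed that list straight into a single reusable build job via fromJson(). A minimal sketch of the assumed round trip, using hypothetical board names (not taken from the patch), might look like this:

    # Hypothetical output shape of `./bin/generate_ci_matrix.py all --level pr`
    import json

    outlist = [
        {"board": "tbeam", "platform": "esp32"},        # assumed example environment
        {"board": "heltec-v3", "platform": "esp32s3"},  # assumed example environment
    ]
    print(json.dumps(outlist))
    # -> [{"board": "tbeam", "platform": "esp32"}, {"board": "heltec-v3", "platform": "esp32s3"}]

Each element is then consumed as matrix.build on the workflow side, so ${{ matrix.build.board }} becomes the pio_env input and ${{ matrix.build.platform }} the platform input of build_firmware.yml, which is why the separate build-esp32 / build-nrf52840 / ... jobs could be collapsed into one generic build job.
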
From b214f09ca1b8b63df62ee8733d7d4138ff6b3979 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:34:00 +1100 Subject: [PATCH 7/9] Update meshtastic/web to v2.6.6 (#7583) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- bin/web.version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/web.version b/bin/web.version index e46a05b19..952f449f1 100644 --- a/bin/web.version +++ b/bin/web.version @@ -1 +1 @@ -2.6.4 \ No newline at end of file +2.6.6 \ No newline at end of file From 1d5b3438363190226e5bb294f404766ab12299ef Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:56:13 +1100 Subject: [PATCH 8/9] Update meshtastic/device-ui digest to e564d78 (#8235) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- platformio.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platformio.ini b/platformio.ini index 4ae2da54f..2e101a8a2 100644 --- a/platformio.ini +++ b/platformio.ini @@ -120,7 +120,7 @@ lib_deps = [device-ui_base] lib_deps = # renovate: datasource=git-refs depName=meshtastic/device-ui packageName=https://github.com/meshtastic/device-ui gitBranch=master - https://github.com/meshtastic/device-ui/archive/f920b1273df750c2e3e01385d3ba30553b913afa.zip + https://github.com/meshtastic/device-ui/archive/e564d78ae1a7e9a225aaf4a73b1cb84c549f510f.zip ; Common libs for environmental measurements in telemetry module [environmental_base] From 5bcc47dddb6aa339e51651a05ff7f2bdc6e27bd2 Mon Sep 17 00:00:00 2001 From: Tom Fifield Date: Tue, 7 Oct 2025 22:00:09 +1100 Subject: [PATCH 9/9] Revert "develop --> Master" (#8244) --- .github/workflows/main_matrix.yml | 6 ++ src/Power.cpp | 7 +- src/gps/GPS.cpp | 8 +-- src/mesh/FloodingRouter.cpp | 102 ++++++++++++++++++----------- src/mesh/FloodingRouter.h | 15 ++--- src/mesh/NextHopRouter.cpp | 105 +++++++++++++++--------------- src/mesh/NextHopRouter.h | 6 +- src/mesh/PacketHistory.cpp | 1 + src/mesh/http/ContentHandler.cpp | 5 -- src/mesh/http/WebServer.cpp | 35 +--------- src/mesh/http/WebServer.h | 4 -- src/modules/TraceRouteModule.cpp | 64 ------------------ src/modules/TraceRouteModule.h | 6 -- src/power.h | 1 - 14 files changed, 136 insertions(+), 229 deletions(-) diff --git a/.github/workflows/main_matrix.yml b/.github/workflows/main_matrix.yml index 887bf3081..812990eca 100644 --- a/.github/workflows/main_matrix.yml +++ b/.github/workflows/main_matrix.yml @@ -27,6 +27,7 @@ on: jobs: setup: + if: github.repository == 'meshtastic/firmware' strategy: fail-fast: true matrix: @@ -41,6 +42,10 @@ jobs: python-version: 3.x cache: pip - run: pip install -U platformio + - name: Uncomment build epoch + shell: bash + run: | + sed -i 's/#-DBUILD_EPOCH=$UNIX_TIME/-DBUILD_EPOCH=$UNIX_TIME/' platformio.ini - name: Generate matrix id: jsonStep run: | @@ -57,6 +62,7 @@ jobs: check: ${{ steps.jsonStep.outputs.check }} version: + if: github.repository == 'meshtastic/firmware' runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 diff --git a/src/Power.cpp b/src/Power.cpp index bb5d16d10..7de82b8d6 100644 --- a/src/Power.cpp +++ b/src/Power.cpp @@ -828,11 +828,8 @@ void Power::readPowerStatus() // Notify any status instances that are observing us const PowerStatus powerStatus2 = PowerStatus(hasBattery, usbPowered, isChargingNow, batteryVoltageMv, batteryChargePercent); - if (millis() > lastLogTime + 50 * 1000) { - 
LOG_DEBUG("Battery: usbPower=%d, isCharging=%d, batMv=%d, batPct=%d", powerStatus2.getHasUSB(), - powerStatus2.getIsCharging(), powerStatus2.getBatteryVoltageMv(), powerStatus2.getBatteryChargePercent()); - lastLogTime = millis(); - } + LOG_DEBUG("Battery: usbPower=%d, isCharging=%d, batMv=%d, batPct=%d", powerStatus2.getHasUSB(), powerStatus2.getIsCharging(), + powerStatus2.getBatteryVoltageMv(), powerStatus2.getBatteryChargePercent()); newStatus.notifyObservers(&powerStatus2); #ifdef DEBUG_HEAP if (lastheap != memGet.getFreeHeap()) { diff --git a/src/gps/GPS.cpp b/src/gps/GPS.cpp index 36ec7c580..0487d1fea 100644 --- a/src/gps/GPS.cpp +++ b/src/gps/GPS.cpp @@ -1589,12 +1589,8 @@ bool GPS::lookForLocation() #ifndef TINYGPS_OPTION_NO_STATISTICS if (reader.failedChecksum() > lastChecksumFailCount) { -// In a GPS_DEBUG build we want to log all of these. In production, we only care if there are many of them. -#ifndef GPS_DEBUG - if (reader.failedChecksum() > 4) -#endif - LOG_WARN("%u new GPS checksum failures, for a total of %u", reader.failedChecksum() - lastChecksumFailCount, - reader.failedChecksum()); + LOG_WARN("%u new GPS checksum failures, for a total of %u", reader.failedChecksum() - lastChecksumFailCount, + reader.failedChecksum()); lastChecksumFailCount = reader.failedChecksum(); } #endif diff --git a/src/mesh/FloodingRouter.cpp b/src/mesh/FloodingRouter.cpp index 032be241b..1d8ac247f 100644 --- a/src/mesh/FloodingRouter.cpp +++ b/src/mesh/FloodingRouter.cpp @@ -31,8 +31,33 @@ bool FloodingRouter::shouldFilterReceived(const meshtastic_MeshPacket *p) wasSeenRecently(p, true, nullptr, nullptr, &wasUpgraded); // Updates history; returns false when an upgrade is detected // Handle hop_limit upgrade scenario for rebroadcasters - if (wasUpgraded && perhapsHandleUpgradedPacket(p)) { - return true; // we handled it, so stop processing + // isRebroadcaster() is duplicated in perhapsRebroadcast(), but this avoids confusing log messages + if (wasUpgraded && isRebroadcaster() && iface && p->hop_limit > 0) { + // wasSeenRecently() reports false in upgrade cases so we handle replacement before the duplicate short-circuit + // If we overhear a duplicate copy of the packet with more hops left than the one we are waiting to + // rebroadcast, then remove the packet currently sitting in the TX queue and use this one instead. + uint8_t dropThreshold = p->hop_limit; // remove queued packets that have fewer hops remaining + if (iface->removePendingTXPacket(getFrom(p), p->id, dropThreshold)) { + LOG_DEBUG("Processing upgraded packet 0x%08x for rebroadcast with hop limit %d (dropping queued < %d)", p->id, + p->hop_limit, dropThreshold); + + if (nodeDB) + nodeDB->updateFrom(*p); +#if !MESHTASTIC_EXCLUDE_TRACEROUTE + if (traceRouteModule && p->which_payload_variant == meshtastic_MeshPacket_decoded_tag && + p->decoded.portnum == meshtastic_PortNum_TRACEROUTE_APP) + traceRouteModule->processUpgradedPacket(*p); +#endif + + perhapsRebroadcast(p); + + // We already enqueued the improved copy, so make sure the incoming packet stops here. + return true; + } + + // No queue entry was replaced by this upgraded copy, so treat it as a duplicate to avoid + // delivering the same packet to applications/phone twice with different hop limits. 
+ seenRecently = true; } if (seenRecently) { @@ -45,10 +70,8 @@ bool FloodingRouter::shouldFilterReceived(const meshtastic_MeshPacket *p) if (isRepeated) { LOG_DEBUG("Repeated reliable tx"); // Check if it's still in the Tx queue, if not, we have to relay it again - if (!findInTxQueue(p->from, p->id)) { - reprocessPacket(p); + if (!findInTxQueue(p->from, p->id)) perhapsRebroadcast(p); - } } else { perhapsCancelDupe(p); } @@ -59,40 +82,6 @@ bool FloodingRouter::shouldFilterReceived(const meshtastic_MeshPacket *p) return Router::shouldFilterReceived(p); } -bool FloodingRouter::perhapsHandleUpgradedPacket(const meshtastic_MeshPacket *p) -{ - // isRebroadcaster() is duplicated in perhapsRebroadcast(), but this avoids confusing log messages - if (isRebroadcaster() && iface && p->hop_limit > 0) { - // If we overhear a duplicate copy of the packet with more hops left than the one we are waiting to - // rebroadcast, then remove the packet currently sitting in the TX queue and use this one instead. - uint8_t dropThreshold = p->hop_limit; // remove queued packets that have fewer hops remaining - if (iface->removePendingTXPacket(getFrom(p), p->id, dropThreshold)) { - LOG_DEBUG("Processing upgraded packet 0x%08x for rebroadcast with hop limit %d (dropping queued < %d)", p->id, - p->hop_limit, dropThreshold); - - reprocessPacket(p); - perhapsRebroadcast(p); - - rxDupe++; - // We already enqueued the improved copy, so make sure the incoming packet stops here. - return true; - } - } - - return false; -} - -void FloodingRouter::reprocessPacket(const meshtastic_MeshPacket *p) -{ - if (nodeDB) - nodeDB->updateFrom(*p); -#if !MESHTASTIC_EXCLUDE_TRACEROUTE - if (traceRouteModule && p->which_payload_variant == meshtastic_MeshPacket_decoded_tag && - p->decoded.portnum == meshtastic_PortNum_TRACEROUTE_APP) - traceRouteModule->processUpgradedPacket(*p); -#endif -} - bool FloodingRouter::roleAllowsCancelingDupe(const meshtastic_MeshPacket *p) { if (config.device.role == meshtastic_Config_DeviceConfig_Role_ROUTER || @@ -132,6 +121,41 @@ bool FloodingRouter::isRebroadcaster() config.device.rebroadcast_mode != meshtastic_Config_DeviceConfig_RebroadcastMode_NONE; } +void FloodingRouter::perhapsRebroadcast(const meshtastic_MeshPacket *p) +{ + if (!isToUs(p) && (p->hop_limit > 0) && !isFromUs(p)) { + if (p->id != 0) { + if (isRebroadcaster()) { + meshtastic_MeshPacket *tosend = packetPool.allocCopy(*p); // keep a copy because we will be sending it + + // Use shared logic to determine if hop_limit should be decremented + if (shouldDecrementHopLimit(p)) { + tosend->hop_limit--; // bump down the hop count + } else { + LOG_INFO("favorite-ROUTER/CLIENT_BASE-to-ROUTER/CLIENT_BASE flood: preserving hop_limit"); + } +#if USERPREFS_EVENT_MODE + if (tosend->hop_limit > 2) { + // if we are "correcting" the hop_limit, "correct" the hop_start by the same amount to preserve hops away. 
+ tosend->hop_start -= (tosend->hop_limit - 2); + tosend->hop_limit = 2; + } +#endif + + tosend->next_hop = NO_NEXT_HOP_PREFERENCE; // this should already be the case, but just in case + + LOG_INFO("Rebroadcast received floodmsg"); + // Note: we are careful to resend using the original senders node id + send(tosend); + } else { + LOG_DEBUG("No rebroadcast: Role = CLIENT_MUTE or Rebroadcast Mode = NONE"); + } + } else { + LOG_DEBUG("Ignore 0 id broadcast"); + } + } +} + void FloodingRouter::sniffReceived(const meshtastic_MeshPacket *p, const meshtastic_Routing *c) { bool isAckorReply = (p->which_payload_variant == meshtastic_MeshPacket_decoded_tag) && diff --git a/src/mesh/FloodingRouter.h b/src/mesh/FloodingRouter.h index e8a2e9685..eaf71d294 100644 --- a/src/mesh/FloodingRouter.h +++ b/src/mesh/FloodingRouter.h @@ -27,6 +27,10 @@ */ class FloodingRouter : public Router { + private: + /* Check if we should rebroadcast this packet, and do so if needed */ + void perhapsRebroadcast(const meshtastic_MeshPacket *p); + public: /** * Constructor @@ -55,17 +59,6 @@ class FloodingRouter : public Router */ virtual void sniffReceived(const meshtastic_MeshPacket *p, const meshtastic_Routing *c) override; - /* Check if we should rebroadcast this packet, and do so if needed */ - virtual bool perhapsRebroadcast(const meshtastic_MeshPacket *p) = 0; - - /* Check if we should handle an upgraded packet (with higher hop_limit) - * @return true if we handled it (so stop processing) - */ - bool perhapsHandleUpgradedPacket(const meshtastic_MeshPacket *p); - - /* Call when we receive a packet that needs some reprocessing, but afterwards should be filtered */ - void reprocessPacket(const meshtastic_MeshPacket *p); - // Return false for roles like ROUTER which should always rebroadcast even when we've heard another rebroadcast of // the same packet bool roleAllowsCancelingDupe(const meshtastic_MeshPacket *p); diff --git a/src/mesh/NextHopRouter.cpp b/src/mesh/NextHopRouter.cpp index afdb4d096..0461d7eb6 100644 --- a/src/mesh/NextHopRouter.cpp +++ b/src/mesh/NextHopRouter.cpp @@ -43,8 +43,31 @@ bool NextHopRouter::shouldFilterReceived(const meshtastic_MeshPacket *p) &wasUpgraded); // Updates history; returns false when an upgrade is detected // Handle hop_limit upgrade scenario for rebroadcasters - if (wasUpgraded && perhapsHandleUpgradedPacket(p)) { - return true; // we handled it, so stop processing + // isRebroadcaster() is duplicated in perhapsRelay(), but this avoids confusing log messages + if (wasUpgraded && isRebroadcaster() && iface && p->hop_limit > 0) { + // Upgrade detection bypasses the duplicate short-circuit so we replace the queued packet before exiting + uint8_t dropThreshold = p->hop_limit; // remove queued packets that have fewer hops remaining + if (iface->removePendingTXPacket(getFrom(p), p->id, dropThreshold)) { + LOG_DEBUG("Processing upgraded packet 0x%08x for relay with hop limit %d (dropping queued < %d)", p->id, p->hop_limit, + dropThreshold); + + if (nodeDB) + nodeDB->updateFrom(*p); +#if !MESHTASTIC_EXCLUDE_TRACEROUTE + if (traceRouteModule && p->which_payload_variant == meshtastic_MeshPacket_decoded_tag && + p->decoded.portnum == meshtastic_PortNum_TRACEROUTE_APP) + traceRouteModule->processUpgradedPacket(*p); +#endif + + perhapsRelay(p); + + // We already enqueued the improved copy, so make sure the incoming packet stops here. 
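+            // Note: perhapsRelay() (below) only re-sends this copy when it is not to/from us and next_hop is
+            // NO_NEXT_HOP_PREFERENCE or this node's byte; we return true regardless, so the upgraded duplicate
+            // never reaches the phone/apps a second time.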
+ return true; + } + + // No queue entry was replaced by this upgraded copy, so treat it as a duplicate to avoid + // delivering the same packet to applications/phone twice with different hop limits. + seenRecently = true; } if (seenRecently) { @@ -59,20 +82,14 @@ bool NextHopRouter::shouldFilterReceived(const meshtastic_MeshPacket *p) if (wasFallback) { LOG_INFO("Fallback to flooding from relay_node=0x%x", p->relay_node); // Check if it's still in the Tx queue, if not, we have to relay it again - if (!findInTxQueue(p->from, p->id)) { - reprocessPacket(p); - perhapsRebroadcast(p); - } + if (!findInTxQueue(p->from, p->id)) + perhapsRelay(p); } else { bool isRepeated = p->hop_start > 0 && p->hop_start == p->hop_limit; // If repeated and not in Tx queue anymore, try relaying again, or if we are the destination, send the ACK again if (isRepeated) { - if (!findInTxQueue(p->from, p->id)) { - reprocessPacket(p); - if (!perhapsRebroadcast(p) && isToUs(p) && p->want_ack) { - sendAckNak(meshtastic_Routing_Error_NONE, getFrom(p), p->id, p->channel, 0); - } - } + if (!findInTxQueue(p->from, p->id) && !perhapsRelay(p) && isToUs(p) && p->want_ack) + sendAckNak(meshtastic_Routing_Error_NONE, getFrom(p), p->id, p->channel, 0); } else if (!weWereNextHop) { perhapsCancelDupe(p); // If it's a dupe, cancel relay if we were not explicitly asked to relay } @@ -90,14 +107,13 @@ void NextHopRouter::sniffReceived(const meshtastic_MeshPacket *p, const meshtast bool isAckorReply = (p->which_payload_variant == meshtastic_MeshPacket_decoded_tag) && (p->decoded.request_id != 0 || p->decoded.reply_id != 0); if (isAckorReply) { - // Update next-hop for the original transmitter of this successful transmission to the relay node, but ONLY if "from" - // is not 0 (means implicit ACK) and original packet was also relayed by this node, or we sent it directly to the - // destination + // Update next-hop for the original transmitter of this successful transmission to the relay node, but ONLY if "from" is + // not 0 (means implicit ACK) and original packet was also relayed by this node, or we sent it directly to the destination if (p->from != 0) { meshtastic_NodeInfoLite *origTx = nodeDB->getMeshNode(p->from); if (origTx) { - // Either relayer of ACK was also a relayer of the packet, or we were the *only* relayer and the ACK came - // directly from the destination + // Either relayer of ACK was also a relayer of the packet, or we were the *only* relayer and the ACK came directly + // from the destination bool wasAlreadyRelayer = wasRelayer(p->relay_node, p->decoded.request_id, p->to); bool weWereSoleRelayer = false; bool weWereRelayer = wasRelayer(ourRelayID, p->decoded.request_id, p->to, &weWereSoleRelayer); @@ -118,49 +134,34 @@ void NextHopRouter::sniffReceived(const meshtastic_MeshPacket *p, const meshtast } } - perhapsRebroadcast(p); + perhapsRelay(p); // handle the packet as normal Router::sniffReceived(p, c); } -/* Check if we should be rebroadcasting this packet if so, do so. */ -bool NextHopRouter::perhapsRebroadcast(const meshtastic_MeshPacket *p) +/* Check if we should be relaying this packet if so, do so. 
*/ +bool NextHopRouter::perhapsRelay(const meshtastic_MeshPacket *p) { if (!isToUs(p) && !isFromUs(p) && p->hop_limit > 0) { - if (p->id != 0) { + if (p->next_hop == NO_NEXT_HOP_PREFERENCE || p->next_hop == nodeDB->getLastByteOfNodeNum(getNodeNum())) { if (isRebroadcaster()) { - if (p->next_hop == NO_NEXT_HOP_PREFERENCE || p->next_hop == nodeDB->getLastByteOfNodeNum(getNodeNum())) { - meshtastic_MeshPacket *tosend = packetPool.allocCopy(*p); // keep a copy because we will be sending it - LOG_INFO("Rebroadcast received message coming from %x", p->relay_node); + meshtastic_MeshPacket *tosend = packetPool.allocCopy(*p); // keep a copy because we will be sending it + LOG_INFO("Relaying received message coming from %x", p->relay_node); - // Use shared logic to determine if hop_limit should be decremented - if (shouldDecrementHopLimit(p)) { - tosend->hop_limit--; // bump down the hop count - } else { - LOG_INFO("favorite-ROUTER/CLIENT_BASE-to-ROUTER/CLIENT_BASE rebroadcast: preserving hop_limit"); - } -#if USERPREFS_EVENT_MODE - if (tosend->hop_limit > 2) { - // if we are "correcting" the hop_limit, "correct" the hop_start by the same amount to preserve hops away. - tosend->hop_start -= (tosend->hop_limit - 2); - tosend->hop_limit = 2; - } -#endif - - if (p->next_hop == NO_NEXT_HOP_PREFERENCE) { - FloodingRouter::send(tosend); - } else { - NextHopRouter::send(tosend); - } - - return true; + // Use shared logic to determine if hop_limit should be decremented + if (shouldDecrementHopLimit(p)) { + tosend->hop_limit--; // bump down the hop count + } else { + LOG_INFO("Router/CLIENT_BASE-to-favorite-router/CLIENT_BASE relay: preserving hop_limit"); } + + NextHopRouter::send(tosend); + + return true; } else { - LOG_DEBUG("No rebroadcast: Role = CLIENT_MUTE or Rebroadcast Mode = NONE"); + LOG_DEBUG("Not rebroadcasting: Role = CLIENT_MUTE or Rebroadcast Mode = NONE"); } - } else { - LOG_DEBUG("Ignore 0 id broadcast"); } } @@ -230,13 +231,13 @@ bool NextHopRouter::stopRetransmission(GlobalPacketId key) } } - // Regardless of whether or not we canceled this packet from the txQueue, remove it from our pending list so it - // doesn't get scheduled again. (This is the core of stopRetransmission.) + // Regardless of whether or not we canceled this packet from the txQueue, remove it from our pending list so it doesn't + // get scheduled again. (This is the core of stopRetransmission.) auto numErased = pending.erase(key); assert(numErased == 1); - // When we remove an entry from pending, always be sure to release the copy of the packet that was allocated in the - // call to startRetransmission. + // When we remove an entry from pending, always be sure to release the copy of the packet that was allocated in the call + // to startRetransmission. packetPool.release(p); return true; diff --git a/src/mesh/NextHopRouter.h b/src/mesh/NextHopRouter.h index c1df3596b..0022644e9 100644 --- a/src/mesh/NextHopRouter.h +++ b/src/mesh/NextHopRouter.h @@ -148,7 +148,7 @@ class NextHopRouter : public FloodingRouter */ uint8_t getNextHop(NodeNum to, uint8_t relay_node); - /** Check if we should be rebroadcasting this packet if so, do so. - * @return true if we did rebroadcast */ - bool perhapsRebroadcast(const meshtastic_MeshPacket *p) override; + /** Check if we should be relaying this packet if so, do so. 
+ * @return true if we did relay */ + bool perhapsRelay(const meshtastic_MeshPacket *p); }; \ No newline at end of file diff --git a/src/mesh/PacketHistory.cpp b/src/mesh/PacketHistory.cpp index b4af707ae..49d581d9a 100644 --- a/src/mesh/PacketHistory.cpp +++ b/src/mesh/PacketHistory.cpp @@ -94,6 +94,7 @@ bool PacketHistory::wasSeenRecently(const meshtastic_MeshPacket *p, bool withUpd LOG_DEBUG("Packet History - Hop limit upgrade: packet 0x%08x from hop_limit=%d to hop_limit=%d", p->id, found->hop_limit, p->hop_limit); *wasUpgraded = true; + seenRecently = false; // Allow router processing but prevent duplicate app delivery } else if (wasUpgraded) { *wasUpgraded = false; // Initialize to false if not an upgrade } diff --git a/src/mesh/http/ContentHandler.cpp b/src/mesh/http/ContentHandler.cpp index 7b7ebb595..f87c6e3b0 100644 --- a/src/mesh/http/ContentHandler.cpp +++ b/src/mesh/http/ContentHandler.cpp @@ -148,8 +148,6 @@ void registerHandlers(HTTPServer *insecureServer, HTTPSServer *secureServer) void handleAPIv1FromRadio(HTTPRequest *req, HTTPResponse *res) { - if (webServerThread) - webServerThread->markActivity(); LOG_DEBUG("webAPI handleAPIv1FromRadio"); @@ -393,9 +391,6 @@ void handleFsDeleteStatic(HTTPRequest *req, HTTPResponse *res) void handleStatic(HTTPRequest *req, HTTPResponse *res) { - if (webServerThread) - webServerThread->markActivity(); - // Get access to the parameters ResourceParameters *params = req->getParams(); diff --git a/src/mesh/http/WebServer.cpp b/src/mesh/http/WebServer.cpp index 3a264fa5a..bf170de59 100644 --- a/src/mesh/http/WebServer.cpp +++ b/src/mesh/http/WebServer.cpp @@ -49,12 +49,6 @@ Preferences prefs; using namespace httpsserver; #include "mesh/http/ContentHandler.h" -static const uint32_t ACTIVE_THRESHOLD_MS = 5000; -static const uint32_t MEDIUM_THRESHOLD_MS = 30000; -static const int32_t ACTIVE_INTERVAL_MS = 50; -static const int32_t MEDIUM_INTERVAL_MS = 200; -static const int32_t IDLE_INTERVAL_MS = 1000; - static SSLCert *cert; static HTTPSServer *secureServer; static HTTPServer *insecureServer; @@ -181,32 +175,6 @@ WebServerThread::WebServerThread() : concurrency::OSThread("WebServer") if (!config.network.wifi_enabled && !config.network.eth_enabled) { disable(); } - lastActivityTime = millis(); -} - -void WebServerThread::markActivity() -{ - lastActivityTime = millis(); -} - -int32_t WebServerThread::getAdaptiveInterval() -{ - uint32_t currentTime = millis(); - uint32_t timeSinceActivity; - - if (currentTime >= lastActivityTime) { - timeSinceActivity = currentTime - lastActivityTime; - } else { - timeSinceActivity = (UINT32_MAX - lastActivityTime) + currentTime + 1; - } - - if (timeSinceActivity < ACTIVE_THRESHOLD_MS) { - return ACTIVE_INTERVAL_MS; - } else if (timeSinceActivity < MEDIUM_THRESHOLD_MS) { - return MEDIUM_INTERVAL_MS; - } else { - return IDLE_INTERVAL_MS; - } } int32_t WebServerThread::runOnce() @@ -221,7 +189,8 @@ int32_t WebServerThread::runOnce() ESP.restart(); } - return getAdaptiveInterval(); + // Loop every 5ms. 
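+    // (The value returned from runOnce() is the delay in ms before the next run is scheduled, so this polls the
+    // web server at a fixed 5 ms cadence in place of the removed adaptive interval.)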
+ return (5); } void initWebServer() diff --git a/src/mesh/http/WebServer.h b/src/mesh/http/WebServer.h index e7a29a5a7..815d87432 100644 --- a/src/mesh/http/WebServer.h +++ b/src/mesh/http/WebServer.h @@ -10,17 +10,13 @@ void createSSLCert(); class WebServerThread : private concurrency::OSThread { - private: - uint32_t lastActivityTime = 0; public: WebServerThread(); uint32_t requestRestart = 0; - void markActivity(); protected: virtual int32_t runOnce() override; - int32_t getAdaptiveInterval(); }; extern WebServerThread *webServerThread; diff --git a/src/modules/TraceRouteModule.cpp b/src/modules/TraceRouteModule.cpp index 5bdde1919..fc2cc232b 100644 --- a/src/modules/TraceRouteModule.cpp +++ b/src/modules/TraceRouteModule.cpp @@ -21,11 +21,6 @@ void TraceRouteModule::alterReceivedProtobuf(meshtastic_MeshPacket &p, meshtasti { const meshtastic_Data &incoming = p.decoded; - // Update next-hops using returned route - if (incoming.request_id) { - updateNextHops(p, r); - } - // Insert unknown hops if necessary insertUnknownHops(p, r, !incoming.request_id); @@ -158,65 +153,6 @@ void TraceRouteModule::alterReceivedProtobuf(meshtastic_MeshPacket &p, meshtasti } } -void TraceRouteModule::updateNextHops(meshtastic_MeshPacket &p, meshtastic_RouteDiscovery *r) -{ - // E.g. if the route is A->B->C->D and we are B, we can set C as next-hop for C and D - // Similarly, if we are C, we can set D as next-hop for D - // If we are A, we can set B as next-hop for B, C and D - - // First check if we were the original sender or in the original route - int8_t nextHopIndex = -1; - if (isToUs(&p)) { - nextHopIndex = 0; // We are the original sender, next hop is first in route - } else { - // Check if we are in the original route - for (uint8_t i = 0; i < r->route_count; i++) { - if (r->route[i] == nodeDB->getNodeNum()) { - nextHopIndex = i + 1; // Next hop is the one after us - break; - } - } - } - - // If we are in the original route, update the next hops - if (nextHopIndex != -1) { - // For every node after us, we can set the next-hop to the first node after us - NodeNum nextHop; - if (nextHopIndex == r->route_count) { - nextHop = p.from; // We are the last in the route, next hop is destination - } else { - nextHop = r->route[nextHopIndex]; - } - - if (nextHop == NODENUM_BROADCAST) { - return; - } - uint8_t nextHopByte = nodeDB->getLastByteOfNodeNum(nextHop); - - // For the rest of the nodes in the route, set their next-hop - // Note: if we are the last in the route, this loop will not run - for (int8_t i = nextHopIndex; i < r->route_count; i++) { - NodeNum targetNode = r->route[i]; - maybeSetNextHop(targetNode, nextHopByte); - } - - // Also set next-hop for the destination node - maybeSetNextHop(p.from, nextHopByte); - } -} - -void TraceRouteModule::maybeSetNextHop(NodeNum target, uint8_t nextHopByte) -{ - if (target == NODENUM_BROADCAST) - return; - - meshtastic_NodeInfoLite *node = nodeDB->getMeshNode(target); - if (node && node->next_hop != nextHopByte) { - LOG_INFO("Updating next-hop for 0x%08x to 0x%02x based on traceroute", target, nextHopByte); - node->next_hop = nextHopByte; - } -} - void TraceRouteModule::processUpgradedPacket(const meshtastic_MeshPacket &mp) { if (mp.which_payload_variant != meshtastic_MeshPacket_decoded_tag || mp.decoded.portnum != meshtastic_PortNum_TRACEROUTE_APP) diff --git a/src/modules/TraceRouteModule.h b/src/modules/TraceRouteModule.h index dac422388..a069f7157 100644 --- a/src/modules/TraceRouteModule.h +++ b/src/modules/TraceRouteModule.h @@ -55,12 +55,6 @@ class 
TraceRouteModule : public ProtobufModule, // Call to add your ID to the route array of a RouteDiscovery message void appendMyIDandSNR(meshtastic_RouteDiscovery *r, float snr, bool isTowardsDestination, bool SNRonly); - // Update next-hops in the routing table based on the returned route - void updateNextHops(meshtastic_MeshPacket &p, meshtastic_RouteDiscovery *r); - - // Helper to update next-hop for a single node - void maybeSetNextHop(NodeNum target, uint8_t nextHopByte); - /* Call to print the route array of a RouteDiscovery message. Set origin to where the request came from. Set dest to the ID of its destination, or NODENUM_BROADCAST if it has not yet arrived there. */ diff --git a/src/power.h b/src/power.h index cdbdd3ea0..23eb95064 100644 --- a/src/power.h +++ b/src/power.h @@ -138,7 +138,6 @@ class Power : private concurrency::OSThread void reboot(); // open circuit voltage lookup table uint8_t low_voltage_counter; - int32_t lastLogTime = 0; #ifdef DEBUG_HEAP uint32_t lastheap; #endif
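
For reference, a compact, self-contained sketch of the duplicate-vs-upgrade decision the FloodingRouter/NextHopRouter/PacketHistory hunks above implement. Everything here (Packet, SeenTable, TxQueue, handleRx) is a hypothetical stand-in, not the firmware's real API; it only models the control flow under the simplifying assumption that a queued copy always exists to replace.

// Hypothetical, simplified model of the hop_limit upgrade handling in this patch.
#include <cstdint>
#include <cstdio>
#include <map>
#include <utility>

struct Packet {
    uint32_t from = 0, id = 0;
    uint8_t hop_limit = 0;
};

// Stand-in for PacketHistory: remembers the best hop_limit seen per (from, id).
struct SeenTable {
    std::map<std::pair<uint32_t, uint32_t>, uint8_t> best;

    // Returns {seenRecently, wasUpgraded}. An upgrade reports seenRecently = false so the caller
    // can still act on the packet, mirroring wasSeenRecently() in the patch.
    std::pair<bool, bool> check(const Packet &p)
    {
        const auto key = std::make_pair(p.from, p.id);
        const auto it = best.find(key);
        if (it == best.end()) {
            best[key] = p.hop_limit;
            return {false, false}; // first time we see this packet
        }
        if (p.hop_limit > it->second) {
            it->second = p.hop_limit;
            return {false, true}; // same packet, but with more hops left
        }
        return {true, false}; // plain duplicate
    }
};

// Stand-in for the TX queue behind iface->removePendingTXPacket() / send().
struct TxQueue {
    // Pretend a queued copy with fewer hops remaining was found and dropped.
    bool removePending(uint32_t /*from*/, uint32_t /*id*/, uint8_t /*dropThreshold*/) { return true; }
    void enqueue(const Packet &p) { std::printf("rebroadcast id=0x%08x hop_limit=%u\n", (unsigned)p.id, p.hop_limit); }
};

// Returns true when the incoming packet must stop here (duplicate, or an upgrade we already re-queued).
bool handleRx(const Packet &p, SeenTable &seen, TxQueue &tx)
{
    auto [seenRecently, wasUpgraded] = seen.check(p);
    if (wasUpgraded) {
        if (p.hop_limit > 0 && tx.removePending(p.from, p.id, p.hop_limit)) {
            tx.enqueue(p);   // put the copy with more hops left on the air instead
            return true;     // but never hand it to applications a second time
        }
        seenRecently = true; // nothing queued to improve: treat like an ordinary duplicate
    }
    return seenRecently;
}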