diff --git a/.travis.yml b/.travis.yml index b6675ce..8ff09af 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,4 +12,4 @@ install: - make deps script: - - make check check-sr NGSDN_TUTORIAL_SUDO=sudo + - make check check-sr check-gtp NGSDN_TUTORIAL_SUDO=sudo diff --git a/EXERCISE-7.md b/EXERCISE-7.md index 334551e..8182179 100644 --- a/EXERCISE-7.md +++ b/EXERCISE-7.md @@ -182,6 +182,9 @@ Re-start ONOS and Mininet, this time with the new IPv4 topology: $ make start-v4 +Wait about 1 minute before proceeding with the next steps, this will give time +to ONOS to start all of its subsystems. + ### 2. Load fabric pipeconf and segmentrouting Differently from previous exercises, instead of building and installing our own @@ -596,6 +599,6 @@ forwarding in a 2x2 leaf-spine fabric of IPv4 hosts. [netcfg.json]: mininet/netcfg.json [docker-compose.yml]: docker-compose.yml [pseudo-wire]: https://en.wikipedia.org/wiki/Pseudo-wire -[onos/apps/segmentrouting]: https://github.com/opennetworkinglab/onos/tree/onos-2.2/apps/segmentrouting -[onos/pipelines/fabric]: https://github.com/opennetworkinglab/onos/tree/onos-2.2/pipelines/fabric +[onos/apps/segmentrouting]: https://github.com/opennetworkinglab/onos/tree/2.2.2/apps/segmentrouting +[onos/pipelines/fabric]: https://github.com/opennetworkinglab/onos/tree/2.2.2/pipelines/fabric [fabric-tofino]: https://github.com/opencord/fabric-tofino diff --git a/EXERCISE-8.md b/EXERCISE-8.md new file mode 100644 index 0000000..24e51b9 --- /dev/null +++ b/EXERCISE-8.md @@ -0,0 +1,451 @@ +# Exercise 8: GTP termination with fabric.p4 + +The goal of this exercise is to learn how to use Trellis and fabric.p4 to +encapsulate and route packets using the GTP header as in a 4G/5G mobile core +Serving and Packet Gateway (SPGW). + +## Overview + +![Topology GTP](img/topo-gtp.png) + +The topology we will use in this exercise ([topo-gtp.py]) is a very simple one, +with the usual 2x2 fabric, but only two hosts emulating: + +* An eNodeB, i.e., a base station providing radio connectivity to User + Equipments (UEs) such as smartphones; +* An host on the Packet Data Network (PDN), i.e., any host on the Internet. + +To provide connectivity between the UEs and the Internet, we need to program our +fabric to act as a SPGW. The SPGW is a very complex and feature-rich component +of the mobile architecture that is used as a gateway between the base stations +and the Internet. Base stations aggregate UE traffic in GTP tunnels (one or more +per UE). The SPGW has many functions, among which that of terminating GTP +tunnels. In other words, it encapsulates downlink traffic (Internet→UE) in an +additional IPv4+UDP+GTP-U header, or it removes it for the uplink direction +(UE→Internet). + +In this exercise you will learn how to: + +* Program a switch with the `fabric-spgw` profile; +* Use Trellis to route traffic from the PDN to the eNodeB; +* Use the ONOS REST APIs to enable GTP encapsulation of downlink traffic on + `leaf1`. + +## Exercise steps + +### 1. Start ONOS and Mininet with GTP topology + +Since we want to use a different topology, we need to reset the current +environment (if currently active): + + $ make reset + +This command will stop ONOS and Mininet and remove any state associated with +them. + +Re-start ONOS and Mininet, this time with the new topology: + +**IMPORTANT:** please notice the `-gtp` suffix! + + $ make start-gtp + +Wait about 1 minute before proceeding with the next steps, this will give time +to ONOS to start all of its subsystems. + +### 2. 
Load additional apps + +As in the previous exercises, let's activate the `segmentrouting` and `fabric` +pipeconf app using the ONOS CLI (`make onos-cli`): + + onos> app activate fabric + onos> app activate segmentrouting + +Let's also activate a third app named `netcfghostprovider`: + + onos> app activate netcfghostprovider + +The `netcfghostprovider` (Network Config Host Provider ) is a built-in service +similar to the `hostprovider` (Host Location Provider) seen in the previous +exercises. It is responsible for registering hosts in the system, however, +differently from `hostprovider`, it does not listen for ARP or DHCP packet-ins +to automatically discover hosts. Instead, it uses information in the netcfg JSON +file, allowing operators to pre-populate the ONOS host store. + +This is useful for static topologies and to avoid relying on ARP, DHCP, and +other host-generated traffic. In this exercise, we use the netcfg JSON to +configure the location of the `enodeb` and `pdn` hosts. + +#### Verify apps + +The complete list of apps should look like the following (21 in total) + + onos> apps -s -a + * 18 org.onosproject.drivers 2.2.2 Default Drivers + * 37 org.onosproject.protocols.grpc 2.2.2 gRPC Protocol Subsystem + * 38 org.onosproject.protocols.gnmi 2.2.2 gNMI Protocol Subsystem + * 39 org.onosproject.generaldeviceprovider 2.2.2 General Device Provider + * 40 org.onosproject.protocols.gnoi 2.2.2 gNOI Protocol Subsystem + * 41 org.onosproject.drivers.gnoi 2.2.2 gNOI Drivers + * 42 org.onosproject.route-service 2.2.2 Route Service Server + * 43 org.onosproject.mcast 2.2.2 Multicast traffic control + * 44 org.onosproject.portloadbalancer 2.2.2 Port Load Balance Service + * 45 org.onosproject.segmentrouting 2.2.2 Segment Routing + * 53 org.onosproject.hostprovider 2.2.2 Host Location Provider + * 54 org.onosproject.lldpprovider 2.2.2 LLDP Link Provider + * 64 org.onosproject.protocols.p4runtime 2.2.2 P4Runtime Protocol Subsystem + * 65 org.onosproject.p4runtime 2.2.2 P4Runtime Provider + * 96 org.onosproject.netcfghostprovider 2.2.2 Network Config Host Provider + * 99 org.onosproject.drivers.gnmi 2.2.2 gNMI Drivers + * 100 org.onosproject.drivers.p4runtime 2.2.2 P4Runtime Drivers + * 101 org.onosproject.pipelines.basic 2.2.2 Basic Pipelines + * 102 org.onosproject.drivers.stratum 2.2.2 Stratum Drivers + * 103 org.onosproject.drivers.bmv2 2.2.2 BMv2 Drivers + * 111 org.onosproject.pipelines.fabric 2.2.2 Fabric Pipeline + * 164 org.onosproject.gui2 2.2.2 ONOS GUI2 + +#### Verify pipeconfs + +All `fabric` pipeconf profiles should have been registered by now. Take a note +on the full ID of the one with SPGW capabilities (`fabric-spgw`), you will need +this ID in the next step. 
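+
+Before looking at the pipeconf list, it can help to visualize what the SPGW
+role described in the Overview means at the packet level. The following Scapy
+sketch is an illustration only: it is not one of the tutorial scripts, and it
+borrows the addresses and tunnel ID (TEID) that you will configure later in
+this exercise. It shows a downlink packet before and after GTP-U
+encapsulation:
+
+```
+#!/usr/bin/env python
+# Illustration only: a downlink packet as sent by the PDN host, and the same
+# packet after the SPGW wraps it in outer IPv4 + UDP + GTP-U headers.
+from scapy.contrib import gtp
+from scapy.layers.inet import IP, UDP
+
+# Packet sent by the PDN host towards the UE.
+inner = IP(src="10.0.200.1", dst="17.0.0.1") / UDP(sport=80, dport=400) / "data"
+
+# Same packet after GTP encapsulation at leaf1: outer IPv4 (SPGW -> eNodeB),
+# UDP on the GTP-U port (2152), and a GTP-U header carrying the tunnel ID.
+outer = (IP(src="10.0.100.254", dst="10.0.100.1") /
+         UDP(sport=2152, dport=2152) /
+         gtp.GTP_U_Header(teid=0xbeef) /
+         inner)
+
+outer.show()
+```
+
+The full `pipeconfs` output should look like this: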
+
+    onos> pipeconfs
+    id=org.onosproject.pipelines.fabric-full, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner, IntProgrammable], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.int, behaviors=[PiPipelineInterpreter, Pipeliner, PortStatisticsDiscovery, IntProgrammable], extensions=[P4_INFO_TEXT, BMV2_JSON]
+    id=org.onosproject.pipelines.fabric-spgw-int, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner, IntProgrammable], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.fabric, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.fabric-bng, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner, BngProgrammable], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.fabric-spgw, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.fabric-int, behaviors=[PortStatisticsDiscovery, PiPipelineInterpreter, Pipeliner, IntProgrammable], extensions=[P4_INFO_TEXT, BMV2_JSON, CPU_PORT_TXT]
+    id=org.onosproject.pipelines.basic, behaviors=[PiPipelineInterpreter, Pipeliner, PortStatisticsDiscovery], extensions=[P4_INFO_TEXT, BMV2_JSON]
+
+### 3. Modify and push netcfg to use fabric-spgw profile
+
+Up until now, we have used topologies where all switches were configured with
+the same pipeconf, and hence the same P4 program.
+
+In this exercise, we want most switches to run the basic `fabric` profile,
+while `leaf1` should act as the SPGW, and so it must be programmed with the
+`fabric-spgw` profile.
+
+#### Modify netcfg JSON
+
+Let's modify the netcfg JSON to use the `fabric-spgw` profile on switch
+`leaf1`.
+
+1. Open up file [netcfg-gtp.json] and look for the configuration of `leaf1`
+   in the `"devices"` block. It should look like this:
+
+    ```
+    "devices": {
+      "device:leaf1": {
+        "basic": {
+          "managementAddress": "grpc://mininet:50001?device_id=1",
+          "driver": "stratum-bmv2",
+          "pipeconf": "org.onosproject.pipelines.fabric",
+          ...
+    ```
+
+2. Modify the `pipeconf` property to use the full ID of the `fabric-spgw`
+   profile obtained in the previous step.
+
+3. Save the file.
+
+#### Push netcfg to ONOS
+
+On a terminal window, type:
+
+**IMPORTANT**: please notice the `-gtp` suffix!
+
+    $ make netcfg-gtp
+
+Use the ONOS CLI (`make onos-cli`) to verify that all 4 switches are connected
+to ONOS and provisioned with the right pipeconf/profile.
+
+    onos> devices -s
+    id=device:leaf1, available=true, role=MASTER, type=SWITCH, driver=stratum-bmv2:org.onosproject.pipelines.fabric-spgw
+    id=device:leaf2, available=true, role=MASTER, type=SWITCH, driver=stratum-bmv2:org.onosproject.pipelines.fabric
+    id=device:spine1, available=true, role=MASTER, type=SWITCH, driver=stratum-bmv2:org.onosproject.pipelines.fabric
+    id=device:spine2, available=true, role=MASTER, type=SWITCH, driver=stratum-bmv2:org.onosproject.pipelines.fabric
+
+Make sure `leaf1` uses the driver with the `fabric-spgw` pipeconf, while all
+other switches use the basic `fabric` pipeconf.
+
+##### Troubleshooting
+
+If `leaf1` does NOT have `available=true`, it probably means that you have
+inserted the wrong pipeconf ID in [netcfg-gtp.json] and ONOS is not able to
+perform the initial provisioning.
+
+Check the ONOS log (`make onos-log`) for possible errors. 
Remember from the +previous exercise that some errors are expected (e.g., for unsupported +`PSEUDO_WIRE` flow objectives). If you see an error like this: + + ERROR [DeviceTaskExecutor] Unable to complete task CONNECTION_SETUP for device:leaf1: pipeconf ... not registered + +It means you have to go to the previous step to correct your pipeconf ID. Modify +the [netcfg-gtp.json] file and push it again using `make netcfg-gtp`. Use the +ONOS CLI and log to make sure the issue is fixed before proceeding. + +#### Check configuration in ONOS + +Check the interface configuration. In this topology we want `segmentrouting` to +forward traffic based on two IP subnets: + + onos> interfaces + leaf1-3: port=device:leaf1/3 ips=[10.0.100.254/24] mac=00:AA:00:00:00:01 vlanUntagged=100 + leaf2-3: port=device:leaf2/3 ips=[10.0.200.254/24] mac=00:AA:00:00:00:02 vlanUntagged=200 + +Check that the `enodeb` and `pdn` hosts have been discovered: + + onos> hosts + id=00:00:00:00:00:10/None, mac=00:00:00:00:00:10, locations=[device:leaf1/3], auxLocations=null, vlan=None, ip(s)=[10.0.100.1], ..., name=enodeb, ..., provider=host:org.onosproject.netcfghost, configured=true + id=00:00:00:00:00:20/None, mac=00:00:00:00:00:20, locations=[device:leaf2/3], auxLocations=null, vlan=None, ip(s)=[10.0.200.1], ..., name=pdn, ..., provider=host:org.onosproject.netcfghost, configured=true + +`provider=host:org.onosproject.netcfghost` and `configured=true` are indications +that the host entry was created by `netcfghostprovider`. + +### 4. Verify IP connectivity between PDN and eNodeB + +Since the two hosts have been already discovered, they should be pingable. + +Using the Mininet CLI (`make mn-cli`) start a ping between `enodeb` and `pdn`: + + mininet> enodeb ping pdn + PING 10.0.200.1 (10.0.200.1) 56(84) bytes of data. + 64 bytes from 10.0.200.1: icmp_seq=1 ttl=62 time=1053 ms + 64 bytes from 10.0.200.1: icmp_seq=2 ttl=62 time=49.0 ms + 64 bytes from 10.0.200.1: icmp_seq=3 ttl=62 time=9.63 ms + ... + +### 5. Start PDN and eNodeB processes + +We have created two Python scripts to emulate the PDN sending downlink +traffic to the UEs, and the eNodeB, expecting to receive the +same traffic but GTP-encapsulated. + +On a new terminal window, start the [send-udp.py] script on the `pdn` host: + + $ util/mn-cmd pdn /mininet/send-udp.py + Sending 5 UDP packets per second to 17.0.0.1... + +[util/mn-cmd] is a convenience script to run commands on mininet hosts when +using multiple terminal windows. + +[mininet/send-udp.py][send-udp.py] generates packets with destination +IPv4 address `17.0.0.1` (UE address). In the rest of the exercise we +will configure Trellis to route these packets through switch `leaf1`, and +we will insert a flow rule in this switch to perform the GTP +encapsulation. For now, this traffic will be dropped at `leaf2`. + +On a second terminal window, start the [recv-gtp.py] script on the `enodeb` +host: + +``` +$ util/mn-cmd enodeb /mininet/recv-gtp.py +Will print a line for each UDP packet received... +``` + +[mininet/recv-gtp.py][recv-gtp.py] simply sniffs packets received and prints +them on screen, informing if the packet is GTP encapsulated or not. You should +see no packets being printed for the moment. + +#### Use ONOS UI to visualize traffic + +Using the ONF Cloud Tutorial Portal, access the ONOS UI. +If you are using the tutorial VM, open up a browser (e.g. Firefox) to +. + +When asked, use the username `onos` and password `rocks`. + +To show hosts, press H. 
To show real-time link utilization, press A multiple times until
+"Port stats (packets / second)" is shown.
+
+You should see traffic (5 pps) on the link between the `pdn` host and `leaf2`,
+but not on other links. **Packets are dropped at switch `leaf2`, as this switch
+does not know how to route packets with IPv4 destination `17.0.0.1`.**
+
+### 6. Install route for UE subnet and debug table entries
+
+Using the ONOS CLI (`make onos-cli`), type the following command to add a route
+for the UE subnet (`17.0.0.0/24`) with next hop the `enodeb` (`10.0.100.1`):
+
+    onos> route-add 17.0.0.0/24 10.0.100.1
+
+Check that the new route has been successfully added:
+
+```
+onos> routes
+B: Best route, R: Resolved route
+
+Table: ipv4
+B R  Network            Next Hop        Source (Node)
+> *  17.0.0.0/24        10.0.100.1      STATIC (-)
+   Total: 1
+...
+```
+
+Since `10.0.100.1` is a host known to ONOS, i.e., we know its location in the
+topology (see the `*` under the `R` column, which stands for "Resolved route"),
+`segmentrouting` uses this information to compute paths and install the
+necessary table entries to forward packets with IPv4 destination address
+matching `17.0.0.0/24`.
+
+Open up the terminal window with the [recv-gtp.py] script running on the
+`enodeb` host; you should see the following output:
+
+    [...] 691 bytes: 10.0.200.1 -> 17.0.0.1, is_gtp_encap=False
+        Ether / IP / UDP 10.0.200.1:80 > 17.0.0.1:400 / Raw
+    ....
+
+These lines indicate that a packet has been received at the eNodeB. The static
+route is working! However, there's no trace of GTP headers yet. We'll get back
+to this soon, but for now, let's take a look at table entries in `fabric.p4`.
+
+Feel free to also check the ONOS UI to see packets forwarded across the
+spines and delivered to the eNodeB (the next hop of our static route).
+
+#### Debug fabric.p4 table entries
+
+You can verify that the table entries for the static route have been added to
+the switches by "grepping" the output of the ONOS CLI `flows` command, for
+example for `leaf2`:
+
+    onos> flows -s any device:leaf2 | grep "17.0.0.0"
+    ADDED, bytes=0, packets=0, table=FabricIngress.forwarding.routing_v4, priority=48010, selector=[IPV4_DST:17.0.0.0/24], treatment=[immediate=[FabricIngress.forwarding.set_next_id_routing_v4(next_id=0xd)]]
+
+One entry has been `ADDED` to table `FabricIngress.forwarding.routing_v4` with
+`next_id=0xd`.
+
+Let's grep the flow rules for `next_id=0xd`:
+
+    onos> flows -s any device:leaf2 | grep "next_id=0xd"
+    ADDED, bytes=0, packets=0, table=FabricIngress.forwarding.routing_v4, priority=48010, selector=[IPV4_DST:17.0.0.0/24], treatment=[immediate=[FabricIngress.forwarding.set_next_id_routing_v4(next_id=0xd)]]
+    ADDED, bytes=0, packets=0, table=FabricIngress.forwarding.routing_v4, priority=48010, selector=[IPV4_DST:10.0.100.0/24], treatment=[immediate=[FabricIngress.forwarding.set_next_id_routing_v4(next_id=0xd)]]
+    ADDED, bytes=1674881, packets=2429, table=FabricIngress.next.hashed, priority=0, selector=[next_id=0xd], treatment=[immediate=[GROUP:0xd]]
+    ADDED, bytes=1674881, packets=2429, table=FabricIngress.next.next_vlan, priority=0, selector=[next_id=0xd], treatment=[immediate=[FabricIngress.next.set_vlan(vlan_id=0xffe)]]
+
+Notice that another route shares the same next ID (`10.0.100.0/24`, derived
+from the interface configuration of `leaf1`), and that the next ID points to a
+group with the same value (`GROUP:0xd`). 
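+
+If you prefer to inspect these entries programmatically rather than grepping
+the CLI output, the same information is exposed by the ONOS REST API, using
+the same `localhost:8181` endpoint and `onos`/`rocks` credentials used by the
+`make` targets in this tutorial. The following is a minimal, hypothetical
+sketch: it is not part of the tutorial repository and assumes the Python
+`requests` package is available on your machine:
+
+```
+#!/usr/bin/env python
+# Hypothetical helper (not included in the tutorial repo): prints the flow
+# rules installed on leaf2 whose selector matches the UE subnet, using ONOS's
+# GET /onos/v1/flows/{deviceId} endpoint with the default onos/rocks
+# credentials.
+import json
+import requests
+
+ONOS_API = "http://localhost:8181/onos/v1"
+# ":" must be percent-encoded when the device ID is used in a URL path.
+DEVICE_ID = "device%3Aleaf2"
+
+resp = requests.get("{}/flows/{}".format(ONOS_API, DEVICE_ID),
+                    auth=("onos", "rocks"))
+resp.raise_for_status()
+
+for flow in resp.json().get("flows", []):
+    criteria = flow.get("selector", {}).get("criteria", [])
+    # Keep only entries whose selector matches on the UE subnet.
+    if any(c.get("type") == "IPV4_DST" and "17.0.0." in str(c.get("ip", ""))
+           for c in criteria):
+        print(json.dumps(flow, indent=2))
+```
+
+For the rest of this exercise we will stick to the CLI output shown above.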
+
+Let's now take a look at this specific group (`0xd`):
+
+    onos> groups any device:leaf2 | grep "0xd"
+    id=0xd, state=ADDED, type=SELECT, bytes=0, packets=0, appId=org.onosproject.segmentrouting, referenceCount=0
+       id=0xd, bucket=1, bytes=0, packets=0, weight=1, actions=[FabricIngress.next.mpls_routing_hashed(dmac=0xbb00000001, port_num=0x1, smac=0xaa00000002, label=0x65)]
+       id=0xd, bucket=2, bytes=0, packets=0, weight=1, actions=[FabricIngress.next.mpls_routing_hashed(dmac=0xbb00000002, port_num=0x2, smac=0xaa00000002, label=0x65)]
+
+This `SELECT` group is used to hash traffic across the spines (i.e., ECMP) and
+to push an MPLS label with hex value `0x65`, i.e., 101 in decimal.
+
+Spine switches will use this label to forward packets. Can you tell what 101
+identifies here? Hint: take a look at [netcfg-gtp.json].
+
+### 7. Use ONOS REST APIs to create GTP flow rule
+
+Finally, it is time to instruct `leaf1` to encapsulate traffic with a GTP tunnel
+header. To do this, we will insert a special table entry in the "SPGW portion"
+of the [fabric.p4] pipeline, implemented in file [spgw.p4]. Specifically, we
+will insert one entry in the [dl_sess_lookup] table, which handles downlink
+traffic (i.e., it matches on the UE IPv4 address) by setting the GTP tunnel
+info that will be used to perform the encapsulation (action
+`set_dl_sess_info`).
+
+**NOTE:** this version of spgw.p4 is from ONOS v2.2.2 (the same version used in
+this tutorial). The P4 code might have changed since then, and you might see
+different tables if you open up the same file in a different branch.
+
+To insert the flow rule, we will not use an app (which we would have to
+implement from scratch!), but instead, we will use the ONOS REST APIs. To learn
+more about the available APIs, open up the automatically generated API docs
+from your running ONOS instance:
+
+<http://localhost:8181/onos/v1/docs/>
+
+The specific API we will use to create new flow rules is `POST /flows`,
+documented in the "flows" section of those docs.
+
+This API takes a JSON request. The file
+[mininet/flowrule-gtp.json][flowrule-gtp.json] specifies the flow rule we
+want to create. This file is incomplete, and you need to modify it before
+sending it via the REST API.
+
+1. Open up file [mininet/flowrule-gtp.json][flowrule-gtp.json].
+
+   Look for the `"selector"` section that specifies the match fields:
+   ```
+   "selector": {
+     "criteria": [
+       {
+         "type": "IPV4_DST",
+         "ip": "/32"
+       }
+     ]
+   },
+   ...
+   ```
+
+2. Modify the `ip` field to match on the IP address of the UE (17.0.0.1).
+
+    Since the `dl_sess_lookup` table performs exact match on the IPv4
+    address, make sure to specify the match field with `/32` prefix length.
+
+    Also, note that the `set_dl_sess_info` action is specified as
+    `PROTOCOL_INDEPENDENT`. This is the ONOS terminology to describe custom
+    flow rule actions. For this reason, the action parameters are specified
+    as byte strings in hexadecimal format:
+
+    * `"teid": "BEEF"`: GTP tunnel identifier (48879 in decimal)
+    * `"s1u_enb_addr": "0a006401"`: destination IPv4 address of the
+      GTP tunnel, i.e., outer IPv4 header (10.0.100.1). This is the address of
+      the eNodeB.
+    * `"s1u_sgw_addr": "0a0064fe"`: source address of the GTP outer IPv4
+      header (10.0.100.254). This is the address of the switch interface
+      configured in Trellis.
+
+3. Save the [flowrule-gtp.json] file.
+
+4. Push the flow rule to ONOS using the REST APIs. 
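+
+   For reference, the sketch below shows a hypothetical way to build and push
+   the same flow rule in Python, deriving the hex-encoded addresses from their
+   dotted notation. It is not part of the tutorial repository and assumes the
+   `requests` package is available; the endpoint and credentials are the same
+   ones used by the `make` target shown next.
+
+   ```
+   #!/usr/bin/env python
+   # Hypothetical sketch: POST the same flow rule as mininet/flowrule-gtp.json
+   # to the ONOS REST API (what `make flowrule-gtp` does with cURL).
+   from binascii import hexlify
+   from socket import inet_aton
+
+   import requests
+
+   def ip_to_hex(addr):
+       # e.g. "10.0.100.254" -> "0a0064fe", the format expected for action params
+       return hexlify(inet_aton(addr)).decode()
+
+   flow = {
+       "deviceId": "device:leaf1",
+       "tableId": "FabricIngress.spgw_ingress.dl_sess_lookup",
+       "priority": 10,
+       "timeout": 0,
+       "isPermanent": True,
+       "selector": {"criteria": [{"type": "IPV4_DST", "ip": "17.0.0.1/32"}]},
+       "treatment": {"instructions": [{
+           "type": "PROTOCOL_INDEPENDENT",
+           "subtype": "ACTION",
+           "actionId": "FabricIngress.spgw_ingress.set_dl_sess_info",
+           "actionParams": {
+               "teid": "BEEF",  # GTP tunnel ID (48879)
+               "s1u_enb_addr": ip_to_hex("10.0.100.1"),    # eNodeB address
+               "s1u_sgw_addr": ip_to_hex("10.0.100.254"),  # switch interface
+           }}]},
+   }
+
+   resp = requests.post("http://localhost:8181/onos/v1/flows?appId=rest-api",
+                        json={"flows": [flow]}, auth=("onos", "rocks"))
+   resp.raise_for_status()
+   print(resp.json())
+   ```
+
+   In this exercise, however, we will use the provided `make` target, which
+   performs the same request with `cURL`.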
+ + On a terminal window, type the following commands: + + ``` + $ make flowrule-gtp + ``` + + This command uses `cURL` to push the flow rule JSON file to the ONOS REST API + endpoint. If the flow rule has been created correctly, you should see an + output similar to the following one: + + ``` + *** Pushing flowrule-gtp.json to ONOS... + curl --fail -sSL --user onos:rocks --noproxy localhost -X POST -H 'Content-Type:application/json' \ + http://localhost:8181/onos/v1/flows?appId=rest-api -d@./mininet/flowrule-gtp.json + {"flows":[{"deviceId":"device:leaf1","flowId":"54606147878186474"}]} + + ``` + +5. Check the eNodeB process. You should see that the received packets + are now GTP encapsulated! + + ``` + [...] 727 bytes: 10.0.100.254 -> 10.0.100.1, is_gtp_encap=True + Ether / IP / UDP / GTP_U_Header / IP / UDP 10.0.200.1:80 > 17.0.0.1:400 / Raw + ``` + +## Congratulations! + +You have completed the eighth exercise! You were able to use fabric.p4 and +Trellis to encapsulate traffic in GTP tunnels and to route it across the fabric. + +[topo-gtp.py]: mininet/topo-gtp.py +[netcfg-gtp.json]: mininet/netcfg-gtp.json +[send-udp.py]: mininet/send-udp.py +[recv-gtp.py]: mininet/recv-gtp.py +[util/mn-cmd]: util/mn-cmd +[fabric.p4]: https://github.com/opennetworkinglab/onos/blob/2.2.2/pipelines/fabric/impl/src/main/resources/fabric.p4 +[spgw.p4]: https://github.com/opennetworkinglab/onos/blob/2.2.2/pipelines/fabric/impl/src/main/resources/include/spgw.p4 +[dl_sess_lookup]: https://github.com/opennetworkinglab/onos/blob/2.2.2/pipelines/fabric/impl/src/main/resources/include/spgw.p4#L70 +[flowrule-gtp.json]: mininet/flowrule-gtp.json diff --git a/Makefile b/Makefile index cb1da70..3fd21fa 100644 --- a/Makefile +++ b/Makefile @@ -19,12 +19,10 @@ _docker_pull_all: docker tag ${P4RT_SH_IMG}@${P4RT_SH_SHA} ${P4RT_SH_IMG} docker pull ${P4C_IMG}@${P4C_SHA} docker tag ${P4C_IMG}@${P4C_SHA} ${P4C_IMG} - docker pull ${MN_STRATUM_IMG}@${MN_STRATUM_SHA} - docker tag ${MN_STRATUM_IMG}@${MN_STRATUM_SHA} ${MN_STRATUM_IMG} + docker pull ${STRATUM_BMV2_IMG}@${STRATUM_BMV2_SHA} + docker tag ${STRATUM_BMV2_IMG}@${STRATUM_BMV2_SHA} ${STRATUM_BMV2_IMG} docker pull ${MVN_IMG}@${MVN_SHA} docker tag ${MVN_IMG}@${MVN_SHA} ${MVN_IMG} - docker pull ${PTF_IMG}@${PTF_SHA} - docker tag ${PTF_IMG}@${PTF_SHA} ${PTF_IMG} docker pull ${GNMI_CLI_IMG}@${GNMI_CLI_SHA} docker tag ${GNMI_CLI_IMG}@${GNMI_CLI_SHA} ${GNMI_CLI_IMG} docker pull ${YANG_IMG}@${YANG_SHA} @@ -45,6 +43,9 @@ start: _start start-v4: NGSDN_TOPO_PY := topo-v4.py start-v4: _start +start-gtp: NGSDN_TOPO_PY := topo-gtp.py +start-gtp: _start + stop: $(info *** Stopping ONOS and Mininet...) @NGSDN_TOPO_PY=foo docker-compose down -t0 @@ -82,6 +83,21 @@ netcfg: _netcfg netcfg-sr: NGSDN_NETCFG_JSON := netcfg-sr.json netcfg-sr: _netcfg +netcfg-gtp: NGSDN_NETCFG_JSON := netcfg-gtp.json +netcfg-gtp: _netcfg + +flowrule-gtp: + $(info *** Pushing flowrule-gtp.json to ONOS...) + ${onos_curl} -X POST -H 'Content-Type:application/json' \ + ${onos_url}/v1/flows?appId=rest-api -d@./mininet/flowrule-gtp.json + @echo + +flowrule-clean: + $(info *** Removing all flows installed via REST APIs...) + ${onos_curl} -X DELETE -H 'Content-Type:application/json' \ + ${onos_url}/v1/flows/application/rest-api + @echo + reset: stop -$(NGSDN_TUTORIAL_SUDO) rm -rf ./tmp @@ -101,7 +117,7 @@ p4-build: p4src/main.p4 @echo "*** P4 program compiled successfully! 
Output files are in p4src/build" p4-test: - @cd ptf && PTF_DOCKER_IMG=$(PTF_IMG) ./run_tests $(TEST) + @cd ptf && PTF_DOCKER_IMG=$(STRATUM_BMV2_IMG) ./run_tests $(TEST) _copy_p4c_out: $(info *** Copying p4c outputs to app resources...) @@ -132,9 +148,6 @@ app-uninstall: app-reload: app-uninstall app-install -mn-single: - docker run --privileged --rm -it -v /tmp/mn-stratum:/tmp -p 50001:50001 ${MN_STRATUM_IMG} - yang-tools: docker run --rm -it -v ${curr_dir}/yang/demo-port.yang:/models/demo-port.yang ${YANG_IMG} @@ -147,6 +160,7 @@ solution-apply: rsync -r solution/ ./ solution-revert: + test -d working_copy $(NGSDN_TUTORIAL_SUDO) rm -rf ./app/* $(NGSDN_TUTORIAL_SUDO) rm -rf ./p4src/* $(NGSDN_TUTORIAL_SUDO) rm -rf ./ptf/* @@ -229,3 +243,25 @@ check-sr: util/mn-cmd h4 ping -c 1 172.16.2.1 make stop make solution-revert + +check-gtp: + make reset + make start-gtp + sleep 45 + util/onos-cmd app activate segmentrouting + util/onos-cmd app activate pipelines.fabric + util/onos-cmd app activate netcfghostprovider + sleep 15 + make solution-apply + make netcfg-gtp + sleep 20 + util/mn-cmd enodeb ping -c 1 10.0.100.254 + util/mn-cmd pdn ping -c 1 10.0.200.254 + util/onos-cmd route-add 17.0.0.0/24 10.0.100.1 + make flowrule-gtp + # util/mn-cmd requires a TTY because it uses docker -it option + # hence we use screen for putting it in the background + screen -d -m util/mn-cmd pdn /mininet/send-udp.py + util/mn-cmd enodeb /mininet/recv-gtp.py -e + make stop + make solution-revert diff --git a/README.md b/README.md index 238e08b..bcad2d0 100644 --- a/README.md +++ b/README.md @@ -168,7 +168,8 @@ Click on the exercise name to see the instructions: 4. [Enabling ONOS built-in services](./EXERCISE-4.md) 5. [Implementing IPv6 routing with ECMP](./EXERCISE-5.md) 6. [Implementing SRv6](./EXERCISE-6.md) - 6. [Trellis Basics](./EXERCISE-7.md) + 7. [Trellis Basics](./EXERCISE-7.md) + 8. 
[GTP termination with fabric.p4](./EXERCISE-8.md) ## Solutions diff --git a/docker-compose.yml b/docker-compose.yml index 1865584..4ead30e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: "3" services: mininet: - image: opennetworking/mn-stratum + image: opennetworking/ngsdn-tutorial:stratum_bmv2 hostname: mininet container_name: mininet privileged: true diff --git a/img/topo-gtp.png b/img/topo-gtp.png new file mode 100644 index 0000000..070cd3c Binary files /dev/null and b/img/topo-gtp.png differ diff --git a/mininet/flowrule-gtp.json b/mininet/flowrule-gtp.json new file mode 100644 index 0000000..63527e1 --- /dev/null +++ b/mininet/flowrule-gtp.json @@ -0,0 +1,33 @@ +{ + "flows": [ + { + "deviceId": "device:leaf1", + "tableId": "FabricIngress.spgw_ingress.dl_sess_lookup", + "priority": 10, + "timeout": 0, + "isPermanent": true, + "selector": { + "criteria": [ + { + "type": "IPV4_DST", + "ip": "/32" + } + ] + }, + "treatment": { + "instructions": [ + { + "type": "PROTOCOL_INDEPENDENT", + "subtype": "ACTION", + "actionId": "FabricIngress.spgw_ingress.set_dl_sess_info", + "actionParams": { + "teid": "BEEF", + "s1u_enb_addr": "0a006401", + "s1u_sgw_addr": "0a0064fe" + } + } + ] + } + } + ] +} diff --git a/mininet/netcfg-gtp.json b/mininet/netcfg-gtp.json new file mode 100644 index 0000000..43efd4e --- /dev/null +++ b/mininet/netcfg-gtp.json @@ -0,0 +1,130 @@ +{ + "devices": { + "device:leaf1": { + "basic": { + "managementAddress": "grpc://mininet:50001?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 200, + "gridY": 600 + }, + "segmentrouting": { + "name": "leaf1", + "ipv4NodeSid": 101, + "ipv4Loopback": "192.168.1.1", + "routerMac": "00:AA:00:00:00:01", + "isEdgeRouter": true, + "adjacencySids": [] + } + }, + "device:leaf2": { + "basic": { + "managementAddress": "grpc://mininet:50002?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 800, + "gridY": 600 + }, + "segmentrouting": { + "name": "leaf2", + "ipv4NodeSid": 102, + "ipv4Loopback": "192.168.1.2", + "routerMac": "00:AA:00:00:00:02", + "isEdgeRouter": true, + "adjacencySids": [] + } + }, + "device:spine1": { + "basic": { + "managementAddress": "grpc://mininet:50003?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 400, + "gridY": 400 + }, + "segmentrouting": { + "name": "spine1", + "ipv4NodeSid": 201, + "ipv4Loopback": "192.168.2.1", + "routerMac": "00:BB:00:00:00:01", + "isEdgeRouter": false, + "adjacencySids": [] + } + }, + "device:spine2": { + "basic": { + "managementAddress": "grpc://mininet:50004?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 600, + "gridY": 400 + }, + "segmentrouting": { + "name": "spine2", + "ipv4NodeSid": 202, + "ipv4Loopback": "192.168.2.2", + "routerMac": "00:BB:00:00:00:02", + "isEdgeRouter": false, + "adjacencySids": [] + } + } + }, + "ports": { + "device:leaf1/3": { + "interfaces": [ + { + "name": "leaf1-3", + "ips": [ + "10.0.100.254/24" + ], + "vlan-untagged": 100 + } + ] + }, + "device:leaf2/3": { + "interfaces": [ + { + "name": "leaf2-3", + "ips": [ + "10.0.200.254/24" + ], + "vlan-untagged": 200 + } + ] + } + }, + "hosts": { + "00:00:00:00:00:10/None": { + "basic": { + "name": "enodeb", + "gridX": 100, + "gridY": 700, + "locType": "grid", + "ips": [ + "10.0.100.1" + ], + 
"locations": [ + "device:leaf1/3" + ] + } + }, + "00:00:00:00:00:20/None": { + "basic": { + "name": "pdn", + "gridX": 850, + "gridY": 700, + "locType": "grid", + "ips": [ + "10.0.200.1" + ], + "locations": [ + "device:leaf2/3" + ] + } + } + } +} diff --git a/mininet/recv-gtp.py b/mininet/recv-gtp.py new file mode 100755 index 0000000..8ecc1e5 --- /dev/null +++ b/mininet/recv-gtp.py @@ -0,0 +1,48 @@ +#!/usr/bin/python + +# Script used in Exercise 8 that sniffs packets and prints on screen whether +# they are GTP encapsulated or not. + +import signal +import sys + +from ptf.packet import IP +from scapy.contrib import gtp +from scapy.sendrecv import sniff + +pkt_count = 0 + + +def handle_pkt(pkt, ex): + global pkt_count + pkt_count = pkt_count + 1 + if gtp.GTP_U_Header in pkt: + is_gtp_encap = True + else: + is_gtp_encap = False + + print "[%d] %d bytes: %s -> %s, is_gtp_encap=%s\n\t%s" \ + % (pkt_count, len(pkt), pkt[IP].src, pkt[IP].dst, + is_gtp_encap, pkt.summary()) + + if is_gtp_encap and ex: + exit() + + +print "Will print a line for each UDP packet received..." + + +def handle_timeout(signum, frame): + print "Timeout! Did not receive any GTP packet" + exit(1) + + +exitOnSuccess = False +if len(sys.argv) > 1 and sys.argv[1] == "-e": + # wait max 10 seconds or exit + signal.signal(signal.SIGALRM, handle_timeout) + signal.alarm(10) + exitOnSuccess = True + +sniff(count=0, store=False, filter="udp", + prn=lambda x: handle_pkt(x, exitOnSuccess)) diff --git a/mininet/send-udp.py b/mininet/send-udp.py new file mode 100755 index 0000000..1df22b0 --- /dev/null +++ b/mininet/send-udp.py @@ -0,0 +1,16 @@ +#!/usr/bin/python + +# Script used in Exercise 8. +# Send downlink packets to UE address. + +from scapy.layers.inet import IP, UDP +from scapy.sendrecv import send + +UE_ADDR = '17.0.0.1' +RATE = 5 # packets per second +PAYLOAD = ' '.join(['P4 is great!'] * 50) + +print "Sending %d UDP packets per second to %s..." % (RATE, UE_ADDR) + +pkt = IP(dst=UE_ADDR) / UDP(sport=80, dport=400) / PAYLOAD +send(pkt, inter=1.0 / RATE, loop=True, verbose=True) diff --git a/mininet/topo-gtp.py b/mininet/topo-gtp.py new file mode 100755 index 0000000..e67d8f6 --- /dev/null +++ b/mininet/topo-gtp.py @@ -0,0 +1,112 @@ +#!/usr/bin/python + +# Copyright 2019-present Open Networking Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from mininet.cli import CLI +from mininet.log import setLogLevel +from mininet.net import Mininet +from mininet.node import Host +from mininet.topo import Topo +from stratum import StratumBmv2Switch + +CPU_PORT = 255 + + +class IPv4Host(Host): + """Host that can be configured with an IPv4 gateway (default route). 
+ """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('sysctl -w net.ipv4.ip_forward=0') + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + + +class TutorialTopo(Topo): + """2x2 fabric topology for GTP encap exercise with 2 IPv4 hosts emulating an + enodeb (base station) and a gateway to a Packet Data Metwork (PDN) + """ + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Leaves + # gRPC port 50001 + leaf1 = self.addSwitch('leaf1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50002 + leaf2 = self.addSwitch('leaf2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # Spines + # gRPC port 50003 + spine1 = self.addSwitch('spine1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50004 + spine2 = self.addSwitch('spine2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # Switch Links + self.addLink(spine1, leaf1) + self.addLink(spine1, leaf2) + self.addLink(spine2, leaf1) + self.addLink(spine2, leaf2) + + # IPv4 hosts attached to leaf 1 + enodeb = self.addHost('enodeb', cls=IPv4Host, mac='00:00:00:00:00:10', + ip='10.0.100.1/24', gw='10.0.100.254') + self.addLink(enodeb, leaf1) # port 3 + + # IPv4 hosts attached to leaf 2 + pdn = self.addHost('pdn', cls=IPv4Host, mac='00:00:00:00:00:20', + ip='10.0.200.1/24', gw='10.0.200.254') + self.addLink(pdn, leaf2) # port 3 + + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + CLI(net) + net.stop() + print '#' * 80 + print 'ATTENTION: Mininet was stopped! Perhaps accidentally?' + print 'No worries, it will restart automatically in a few seconds...' 
+ print 'To access again the Mininet CLI, use `make mn-cli`' + print 'To detach from the CLI (without stopping), press Ctrl-D' + print 'To permanently quit Mininet, use `make stop`' + print '#' * 80 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts') + args = parser.parse_args() + setLogLevel('info') + + main() diff --git a/solution/mininet/flowrule-gtp.json b/solution/mininet/flowrule-gtp.json new file mode 100644 index 0000000..3a288ff --- /dev/null +++ b/solution/mininet/flowrule-gtp.json @@ -0,0 +1,33 @@ +{ + "flows": [ + { + "deviceId": "device:leaf1", + "tableId": "FabricIngress.spgw_ingress.dl_sess_lookup", + "priority": 10, + "timeout": 0, + "isPermanent": true, + "selector": { + "criteria": [ + { + "type": "IPV4_DST", + "ip": "17.0.0.1/32" + } + ] + }, + "treatment": { + "instructions": [ + { + "type": "PROTOCOL_INDEPENDENT", + "subtype": "ACTION", + "actionId": "FabricIngress.spgw_ingress.set_dl_sess_info", + "actionParams": { + "teid": "BEEF", + "s1u_enb_addr": "0a006401", + "s1u_sgw_addr": "0a0064fe" + } + } + ] + } + } + ] +} diff --git a/solution/mininet/netcfg-gtp.json b/solution/mininet/netcfg-gtp.json new file mode 100644 index 0000000..6c6659f --- /dev/null +++ b/solution/mininet/netcfg-gtp.json @@ -0,0 +1,130 @@ +{ + "devices": { + "device:leaf1": { + "basic": { + "managementAddress": "grpc://mininet:50001?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric-spgw", + "locType": "grid", + "gridX": 200, + "gridY": 600 + }, + "segmentrouting": { + "name": "leaf1", + "ipv4NodeSid": 101, + "ipv4Loopback": "192.168.1.1", + "routerMac": "00:AA:00:00:00:01", + "isEdgeRouter": true, + "adjacencySids": [] + } + }, + "device:leaf2": { + "basic": { + "managementAddress": "grpc://mininet:50002?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 800, + "gridY": 600 + }, + "segmentrouting": { + "name": "leaf2", + "ipv4NodeSid": 102, + "ipv4Loopback": "192.168.1.2", + "routerMac": "00:AA:00:00:00:02", + "isEdgeRouter": true, + "adjacencySids": [] + } + }, + "device:spine1": { + "basic": { + "managementAddress": "grpc://mininet:50003?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 400, + "gridY": 400 + }, + "segmentrouting": { + "name": "spine1", + "ipv4NodeSid": 201, + "ipv4Loopback": "192.168.2.1", + "routerMac": "00:BB:00:00:00:01", + "isEdgeRouter": false, + "adjacencySids": [] + } + }, + "device:spine2": { + "basic": { + "managementAddress": "grpc://mininet:50004?device_id=1", + "driver": "stratum-bmv2", + "pipeconf": "org.onosproject.pipelines.fabric", + "locType": "grid", + "gridX": 600, + "gridY": 400 + }, + "segmentrouting": { + "name": "spine2", + "ipv4NodeSid": 202, + "ipv4Loopback": "192.168.2.2", + "routerMac": "00:BB:00:00:00:02", + "isEdgeRouter": false, + "adjacencySids": [] + } + } + }, + "ports": { + "device:leaf1/3": { + "interfaces": [ + { + "name": "leaf1-3", + "ips": [ + "10.0.100.254/24" + ], + "vlan-untagged": 100 + } + ] + }, + "device:leaf2/3": { + "interfaces": [ + { + "name": "leaf2-3", + "ips": [ + "10.0.200.254/24" + ], + "vlan-untagged": 200 + } + ] + } + }, + "hosts": { + "00:00:00:00:00:10/None": { + "basic": { + "name": "enodeb", + "gridX": 100, + "gridY": 700, + "locType": "grid", + "ips": [ + "10.0.100.1" + ], + "locations": [ + "device:leaf1/3" + ] + } + }, + 
"00:00:00:00:00:20/None": { + "basic": { + "name": "pdn", + "gridX": 850, + "gridY": 700, + "locType": "grid", + "ips": [ + "10.0.200.1" + ], + "locations": [ + "device:leaf2/3" + ] + } + } + } +} diff --git a/util/docker/Makefile b/util/docker/Makefile index 8d9c7f6..a93c13a 100644 --- a/util/docker/Makefile +++ b/util/docker/Makefile @@ -1,20 +1,19 @@ include Makefile.vars -build: build-ptf build-mvn -push: push-ptf push-mvn +build: build-stratum_bmv2 build-mvn +push: push-stratum_bmv2 push-mvn -build-ptf: - cd ptf && docker build --build-arg MN_STRATUM_SHA=$(MN_STRATUM_SHA) \ - -t ${PTF_IMG} . +build-stratum_bmv2: + cd stratum_bmv2 && docker build -t ${STRATUM_BMV2_IMG} . build-mvn: cd ../../app && docker build --squash -f ../util/docker/mvn/Dockerfile \ -t ${MVN_IMG} . -push-ptf: +push-stratum_bmv2: # Remember to update Makefile.vars with the new image sha - docker push ${PTF_IMG} + docker push ${STRATUM_BMV2_IMG} push-mvn: # Remember to update Makefile.vars with the new image sha - docker push ${MVN_IMG} \ No newline at end of file + docker push ${MVN_IMG} diff --git a/util/docker/Makefile.vars b/util/docker/Makefile.vars index 3faf8d3..dc237d1 100644 --- a/util/docker/Makefile.vars +++ b/util/docker/Makefile.vars @@ -1,9 +1,8 @@ ONOS_IMG := onosproject/onos:2.2.2 P4RT_SH_IMG := p4lang/p4runtime-sh:latest P4C_IMG := opennetworking/p4c:stable -MN_STRATUM_IMG := opennetworking/mn-stratum:latest -MVN_IMG := ccasconeonf/ngsdn-tutorial:mvn -PTF_IMG := ccasconeonf/ngsdn-tutorial:ptf +STRATUM_BMV2_IMG := opennetworking/ngsdn-tutorial:stratum_bmv2 +MVN_IMG := opennetworking/ngsdn-tutorial:mvn GNMI_CLI_IMG := bocon/gnmi-cli:latest YANG_IMG := bocon/yang-tools:latest SSHPASS_IMG := ictu/sshpass @@ -11,9 +10,8 @@ SSHPASS_IMG := ictu/sshpass ONOS_SHA := sha256:438815ab20300cd7a31702b7dea635152c4c4b5b2fed9b14970bd2939a139d2a P4RT_SH_SHA := sha256:6ae50afb5bde620acb9473ce6cd7b990ff6cc63fe4113cf5584c8e38fe42176c P4C_SHA := sha256:8f9d27a6edf446c3801db621359fec5de993ebdebc6844d8b1292e369be5dfea -MN_STRATUM_SHA := sha256:ae7c59885509ece8062e196e6a8fb6aa06386ba25df646ed27c765d92d131692 -PTF_SHA := sha256:77ea53bf5d17c0ab984103919da9b7effd322031dadb943fcc108f9dc9607097 -MVN_SHA := sha256:415a46bd733b3b0eb66c93beff031f6fecaa6e7a5a5c8e5ed25545ea1de1f476 +STRATUM_BMV2_SHA := sha256:f31faa5e83abbb2d9cf39d28b3578f6e113225641337ec7d16d867b0667524ef +MVN_SHA := sha256:d85eb93ac909a90f49b16b33cb872620f9b4f640e7a6451859aec704b21f9243 GNMI_CLI_SHA := sha256:6f1590c35e71c07406539d0e1e288e87e1e520ef58de25293441c3b9c81dffc0 YANG_SHA := sha256:feb2dc322af113fc52f17b5735454abfbe017972c867e522ba53ea44e8386fd2 SSHPASS_SHA := sha256:6e3d0d7564b259ef9612843d220cc390e52aab28b0ff9adaec800c72a051f41c diff --git a/util/docker/ptf/Dockerfile b/util/docker/stratum_bmv2/Dockerfile similarity index 74% rename from util/docker/ptf/Dockerfile rename to util/docker/stratum_bmv2/Dockerfile index 4798747..a77364b 100644 --- a/util/docker/ptf/Dockerfile +++ b/util/docker/stratum_bmv2/Dockerfile @@ -12,9 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Docker image to run PTF-based data plane tests using stratum_bmv2 +# Docker image that extends opennetworking/mn-stratum with other dependencies +# required by this tutorial. opennetworking/mn-stratum is the official image +# from the Stratum project which contains stratum_bmv2 and the Mininet +# libraries. We extend that with PTF, scapy, etc. 
-ARG MN_STRATUM_SHA +ARG MN_STRATUM_SHA="sha256:1bba2e2c06460c73b0133ae22829937786217e5f20f8f80fcc3063dcf6707ebe" FROM bitnami/minideb:stretch as builder @@ -35,8 +38,6 @@ RUN pip install --no-cache-dir --root /output $PIP_DEPS FROM opennetworking/mn-stratum:latest@$MN_STRATUM_SHA as runtime -LABEL description="Docker image to run PTF-based data plane tests using stratum_bmv2" - ENV RUNTIME_DEPS \ make RUN install_packages $RUNTIME_DEPS @@ -45,4 +46,4 @@ COPY --from=builder /output / ENV DOCKER_RUN true -ENTRYPOINT [] \ No newline at end of file +ENTRYPOINT []