...
 
Commits (11)
......@@ -16,7 +16,7 @@
###############################################################################
# Docker image for building EdgeX Foundry Config Seed
FROM golang:1.11-alpine AS build-env
FROM golang:1.12-alpine AS build-env
# environment variables
ENV GO111MODULE=on
......
......@@ -6,7 +6,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -7,7 +7,7 @@
#
# Docker image for Golang Core Data micro service
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -6,7 +6,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -6,7 +6,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -6,7 +6,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -5,7 +5,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -5,7 +5,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -6,7 +6,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -5,7 +5,7 @@
# SPDX-License-Identifier: Apache-2.0
#
FROM golang:1.11-alpine AS builder
FROM golang:1.12-alpine AS builder
ENV GO111MODULE=on
WORKDIR /go/src/github.com/edgexfoundry/edgex-go
......
......@@ -45,6 +45,7 @@ COPY getting-started ./
COPY walk-through ./
COPY quick-start ./
COPY security/* ./
COPY application/* ./
COPY entrypoint.sh /entrypoint.sh
......
......@@ -40,7 +40,7 @@ On the "Create new instance" page, enter EdgeXData as the name of your instance
Configure the User and Topic
-----------------------------
With the MQTT instance now created, you need to configure the MQTT topic and users with access to the topic. On the Instances page displayed after creating the MQTT instance, click on the EdgeXData instance name in the instances listing.
With the MQTT instance now created, you need to configure the MQTT topic and users with access to the topic. On the Instances page displayed after creating the MQTT instance, click on the EdgeXData instance name in the instances listing.
.. image:: Export-CloudMQTTInstanceSelect.png
......@@ -52,7 +52,7 @@ In the Users section, enter a name and password for your message publishing user
.. image:: Export-CloudMQTTUserAndPass.png
Now setup the topic and access rights to allow the exportpublisher to push messages into the MQTT topic. On the same page,
Now setup the topic and access rights to allow the exportpublisher to push messages into the MQTT topic. On the same page,
under the ACL section, follow these steps:
1. push on the "Topic" tab
......@@ -69,33 +69,33 @@ Your CloudMQTT topic is now ready for EdgeX to receive sensor data published by
Run EdgeX Foundry
=================
Obtain and start EdgeX Foundry. In particular, per :doc:`../getting-started/Ch-GettingStartedUsers`, get Docker, Docker Compose setup and then pull the EdgeX docker containers. After pulling the EdgeX containers, start these containers with the following commands in order:
Obtain and start EdgeX Foundry. In particular, per :doc:`Ch-GettingStartedUsers`, get Docker, Docker Compose setup and then pull the EdgeX docker containers. After pulling the EdgeX containers, start these containers with the following commands in order:
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| **Docker Command**                 | **Description**                                                                     | **Suggested Wait Time After Completing**       |
+====================================+=====================================================================================+================================================+
| **docker-compose pull** | Pull down, but don't start, all the EdgeX Foundry microservices | Docker Compose will indicate when all the |
| | | containers have been pulled successfully |
| | | containers have been pulled successfully |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d volume | Start the EdgeX Foundry file volume--must be done before the other services are | A couple of seconds. In the time it takes to |
|                                    | started                                                                             | type the next command it should be ready.      |
|                                    | started                                                                             | type the next command it should be ready.      |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d consul | Start the configuration and registry microservice which all services must | A couple of seconds |
| | register with and get their configuration from | |
| | register with and get their configuration from | |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d config-seed | Populate the configuration/registry microservice | A couple of seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d mongo | Start the NoSQL MongoDB container | 10 seconds |
| docker-compose up -d mongo | Start the NoSQL MongoDB container | 10 seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d logging | Start the logging microservice - used by all micro services that make log entries | A couple of seconds |
| docker-compose up -d logging | Start the logging microservice - used by all micro services that make log entries | A couple of seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d notifications | Start the notifications and alerts microservice--used by many of the microservices | 30 seconds |
| | Note: this service is still implemented in Java and takes more time to start | |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d metadata | Start the Core Metadata microservice | A couple of seconds |
| docker-compose up -d metadata | Start the Core Metadata microservice | A couple of seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d data | Start the Core Data microservice | A couple of seconds |
| docker-compose up -d data | Start the Core Data microservice | A couple of seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d command | Start the Core Command microservice | A couple of seconds |
| docker-compose up -d command | Start the Core Command microservice | A couple of seconds |
+------------------------------------+-------------------------------------------------------------------------------------+------------------------------------------------+
| docker-compose up -d scheduler | Start the scheduling microservice -used by many of the microservices | 1 minute |
| | Note: this service is still implemented in Java and takes more time to start | |
......
##################################
Application Services Microservices
##################################
.. image:: ApplicationServices.png
Application Services are the means to extract, process/transform and send event/reading data from EdgeX to an endpoint or process of your choice.
Application Services are based on the idea of a "Functions Pipeline". A functions pipeline is a collection of functions that process messages (in this case EdgeX event/reading messages) in the order that you've specified. The first function in a pipeline is a trigger. A trigger begins the functions pipeline execution. A trigger is something like a message landing in a watched message queue.
An SDK is provided (the Application Functions SDK) to help build Application Services by assembling triggers, pre-existing functions and custom functions of your making into a pipeline.
.. toctree::
:maxdepth: 1
Ch-ApplicationServices
Ch-ApplicationFunctionsSDK
**Note** Application Services will replace Export Services in a future EdgeX release.
此差异已折叠。
Application Services
====================
Application Services are a means to get data from EdgeX Foundry to external systems and process (be it analytics package, enterprise or on-prem application, cloud systems like Azure IoT, AWS IoT, or Google IoT Core, etc.). Application Services provide the means for data to be prepared (transformed, enriched, filtered, etc.) and groomed (formatted, compressed, encrypted, etc.) before being sent to an endpoint of choice. Endpoints supported out of the box today include HTTP and MQTT endpoints, but will include additional offerings in the future and could include a custom endpoints.
The intention of Application Services are to address scalability concerns of the existing EdgeX Export Client and Distribution Services (Export Services) as well as provide a flexible solution for exporting data outside of EdgeX without encumbering the EdgeX development community itself with trying to support all major cloud providers and export solutions. For the Edinburgh release cycle, the existing Client and Export Service remain supported and are still considered the primary way to export data out of EdgeX. However, it is encouraged for new development efforts adopting EdgeX that the App Functions SDK and resulting Application Services be leveraged moving forward with the intention that by the Fuji release, the SDK will be moved into release status and become the primary method of exporting data from EdgeX.
Application Services are based on the idea of a "Functions Pipeline". A functions pipeline is a collection of functions that process messages (in this case EdgeX event/reading messages) in the order that you've specified. The first function in a pipeline is a trigger. A trigger begins the functions pipeline execution. A trigger is something like a message landing in a watched message queue.
.. image:: TriggersFunctions.png
An Applications Functions Software Development Kit (or App Functions SDK) is available to help create Application Services. Currently the only SDK supported language is Golang, with the intention that community developed and supported SDKs will come in the future for other languages. It is currently available as a Golang module to remain operating system (OS) agnostic and to comply with the latest EdgeX guidelines on dependency management.
Export Service Deficiencies
---------------------------
With the current export services, developers register their endpoints or MQTT clients with the provided client registration service and as events are consumed from Core Data, the export service would then relay that information to the registered endpoints in a sequential fashion. Requiring the individual export service to rebroadcast data to all registered endpoints overtime creates a bottleneck and leaves applications with a potential delay in receiving events. Furthermore, the overhead and complexity of managing all registered endpoints becomes an added burden to EdgeX services. Finally, the Export services have also begun to address a bit more than is sustainable in regard to supporting all the major cloud provider endpoints. Providing an SDK and removing cloud specific exports is one way to remain agnostic to cloud providers and enables 3rd parties to support their use of any given cloud solution and eliminates the dependency
on EdgeX to support the ever-changing cloud environment.
Application Service Improvements
---------------------------------
Providing an SDK that connects directly to a message bus by which Core Data events are published eliminates performance issues as well as allow the developers extra control on what happens
with that data as soon as it is available. Furthermore, it emphasizes configuration over registration for consuming the data. The application services can be customized to a client's needs and thereby also removing the need for client registration.
Standard Functions
------------------
As mentioned, an Application Service is a function pipeline. The SDK provides some standard functions that can be used in a functions pipeline. In the future, additional functions will be provided "standard" or in other words provided with the SDK. Additionally, developers can implement their own custom functions and add those to the Application Service functions pipeline.
.. image:: SDKFunctions.png
One of the most common use cases for working with data that come from Core Data is to filter data down to what is relevant for a given application and to format it. To help facilitate this, four primary functions ported over from the existing services today are included in the SDK. The first is the `DeviceNameFilter` function which will remove events that do not match the specified IDs and will cease execution of the pipeline if no event matches. The second is the `ValueDescriptorFilter` which exhibits the same behavior as `DeviceNameFilter` except filtering on Value Descriptor instead of DeviceID. The third and fourth provided functions in the SDK transform the data received to either XML or JSON by calling `XMLTransform` or `JSONTransform`.
Typically, after filtering and transforming the data as needed, exporting is the last step in a pipeline to ship the data where it needs to go. There are two primary functions included in the SDK to help facilitate this. The first is `HTTPPost(string url)` function that will POST the provided data to a specified endpoint, and the second is an `MQTTPublish()` function that will
publish the provided data to an MQTT Broker as specified in the configuration.
There are two primary triggers that have been included in the SDK that initiate the start of the function pipeline. First is via a POST HTTP Endpoint `/trigger` with the EdgeX event data as the body. Second is the MessageBus subscription with connection details as specified in the configuration.
Finally, data may be sent back to the message bus or HTTP response by calling .complete() on the context. If the trigger is HTTP, then it will be an HTTP Response. If the trigger is MessageBus, then it will be published to the configured host and topic.
Examples
--------
There are three example Application Services provided in the app-functions-sdk-go repository in the /examples directory that attempt to show basic structure of building an application with the app functions sdk. They also focus on how to leverage various built in provided functions as mentioned above as well as how to write your own in the case that the SDK does not provide what is needed.
#.
`Simple Filter XML <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml>`_ -> Demonstrates Filter of data by device ID and
transforming data to XML
#.
`Simple Filter XML Post <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-post>`_ -> Same example as #1, but result published to HTTP
Endpoint
#.
`Simple Filter XML MQTT <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-mqtt>`_ -> Same example as #1, but result published to MQTT
Broker
The features in the initial implementation of the App Functions SDK should be sufficient to provide the foundation for basic filtering and export needs. There are some functions in the existing export services that are not yet available in application functions and are intended to be included in a later release. This includes the Encryption Transformer, the Compressor Transformer, and Valid Event Check. See `Unsupported existing export service functions`_. The primary purpose for leaving this out was to address core pieces of functionality that would set up the ease of adding additional functions in the future.
Unsupported existing export service functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
From :doc:`./Ch-Distribution`:
**Compressor Transformer**\ –A transformer component compresses the data string to be delivered to the clients, for any clients that have requested their data be compressed either by GZIP or ZIP methods.
**Encryption Transformer**\ –An encryption component encrypts the data to be sent to the client, using the client provided keys and vectors.
**Valid Event Check**\ –The first component in the pipe and filter, before the copier (described in the previous section) is a filter that can be optionally turned on or off by configuration. This filter is a general purpose data checking filter which assesses the device- or sensor-provided Event, with associated Readings, and ensures the data conforms to the ValueDescriptor associated with the Readings.
* For example, if the data from a sensor is described by its metadata profile as adhering to a “Temperature” value descriptor of floating number type, with the value between -100° F and 200° F, but the data seen in the Event and Readings is not a floating point number, for example if the data in the reading is a word such as “cold,” instead of a number, then the Event is rejected (no client receives the data) and no further processing is accomplished on the Event by the Export Distro service.
HTTP Trigger
============
Designating an HTTP trigger will allow the pipeline to be triggered by a RESTful POST call to http://[host]:[port]/trigger/. The body of the POST must be an EdgeX event.
edgexcontext.Complete([]byte outputData) - Will send the specified data as the response to the request that originally triggered the HTTP Request.
In the main() function, note the call to HTTPPostXML or HTTPPostJSON at the end of the pipeline to return the response.
from `Simple Filter XML Post <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-post>`_
.. code::
edgexSdk.SetFunctionsPipeline(
edgexSdk.DeviceNameFilter(deviceNames),
edgexSdk.XMLTransform(),
edgexSdk.HTTPPostXML("<Your endpoint goes here>"),
)
Message Bus Trigger
===================
A message bus trigger will execute the pipeline every time data is received off of the configured topic.
Type and Topic Configuration
----------------------------
Here's an example:
.. code::
Type="messagebus"
SubscribeTopic="events"
PublishTopic=""
The Type= is set to "messagebus". EdgeX Core Data is publishing data to the events topic. So to receive data from core data, you can set your SubscribeTopic= either to "" or "events". You may also designate a PublishTopic= if you wish to publish data back to the message bus. edgexcontext.Complete([]byte outputData) - Will send data back to the message bus with the topic specified in the PublishTopic= property
Message bus connection configuration
------------------------------------
The other piece of configuration required are the connection settings:
.. code::
[MessageBus]
Type = 'zero' #specifies type of message bus (i.e. zero for ZMQ)
[MessageBus.PublishHost]
Host = '*'
Port = 5564
Protocol = 'tcp'
[MessageBus.SubscribeHost]
Host = 'localhost'
Port = 5563
Protocol = 'tcp'
By default, EdgeX Core Data publishes data to the events topic on port 5563. The publish host is used if publishing data back to the message bus.
**Important Note:** Publish Host MUST be different for every topic you wish to publish to since the SDK will bind to the specific port. 5563 for example cannot be used to publish since EdgeX Core Data has bound to that port. Similarly, you cannot have two separate instances of the app functions SDK running publishing to the same port.
In the main() function, note the call to MQTTSend at the end of the pipeline to return the response.
from `Simple Filter XML MQTT <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-mqtt>`_
.. code::
edgexSdk.SetFunctionsPipeline(
edgexSdk.DeviceNameFilter(deviceNames),
edgexSdk.XMLTransform(),
printXMLToConsole,
edgexSdk.MQTTSend(addressable, "", "", 0, false, false),
)
......@@ -6,14 +6,14 @@ Provison a Device - Modbus Example
..
For this example we will use the GS1-10P5 Modbus motor profile we have available as reference (https://github.com/edgexfoundry/device-modbus/blob/master/src/main/resources/GS1-10P5.profile.yaml, device reference: Marathon Electric MicroMax motors via PLC (http://www.marathon-motors.com/Inverter-Vector-Duty-C-Face-Footed-TEFC-Micromax-Motor_c333.htm)). I would recommend using a tool like Postman for simplifying interactions with the REST APIs (refer to the "Device and Device Service Setup (aka Device Service Creation and Device Provisioning)" section for further details at `API Demo Walkthrough`_ , all REST content is JSON). Also note that Postman is capable of importing RAML documents for API framing (RAML docs for the EdgeX services may be found in src/test/resources/raml/*.raml or on the wiki). Note that this specific example can be tweaked for use with the other Device Services.
For this example we will use the GS1-10P5 Modbus motor profile we have available as reference `GS1 Profile <https://github.com/edgexfoundry/device-modbus/blob/master/src/main/resources/GS1-10P5.profile.yaml>`_ , device reference: Marathon Electric MicroMax motors via PLC (http://www.marathon-motors.com/Inverter-Vector-Duty-C-Face-Footed-TEFC-Micromax-Motor_c333.htm)). I would recommend using a tool like Postman for simplifying interactions with the REST APIs (refer to the "Device and Device Service Setup (aka Device Service Creation and Device Provisioning)" section for further details at `API Demo Walkthrough`_ , all REST content is JSON). Also note that Postman is capable of importing RAML documents for API framing (RAML docs for the EdgeX services may be found in src/test/resources/raml or on the wiki). Note that this specific example can be tweaked for use with the other Device Services.
1. Upload the device profile above to metadata with a POST to http://localhost:48081/api/v1/deviceprofile/uploadfile and add the file as key "file" to the body
2. Add the addressable containing reachability information for the device with a POST to http://localhost:48081/api/v1/addressable:
a. If IP connected, the body will look something like: { "name": "Motor", "method": "GET", "protocol": "HTTP", "address": "10.0.1.29", "port": 502 }
b. If serially connected, the body will look something like: { "name": "Motor", "method": "GET", "protocol": "OTHER", "address": "/dev/ttyS5,9600,8,1,1", "port": 0 } (address field contains port, baud rate, number of data bits, stop bits, and parity bits in CSV form)
3. Ensure the Modbus device service is running, adjust the service name below to match if necessary or if using other device services
4. Add the device with a POST to http://localhost:48081/api/v1/device, the body will look something like:
4. Add the device with a POST to http://localhost:48081/api/v1/device, the body will look something like:
::
......@@ -27,7 +27,7 @@ For this example we will use the GS1-10P5 Modbus motor profile we have available
},
"labels": [
],
"location": null,
"service": {
......@@ -46,11 +46,3 @@ The addressable name must match/refer to the addressable added in Step 2, the se
..
Further deep dives on the different microservices and layers can be found in our EdgeX Tech Talks series (`EdgeX Tech Talks`_.) where Jim and I cover some of the intricacies of various services. Of particular relevance here is the Metadata Part 2 discussion covering Device Profiles and Device Provisioning.
Application Functions
=====================
The App Functions Software Development Kit (SDK) is a library that is available
for developers to extract and consume events from core data in the EdgeX
Framework. Currently the only supported language is Golang, with the intention
that community developed and supported SDKs will come in the future for other
languages. It is currently available as a Golang module to remain operating
system (OS) agnostic and to comply with the latest EdgeX guidelines on
dependency management.
The intention of the SDK is to address the scalability concerns of the existing
Client and Export Services as well as provide a flexible solution for
exporting data outside of EdgeX without encumbering the EdgeX development
community itself with trying to support all major cloud providers and export
solutions. For the Edinburgh release cycle, the existing Client and Export
Service remain supported and are still considered the primary way to export data
out of EdgeX. However, it is encouraged for new development efforts adopting
EdgeX that the App Functions SDK be leveraged moving forward with the intention
that by the Fuji release, the SDK will be moved into release status and become
the primary method of consuming data from EdgeX.
With the current export services, developers register their endpoints or MQTT
clients with the provided registration services and as events are consumed from
Core Data, the export service would then relay that information to the
registered endpoints in a sequential fashion. Requiring the individual export
service to rebroadcast data to all registered endpoints overtime creates a
bottleneck and leaves applications with a potential delay in receiving events.
Furthermore, the overhead and complexity of managing all registered endpoints
becomes an added burden to EdgeX services. Finally, the Export services have
also begun to address a bit more than is sustainable in regard to supporting all
the major cloud provider endpoints. Providing an SDK and removing cloud specific
exports is one way to remain agnostic to cloud providers and enables 3rd parties
to support their use of any given cloud solution and eliminates the dependency
on EdgeX to support the ever-changing cloud environment.
Providing an SDK that connects
directly to a message bus by which Core Data events are published eliminates
performance issues as well as allow the developers extra control on what happens
with that data as soon as it is available. Furthermore, it emphasizes
configuration over registration for consuming the data. The application services
can be customized to a client's needs and thereby also removing the need for
client registration.
Standard Functions
------------------
One of the most common use cases for working with data that come from
CoreData is to filter data down to what is relevant for a given application
and to format it. To help facilitate this, four primary functions ported
over from the existing services today are included in the SDK. The first is
the `DeviceNameFilter` function which will remove events that do not match the
specified IDs and will cease execution of the pipeline if no event matches.
The second is the `ValueDescriptorFilter` which exhibits the same behavior
as `DeviceNameFilter` except filtering on Value Descriptor instead of
DeviceID. The third and fourth provided functions in the SDK transform the data
received to either XML or JSON by calling `XMLTransform` or
`JSONTransform`.
Typically, after filtering and transforming the data as needed, exporting is
the last step in a pipeline to ship the data where it needs to go. There are
two primary functions included in the SDK to help facilitate this. The first
is `HTTPPost(string url)` function that will POST the provided data to a
specified endpoint, and the second is an `MQTTPublish()` function that will
publish the provided data to an MQTT Broker as specified in the
configuration.
There are two primary triggers that have been included in the SDK that
initiate the start of the function pipeline. First is via a POST HTTP
Endpoint `/trigger` with the EdgeX event data as the body. Second is the
MessageBus subscription with connection details as specified in the
configuration. See `Appendix A – Message Bus Trigger`_.
Finally, data may be sent back to the message bus or HTTP response by
calling .complete() on the context. If the trigger is HTTP, then it will be
an HTTP Response. If the trigger is MessageBus, then it will be published to
the configured host and topic.
Examples
--------
There are three example applications provided in the app-functions-sdk-go
repository in the /examples directory that attempt to show basic structure of
building an application with the app functions sdk. They also focus on how to
leverage various built in provided functions as mentioned above as well as how
to write your own in the case that the SDK does not provide what is needed.
#.
`Simple Filter XML <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml>`_ -> Demonstrates Filter of data by device ID and
transforming data to XML
#.
`Simple Filter XML Post <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-post>`_ -> Same example as #1, but result published to HTTP
Endpoint
#.
`Simple Filter XML MQTT <https://github.com/edgexfoundry/app-functions-sdk-go/tree/master/examples/simple-filter-xml-mqtt>`_ -> Same example as #1, but result published to MQTT
Broker
The features in the initial implementation of the App Functions SDK should be
sufficient to provide the foundation for basic filtering and export needs. There
are some functions in the existing export services that are not yet available in
application functions and are intended to be included in a later release. This
includes the Encryption Transformer, the Compressor Transformer, and Valid Event
Check. See `Appendix B – Existing export service functions`_. The primary purpose for leaving this out was to
address core pieces of functionality that would set up the ease of adding
additional functions in the future.
Appendices
----------
Appendix A – Message Bus Trigger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A message bus trigger will execute the pipeline every time data is received off
of the configured topic.
Type and Topic configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here's an example:
.. code-block:: ini
Type="messagebus"
SubscribeTopic="events"
PublishTopic=""
The Type= is set to "messagebus". EdgeX Core Data is publishing data to
the events topic. So to receive data from core data, you can set
your SubscribeTopic= either to "" or "events". You may also designate
a PublishTopic= if you wish to publish data back to the message
bus. edgexcontext.complete([]byte outputData) - Will send data back to
the message bus with the topic specified in the PublishTopic= property
Message bus connection configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The other piece of configuration required are the connection settings:
.. code-block:: ini
[MessageBus]
Type = 'zero' #specifies the type of message bus (i.e. zero for ZMQ)
[MessageBus.PublishHost]
Host = '*'
Port = 5564
Protocol = 'tcp'
[MessageBus.SubscribeHost]
Host = 'localhost'
Port = 5563
Protocol = 'tcp'
By default, EdgeX Core Data publishes data to the events topic on port 5563. The
publish host is used if publishing data back to the message bus.
**Important Note:**\  Publish Host \ **MUST**\  be different for every topic you wish
to publish to since the SDK will bind to the specific port. 5563 for example
cannot be used to publish since EdgeX Core Data has bound to that port.
Similarly, you cannot have two separate instances of the app functions SDK
running publishing to the same port.
Appendix B – Existing export service functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
From :doc:`./Ch-Distribution`:
**Compressor Transformer**\ –A transformer component compresses the data string to
be delivered to the clients, for any clients that have requested their data be
compressed either by GZIP or ZIP methods.
**Encryption Transformer**\ –An encryption component encrypts the data to be sent
to the client, using the client provided keys and vectors.
**Valid Event Check**\ –The first component in the pipe and filter, before the
copier (described in the previous section) is a filter that can be optionally
turned on or off by configuration. This filter is a general purpose data
checking filter which assesses the device- or sensor-provided Event, with
associated Readings, and ensures the data conforms to the ValueDescriptor
associated with the Readings.
* For example, if the data from a sensor is described by its metadata profile
as adhering to a “Temperature” value descriptor of floating number type,
with the value between -100° F and 200° F, but the data seen in the Event
and Readings is not a floating point number, for example if the data in the
reading is a word such as “cold,” instead of a number, then the Event is
rejected (no client receives the data) and no further processing is
accomplished on the Event by the Export Distro service.
......@@ -11,6 +11,10 @@ EdgeX Foundry has the following Export microservices available:
Ch-ClientRegistration
Ch-Distribution
Ch-ApplicationFunctions
Ch-GoogleIoTCore
**Note** Export Services are expected to be archived in a future release. They will be replaced by Application services. See :doc:`./Ch-ApplServices`:
.. image:: EdgeXArchitecture-6-1-19.png
:width: 200px
:height: 100px
......@@ -39,17 +39,17 @@ This Data Dictionary has the same content as the Data Dictionary for Export Serv
+---------------------+----------------------------------------------------------------------------------------------+
| **Class Name**      | **Description**                                                                              |
| **Class Name**      | **Description**                                                                              |
+=====================+==============================================================================================+
| EncryptionDetails | The object describing the encryption method and initialization vector. |
| EncryptionDetails | The object describing the encryption method and initialization vector. |
+---------------------+----------------------------------------------------------------------------------------------+
| ExportFilter | The object containing device and data filter information. |
| ExportFilter | The object containing device and data filter information. |
+---------------------+----------------------------------------------------------------------------------------------+
| ExportMessage | The object containing the data from an Event sent by Export. |
| ExportMessage | The object containing the data from an Event sent by Export. |
+---------------------+----------------------------------------------------------------------------------------------+
| ExportRegistration | The object containing the reachability method and transport parameters for an Export client. |
| ExportRegistration | The object containing the reachability method and transport parameters for an Export client. |
+---------------------+----------------------------------------------------------------------------------------------+
| ExportString | The object sent to a registered Export client containing an Event. |
| ExportString | The object sent to a registered Export client containing an Event. |
+---------------------+----------------------------------------------------------------------------------------------+
===================
......@@ -60,16 +60,11 @@ Inside the Export Distribution service, a set of components, filter and transfor
.. image:: EdgeX_ExportServicesDistributionPipeFilter.png
* **Valid Event Check**--The first component in the pipe and filter, before the copier (described in the previous section) is a filter that can be optionally turned on or off by configuration. This filter is a general purpose data checking filter which assesses the device- or sensor-provided Event, with associated Readings, and ensures the data conforms to the ValueDescriptor associated with the Readings.
* For example, if the data from a sensor is described by its metadata profile as adhering to a "Temperature" value descriptor of floating number type, with the value between -100° F and 200° F, but the data seen in the Event and Readings is not a floating point number, for example if the data in the reading is a word such as "cold," instead of a number, then the Event is rejected (no client receives the data) and no further processing is accomplished on the Event by the Export Distro service.
* **Valid Event Check**--The first component in the pipe and filter, before the copier (described in the previous section) is a filter that can be optionally turned on or off by configuration. This filter is a general purpose data checking filter which assesses the device- or sensor-provided Event, with associated Readings, and ensures the data conforms to the ValueDescriptor associated with the Readings. For example, if the data from a sensor is described by its metadata profile as adhering to a "Temperature" value descriptor of floating number type, with the value between -100° F and 200° F, but the data seen in the Event and Readings is not a floating point number, for example if the data in the reading is a word such as "cold," instead of a number, then the Event is rejected (no client receives the data) and no further processing is accomplished on the Event by the Export Distro service.
* **Client Copier**--As described above, the client copier creates a "super message" made from a copy of the incoming Event message and the details for each client registration. For two clients registered for all data received, for each Event coming from Core Data, the copier produces two copies of a super message, one for each of the two clients, through all of the Export Distro pipe and filter.
* **Filter By Device**--Clients register interest in data only from certain sensors. The Filter by Device component looks at the Event in the message and looks at the devices of interest list, provided by the client registration, and filters out those messages whose Event is for devices not on the client's list of devices of interest. Therefore data generated by a motor does not go to clients only interested in data from a thermostat.
* **Filter By Value Descriptor**--Clients register interest in data from certain types of IoT objects, such as temperatures, motion, and so forth, that may come from an array of sensors or devices. The Filter by Value Descriptor assesses the data in each Event and Reading, and removes readings that have a value descriptor that is not in the list of value descriptors of interest for the client. Therefore, pressure reading data does not go to clients only interested in motion data.
* **Filter By Value Descriptor**--Clients register interest in data from certain types of IoT objects, such as temperatures, motion, and so forth, that may come from an array of sensors or devices. The Filter by Value Descriptor assesses the data in each Event and Reading, and removes readings that have a value descriptor that is not in the list of value descriptors of interest for the client. Therefore, pressure reading data does not go to clients only interested in motion data.
* **XML and JSON Transformers**--Clients must dictate, in their registration, the format of data to be delivered to them. Current options include JSON and XML formats. Transformation components, one for each format, convert the data from the Events and Readings supplied by Core Data, into JSON or XML strings. A router looks at the client registration in the message and determines which transformer is to transform the data for the client.
* **Compressor Transformer**--A transformer component compresses the data string to be delivered to the clients, for any clients that have requested their data be compressed either by GZIP or ZIP methods.
* **Encryption Transformer**--An encryption component encrypts the data to be sent to the client, using the client provided keys and vectors.
* **REST and MQTT Adapters**--At the end of the Export Distribution pipe and filter, is a pair of components that deliver the Event and Reading data, that may have been transformed based on client request, to the client's destination of choice. The client registration provides the URL, authentication information, and so forth, to enable the adapter components to know how and where to send the data to its destination. Future endpoint adapters can be created to enable use of other protocols or destinations. A router determines which endpoint adapter to use based on the client registration information.
......@@ -10,4 +10,5 @@ EdgeX Foundry Microservices Architecture
Ch-SystemManagement
Ch-Security
Ch-ExportServices
Ch-ApplServices
Ch-DeviceServices
......@@ -11,7 +11,7 @@
Welcome to the EdgeX Foundry documentation
==================================
==========================================
.. toctree::
:maxdepth: 2
......@@ -24,11 +24,9 @@ Welcome to the EdgeX Foundry documentation
Ch-GettingStarted
Ch-Walkthrough
Ch-Examples
Ch-SecurityOverall
Ch-APIReference
Search
======
* :ref:`search`
############################
Access EdgeX REST resources
Access EdgeX REST resources
############################
When the EdgeX API Gateway is used, access to the micro service APIs must go through the reverse proxy. Requestors of EdgeX REST endpoints must therefore change the URL they use to access the services. The example below explains how to map the non-secured micro service URLs to reverse-proxy-protected URLs.
To access the ping endpoint of an EdgeX micro service (using the command service as an example), the URL is http://edgex-command-service:48082/api/v1/ping
With API gateway serving as the single access point for the EdgeX services, the ping URL is https://api-gateway-server:8443/command/api/v1/ping?jwt=<JWT-Token>
Please notice that there are 4 major differences when comparing the URLs above
1. Switch from http to https as the API Gateway server enables https
2. The host address and port are switched from original micro service host address and port to a common api gateway service address and 8443 port as the api gateway server will serve as the single point for all the EdgeX services
3. Use the name of the service (in this case “command”) within the URL to indicate that the request is to be routed to the appropriate EdgeX service (command in this example)
4. Add a JWT as part of the URL as all the REST resources are protected by either OAuth2 or JWT authentication. The JWT can be obtained when a user account is created with the security API Gateway.
1. Switch from http to https as the API Gateway server enables https
2. The host address and port are switched from original micro service host address and port to a common api gateway service address and 8443 port as the api gateway server will serve as the single point for all the EdgeX services
3. Use the name of the service (in this case “command”) within the URL to indicate that the request is to be routed to the appropriate EdgeX service (command in this example)
4. Add a JWT as part of the URL as all the REST resources are protected by either OAuth2 or JWT authentication. The JWT can be obtained when a user account is created with the security API Gateway.
此差异已折叠。
#################
Security Features
#################
########
Security
########
.. image:: EdgeX_Security.png
Security elements, both inside and outside of EdgeX Foundry, protect the data and control of devices, sensors, and other IoT objects managed by EdgeX Foundry. Based on the fact that EdgeX is a “vendor-neutral open source software platform at the edge of the network”, the EdgeX security features are also built on a foundation of open interfaces and pluggable, replaceable modules.
With security service enabled, the administrator of the EdgeX would be able to initialize the security components, set up running environment for security services, manage user access control, and create a JWT (JSON Web Token) for resource access for other EdgeX business services. There are two major EdgeX security components. The first is a security store, which is used to provide a safe place to keep the EdgeX secrets. The second is an API gateway, which is used as a reverse proxy to restrict access to EdgeX REST resources and perform access control related works.
Security elements, both inside and outside of EdgeX Foundry, protect the data and control of devices, sensors, and other IoT objects managed by EdgeX Foundry. Based on the fact that EdgeX is a “vendor-neutral open source software platform at the edge of the network”, the EdgeX security features are also built on a foundation of open interfaces and pluggable, replaceable modules.
With security service enabled, the administrator of the EdgeX would be able to initialize the security components, set up running environment for security services, manage user access control, and create a JWT (JSON Web Token) for resource access for other EdgeX business services. There are two major EdgeX security components. The first is a security store, which is used to provide a safe place to keep the EdgeX secrets. The second is an API gateway, which is used as a reverse proxy to restrict access to EdgeX REST resources and perform access control related works.
In summary, the current features are as below:
* Secret creation, store and retrieve (password, cert, access key etc.)
......@@ -16,8 +16,7 @@ In summary, the current features are as below:
.. toctree::
:maxdepth: 1
Ch-SecretStore
Ch-APIGateway
Ch-SecretStore
Ch-APIGateway
Ch-AccessEdgeXRESTResources
Ch-StartingSecurity
......@@ -4,26 +4,27 @@ Security Issues
This page describes how to report EdgeX Foundry security issues and how they are handled.
This page describes how to report EdgeX Foundry security issues and how they are handled.
======================
Security Announcements
======================
Join the edgexfoundry-announce group at: https://groups.google.com/d/forum/edgexfoundry-announce
Join the edgexfoundry-announce group at: https://groups.google.com/d/forum/edgexfoundry-announce
for emails about security and major API announcements.
=======================
Vulnerability Reporting
=======================
The EdgeX Foundry Open Source Community is grateful for all security reports made by users and security researchers.
The EdgeX Foundry Open Source Community is grateful for all security reports made by users and security researchers.
All reports are thoroughly investigated by a set of community volunteers.
.. _security_issue_template: https://github.com/edgexfoundry/edgex-go/blob/master/.github/ISSUE_TEMPLATE/4-security-issue-disclosure.md
To make a report, please email the private list: security-issues@edgexfoundry.org, providing as much detail as possible.
Use the security issue template: `security_issue_template`_.
At this time we do not yet offer an encrypted bug reporting option.
At this time we do not yet offer an encrypted bug reporting option.
When to Report a Vulnerability?
......@@ -44,17 +45,17 @@ When NOT to Report a Vulnerability?
Security Vulnerability Response
===============================
Each report is acknowledged and analyzed by the Security Issue Review (SIR) team
within one week.
Each report is acknowledged and analyzed by the Security Issue Review (SIR) team
within one week.
Any vulnerability information shared with SIR stays private, and is shared with
Any vulnerability information shared with SIR stays private, and is shared with
sub-projects as necessary to get the issue fixed.
As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated.
In the case of a 3rd party dependency (code or library not managed and maintained by the EdgeX community)
In the case of a 3rd party dependency (code or library not managed and maintained by the EdgeX community)
related security issues, while the issue report triggers the same response workflow, the EdgeX community will defer to the
owning community for fixes.
owning community for fixes.
On receipt of a security issue report, SIR:
......@@ -70,20 +71,19 @@ On receipt of a security issue report, SIR:
6. Communicates the fix
7. Uploads a Common Vulnerabilities and Exposures (CVE) style report of the issue
7. Uploads a Common Vulnerabilities and Exposures (CVE) style report of the issue
and associated threat
The issue reporter will be kept in the loop as appropriate. Note that
The issue reporter will be kept in the loop as appropriate. Note that
a critical or high severity issue can delay a scheduled release to incorporate a fix or mitigation.
========================
Public Disclosure Timing
========================
A public disclosure date is negotiated by the EdgeX Product Security Committee and the bug submitter.
We prefer to fully disclose the bug as soon as possible AFTER a mitigation is available.
It is reasonable to delay disclosure when the bug or the fix is not yet fully understood,
the solution is not well-tested, or for vendor coordination. The timeframe for disclosure
may be immediate (especially publicly known issues) to a few weeks.
A public disclosure date is negotiated by the EdgeX Product Security Committee and the bug submitter.
We prefer to fully disclose the bug as soon as possible AFTER a mitigation is available.
It is reasonable to delay disclosure when the bug or the fix is not yet fully understood,
the solution is not well-tested, or for vendor coordination. The timeframe for disclosure
may be immediate (especially publicly known issues) to a few weeks.
The EdgeX Foundry Product Security Committee holds the final say when setting a disclosure date.
......@@ -144,8 +144,6 @@ After that, simply import "github.com/edgexfoundry/go-mod-core-contracts/clients
client.Error("some info")
}
::
Log statements will only be written to the log if they match or exceed the minimum LogLevel set in the configuration (described above). This setting can be changed on the fly without restarting the service to help with real-time troubleshooting.
Log statements are currently output in a simple key/value format. For example:
......@@ -154,32 +152,24 @@ Log statements are currently output in a simple key/value format. For example:
level=INFO ts=2019-05-16T22:23:44.424176Z app=edgex-support-notifications source=cleanup.go:32 msg="Cleaning up of notifications and transmissions"
::
Everything up to the "msg" key is handled by the logging infrastructure. You get the log level, timestamp, service name and the location in the source code of the logging statement for free with every method invocation on the LoggingClient. The "msg" key's value is the first parameter passed to one of the Logging Client methods shown above. So to extend the usage example a bit, the above calls would result in something like:
::
level=INFO ts=2019-05-16T22:23:44.424176Z app=logging-demo source=main.go:11 msg="some info"
::
You can add as many custom key/value pairs as you like by simply adding them to the method call:
::
client.Info("some info","key1","abc","key2","def")
::
This would result in:
::
level=INFO ts=2019-05-16T22:23:44.424176Z app=logging-demo source=main.go:11 msg="some info" key1=abc key2=def
::
Quotes are only put around values that contain spaces.
==================
......@@ -188,7 +178,7 @@ EdgeX Logging Keys
Within the Edgex Go reference implementation, log entries are currently written as a set of key/value pairs. We may change this later to be more of a struct type than can be formatted according to the user’s requirements (JSON, XML, system, etc). In that case, the targeted struct should contain properties that support the keys utilized by the system and described below.
+-----------------------------------------------+---------------------------------------------------------------------------------------+
| **Key**                                       | **Intent**                                                                            |
| **Key** | **Intent** |
+===============================================+=======================================================================================+
| level | Indicates the log level of the individual log entry (INFO, DEBUG, ERROR, etc) |
+-----------------------------------------------+---------------------------------------------------------------------------------------+
......@@ -215,8 +205,3 @@ Within the Edgex Go reference implementation, log entries are currently written
+-----------------------------------------------+---------------------------------------------------------------------------------------+
Additional keys can be added as need warrants. This document should be kept updated to reflect their inclusion and purpose.
......@@ -19,7 +19,7 @@ The diagram shows the high-level architecture of Alerts and Notifications. On th
On the right side, the notification receiver could be a person or an application system on Cloud or in a server room. By invoking the Subscription RESTful interface to subscribe to the specific types of notifications, the receiver obtains the appropriate notifications through defined receiving channels when events occur. The receiving channels include SMS message, e-mail, REST callback, AMQP, MQTT, and so on. Currently in EdgeX Foundry, e-mail and REST callback channels are provided.
When Alerts and Notifications receive notifications from any interface, the notifications are passed to the Notifications Handler internally. The Notifications Handler persists the receiving notification first, and passes them to the Distribution Coordinator immediately if the notifications are critical (severity = “CRITICAL”). For normal notifications (severity = “NORMAL”), they wait for the Message Scheduler to process in batch.
When Alerts and Notifications receive notifications from any interface, the notifications are passed to the Notifications Handler internally. The Notifications Handler persists the receiving notification first, and passes them to the Distribution Coordinator immediately if the notifications are critical (severity = “CRITICAL”). For normal notifications (severity = “NORMAL”), they wait for the Message Scheduler to process in batch.
The Alerts and Notifications is scalable, can be expanded to add more severities and set up corresponding Message Schedulers to process them. For example, the Message Scheduler of normal severity notifications is triggered every two hours, and the minor severity notifications is triggered every 24 hours, at midnight each night.
......@@ -38,13 +38,13 @@ Data Dictionary
===============
+---------------------+--------------------------------------------------------------------------------------------+
| **Class Name**      | **Description**                                                                            |
| **Class Name**      | **Description**                                                                            |
+=====================+============================================================================================+
| Channel | The object used to describe the Notification end point. |
| Channel | The object used to describe the Notification end point. |
+---------------------+--------------------------------------------------------------------------------------------+
| Notification | The object used to describe the message and sender content of a Notification. |
| Notification | The object used to describe the message and sender content of a Notification. |
+---------------------+--------------------------------------------------------------------------------------------+
| Transmission | The object used for grouping of Notifications. |
| Transmission | The object used for grouping of Notifications. |
+---------------------+--------------------------------------------------------------------------------------------+
===============================
......@@ -67,15 +67,15 @@ When receiving a normal notification (SEVERITY = “NORMAL”), it persists firs
**Critical Resend Sequence**
When encountering any error during sending critical notification, an individual resend task is scheduled, and each transmission record persists. If the resend task keeps failing and the resend count exceeds the configurable limit, the escalation process is triggered. The escalated notification is sent to particular receivers of a special subscription (slug = “ESCALATION”).
When encountering any error during sending critical notification, an individual resend task is scheduled, and each transmission record persists. If the resend task keeps failing and the resend count exceeds the configurable limit, the escalation process is triggered. The escalated notification is sent to particular receivers of a special subscription (slug = “ESCALATION”).
.. image:: EdgeX_SupportingServicesCriticalResend.png
.. image:: EdgeX_SupportingServicesCriticalResend.png
**Resend Sequence**
For other non-critical notifications, the resend operation is triggered by a scheduler.
.. image:: EdgeX_SupportingServicesResend.png
.. image:: EdgeX_SupportingServicesResend.png
**Cleanup Sequence**
......@@ -90,7 +90,7 @@ Configuration Properties
+---------------------------------------------------------+-------------------------------------+---------------------------------------------------------------------------+
| **Configuration** | **Default Value** | **Dependencies** |
+=========================================================+=====================================+===========================================================================+
| Service MaxResultCount | 50000 \* | Read data limit per invocation |
| Service MaxResultCount | 50000 \* | Read data limit per invocation |
+---------------------------------------------------------+-------------------------------------+---------------------------------------------------------------------------+
| Service BootTimeout | 300000 \* | Heart beat time in milliseconds |
+---------------------------------------------------------+-------------------------------------+---------------------------------------------------------------------------+
......@@ -168,7 +168,7 @@ Configuration Properties
Configure Mail Server
=====================
All the properties with prefix "smtp" are for mail server configuration. Configure the mail server appropriately to send Alerts and Notifications. The correct values depend on which mail server is used.
All the properties with prefix "smtp" are for mail server configuration. Configure the mail server appropriately to send Alerts and Notifications. The correct values depend on which mail server is used.
-----
Gmail
......@@ -187,7 +187,7 @@ Then, use the following settings for the mail server properties:
Smtp Host=smtp.gmail.com
Smtp Sender=${Gmail account}
Smtp Password=${Gmail password or App password}
----------
Yahoo Mail
----------
......@@ -205,23 +205,3 @@ Then, use the following settings for the mail server properties:
Smtp Host=smtp.mail.yahoo.com
Smtp Sender=${Yahoo account}
Smtp Password=${Yahoo password or App password}
......@@ -21,10 +21,9 @@ Data Dictionary
===============
+---------------------+--------------------------------------------------------------------------------------------+
| **Class Name** | **Description** |
| **Class Name** | **Description** |
+=====================+============================================================================================+
| Interval | An object defining a specific "period" in time. |
| Interval | An object defining a specific "period" in time. |
+---------------------+--------------------------------------------------------------------------------------------+
| IntervalAction | The action taken by a Service when the Interval occurs. |
+---------------------+--------------------------------------------------------------------------------------------+
......@@ -2,7 +2,7 @@
EdgeX Demonstration API Walk Through
####################################
In order to better appreciate the EdgeX Foundry micro services (what they do and how they work), how they inter-operate with each other, and some of the more important API calls that each micro service has to offer, this demonstration API walk through shows how a device service and device are established in EdgeX, how data is sent flowing through the various services, and how data is then shipped out of EdgeX to the cloud or enterprise system.
In order to better appreciate the EdgeX Foundry micro services (what they do and how they work), how they inter-operate with each other, and some of the more important API calls that each micro service has to offer, this demonstration API walk through shows how a device service and device are established in EdgeX, how data is sent flowing through the various services, and how data is then shipped out of EdgeX to the cloud or enterprise system.
.. image:: EdgeX_WalkthroughDeployment.png
......@@ -10,7 +10,6 @@ Through this demonstration, you will play the part of various EdgeX micro servic
.. toctree::
:maxdepth: 1
:numbered:
:caption: Contents:
Ch-WalkthroughSetup
......@@ -25,5 +24,3 @@ Through this demonstration, you will play the part of various EdgeX micro servic
Ch-WalkthroughExporting
Next `Setup your environment 〉 <Ch-WalkthroughSetup.html>`_
......@@ -18,7 +18,6 @@ import (
"strconv"
"github.com/edgexfoundry/edgex-go/internal/pkg/db"
dataBase "github.com/edgexfoundry/edgex-go/internal/pkg/db"
contract "github.com/edgexfoundry/go-mod-core-contracts/models"
"github.com/gomodule/redigo/redis"
"github.com/google/uuid"
......@@ -256,15 +255,7 @@ func (c *Client) GetAllDevices() ([]contract.Device, error) {
}
func (c *Client) GetDevicesByProfileId(id string) ([]contract.Device, error) {
d, err := c.getDevicesByValue(db.Device + ":profile:" + id)
// XXX This is here only because test/db_metadata.go is inconsistent when testing for _not found_. It
// should always be checking for database.ErrNotFound but too often it is checking for nil
if len(d) == 0 {
err = dataBase.ErrNotFound
}
return d, err
return c.getDevicesByValue(db.Device + ":profile:" + id)
}
func (c *Client) GetDeviceById(id string) (contract.Device, error) {
......@@ -286,15 +277,7 @@ func (c *Client) GetDeviceByName(n string) (contract.Device, error) {
}
func (c *Client) GetDevicesByServiceId(id string) ([]contract.Device, error) {
d, err := c.getDevicesByValue(db.Device + ":service:" + id)
// XXX This is here only because test/db_metadata.go is inconsistent when testing for _not found_. It
// should always be checking for database.ErrNotFound but too often it is checking for nil
if len(d) == 0 {
err = dataBase.ErrNotFound
}
return d, err
return c.getDevicesByValue(db.Device + ":service:" + id)
}
func (c *Client) GetDevicesWithLabel(l string) ([]contract.Device, error) {
......@@ -439,15 +422,7 @@ func (c *Client) GetDeviceProfileByName(n string) (contract.DeviceProfile, error
}
func (c *Client) GetDeviceProfilesByCommandId(id string) ([]contract.DeviceProfile, error) {
dp, err := c.getDeviceProfilesByValues(db.DeviceProfile + ":command:" + id)
// XXX This is here only because test/db_metadata.go is inconsistent when testing for _not found_. It
// should always be checking for database.ErrNotFound but too often it is checking for nil
if len(dp) == 0 {
err = dataBase.ErrNotFound
}
return dp, err
return c.getDeviceProfilesByValues(db.DeviceProfile + ":command:" + id)
}
// Get device profiles with the passed query
......@@ -798,13 +773,6 @@ func (c *Client) GetDeviceServicesByAddressableId(id string) ([]contract.DeviceS
return []contract.DeviceService{}, err
}
// XXX This should really return an ErrNotFound. It's not to be consistent with existing code
// assumptions
//
// if len(objects) == 0 {
// return []contract.DeviceService{}, dataBase.ErrNotFound
// }
d := make([]contract.DeviceService, len(objects))
for i, object := range objects {
err = unmarshalDeviceService(object, &d[i])
......@@ -988,27 +956,11 @@ func (c *Client) GetProvisionWatchersByIdentifier(k string, v string) (pw []cont
}
func (c *Client) GetProvisionWatchersByServiceId(id string) ([]contract.ProvisionWatcher, error) {
pw, err := c.getProvisionWatchersByValue(db.ProvisionWatcher + ":service:" + id)
// XXX This is here only because test/db_metadata.go is inconsistent when testing for _not found_. It
// should always be checking for database.ErrNotFound but too often it is checking for nil
if len(pw) == 0 {
err = dataBase.ErrNotFound
}
return pw, err
return c.getProvisionWatchersByValue(db.ProvisionWatcher + ":service:" + id)
}
func (c *Client) GetProvisionWatchersByProfileId(id string) ([]contract.ProvisionWatcher, error) {
pw, err := c.getProvisionWatchersByValue(db.ProvisionWatcher + ":profile:" + id)
// XXX This is here only because test/db_metadata.go is inconsistent when testing for _not found_. It
// should always be checking for database.ErrNotFound but too often it is checking for nil
if len(pw) == 0 {
err = dataBase.ErrNotFound
}
return pw, err
return c.getProvisionWatchersByValue(db.ProvisionWatcher + ":profile:" + id)
}
func (c *Client) GetProvisionWatcherById(id string) (contract.ProvisionWatcher, error) {
......
......@@ -877,7 +877,7 @@ func testDBDeviceProfile(t *testing.T, db interfaces.DBClient) {
}
deviceProfiles, err = db.GetDeviceProfilesByCommandId(uuid.New().String())
if err != dataBase.ErrNotFound {
if (err != nil && err != dataBase.ErrNotFound) || len(deviceProfiles) != 0 {
t.Fatalf("Error getting deviceProfiles %v", err)
}
if len(deviceProfiles) != 0 {
......@@ -972,7 +972,7 @@ func testDBDevice(t *testing.T, db interfaces.DBClient) {
}
devices, err = db.GetDevicesByProfileId(uuid.New().String())
if err != dataBase.ErrNotFound {
if (err != nil && err != dataBase.ErrNotFound) || len(devices) != 0 {
t.Fatalf("Error getting devices %v", err)
}
if len(devices) != 0 {
......@@ -988,7 +988,7 @@ func testDBDevice(t *testing.T, db interfaces.DBClient) {
}
devices, err = db.GetDevicesByServiceId(uuid.New().String())
if err != dataBase.ErrNotFound {
if (err != nil && err != dataBase.ErrNotFound) || len(devices) != 0 {
t.Fatalf("Error getting devices %v", err)
}
if len(devices) != 0 {
......@@ -1095,7 +1095,7 @@ func testDBProvisionWatcher(t *testing.T, db interfaces.DBClient) {
}
provisionWatchers, err = db.GetProvisionWatchersByServiceId(uuid.New().String())
if err != dataBase.ErrNotFound {
if (err != nil && err != dataBase.ErrNotFound) || len(provisionWatchers) != 0 {
t.Fatalf("Error getting provisionWatchers %v", err)
}
if len(provisionWatchers) != 0 {
......@@ -1111,7 +1111,7 @@ func testDBProvisionWatcher(t *testing.T, db interfaces.DBClient) {
}
provisionWatchers, err = db.GetProvisionWatchersByProfileId(uuid.New().String())
if err != dataBase.ErrNotFound {
if (err != nil && err != dataBase.ErrNotFound) || len(provisionWatchers) != 0 {
t.Fatalf("Error getting provisionWatchers %v", err)
}
if len(provisionWatchers) != 0 {
......
......@@ -290,7 +290,7 @@ func notificationByStartEndHandler(w http.ResponseWriter, r *http.Request) {
LoggingClient.Error(err.Error())
return
}
encodeWithUTF8(n, w)
encode(n, w)
}
}
......
......@@ -47,7 +47,7 @@ func subscriptionHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusServiceUnavailable)
return
}
encodeWithUTF8(subscriptions, w)
encode(subscriptions, w)
// Modify (an existing) subscription
case http.MethodPut:
......@@ -172,11 +172,11 @@ func subscriptionsBySlugHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
LoggingClient.Error(err.Error())
encodeWithUTF8(s, w)
encode(s, w)
return
}
encodeWithUTF8(s, w)
encode(s, w)
case http.MethodDelete:
_, err := dbClient.GetSubscriptionBySlug(slug)
if err != nil {
......@@ -225,7 +225,7 @@ func subscriptionsByCategoriesHandler(w http.ResponseWriter, r *http.Request) {
return
}
encodeWithUTF8(s, w)
encode(s, w)
}
}
......@@ -308,6 +308,6 @@ func subscriptionsByReceiverHandler(w http.ResponseWriter, r *http.Request) {
LoggingClient.Error(err.Error())
return
}
encodeWithUTF8(s, w)
encode(s, w)
}
}
......@@ -38,20 +38,6 @@ func encode(i interface{}, w http.ResponseWriter) {
}
}
// Another helper function for encoding responses (with UTF-8) for returning from REST calls
func encodeWithUTF8(i interface{}, w http.ResponseWriter) {
w.Header().Add("Content-Type", "application/json;charset=utf-8")
enc := json.NewEncoder(w)
err := enc.Encode(i)
// Problems encoding
if err != nil {
LoggingClient.Error("Error encoding the data: " + err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
// Printing function purely for debugging purposes
// Print the body of a request to the console
func printBody(r io.ReadCloser) {
......
......@@ -159,3 +159,15 @@ for svc in export-distro export-client support-notifications support-scheduler s
# stop them afterwards
snapctl stop --disable "$SNAP_NAME.$svc"
done
# if we are on arm64, disable the security-api-gateway because kong isn't
# properly supported on arm64 due to incorrect memory pointers used by lua and
# openresty
# see https://github.com/edgexfoundry/blackbox-testing/issues/185 for more
# details
if [ "$SNAP_ARCH" == "arm64" ]; then
snapctl set security-api-gateway=off
snapctl stop --disable "$SNAP_NAME.cassandra"
snapctl stop --disable "$SNAP_NAME.kong-daemon"
snapctl stop --disable "$SNAP_NAME.edgexproxy"
fi
#!/bin/bash -e
# this is a workaround to prevent kong from running on arm64 until kong
# upstream supports running on arm64 properly, see
# https://github.com/edgexfoundry/blackbox-testing/issues/185 for more details
# also note that we disable kong from the install hook, but that is only
# valid on first install, any refreshes will trigger it to be restarted due to
# https://bugs.launchpad.net/snapd/+bug/1818306 , hence this workaround
if [ "$SNAP_ARCH" = "arm64" ]; then
exit 0
fi
# the kong wrapper script from $SNAP
export KONG_SNAP="$SNAP/bin/kong-wrapper.sh"
......@@ -8,7 +18,7 @@ export KONG_SNAP="$SNAP/bin/kong-wrapper.sh"
# and in this case we should just loop and keep trying
# we don't implement a timeout here because systemd will kill us if we
# don't succeed in 15 minutes (or whatever the configured stop-timeout is)
until $KONG_SNAP migrations up --yes --conf "$KONG_CONF"; do
until $KONG_SNAP migrations bootstrap --conf "$KONG_CONF"; do
sleep 5
done
......
此差异已折叠。