RGB Camera

Returns camera frames which can be used for color recognition and victim detection

This page is under construction.

Basics

The camera sensor returns a 3D matrix/list, representing a 2D image of pixels, with each pixel composed of RGBA color channels. Each channel is in the range 0 to 255, with (255, 255, 255) being white and (0, 0, 0) being black. It is mainly used in the following ways:

  • As an RGB color sensor to detect environment colors, like the color of the floor (in this case, it may be implemented as a 1px by 1px camera).
  • As a regular camera, which returns frames that can be used with image processing libraries for victim or hazard map detection.

There are 4 basic steps to using this sensor:

  1. Start by importing or including the appropriate class for the camera.
  2. Retrieve the sensor by name from the robot.
  3. Enable the sensor by passing in the update rate of the sensor. (Usually the timestep)
  4. Finally, retrieve values from the sensor.
   
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
from controller import Robot, Camera # Step 1: Import the Camera class

robot = Robot()

# Step 2: Retrieve the sensor, named "colour_sensor", from the robot.
# (The sensor name may differ between robots.)
colour_sensor = robot.getDevice("colour_sensor")

timestep = int(robot.getBasicTimeStep())

# Step 3: Enable the sensor, using the timestep as the update rate.
colour_sensor.enable(timestep)

while robot.step(timestep) != -1:

    # Step 4: Retrieve the image frame.
    frame = colour_sensor.getImage()

    # Extract the individual RGB channels of pixel (0, 0). These helpers
    # require the overall image width in pixels; since this is a
    # 1px-by-1px colour sensor, that width is simply 1.
    r = colour_sensor.imageGetRed(frame, 1, 0, 0)
    g = colour_sensor.imageGetGreen(frame, 1, 0, 0)
    b = colour_sensor.imageGetBlue(frame, 1, 0, 0)

    print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#include <iostream>

#include <webots/Robot.hpp>
#include <webots/Camera.hpp> // Step 1: Include the Camera header

using namespace webots;
using namespace std;

int main(int argc, char **argv) {

  Robot *robot = new Robot();

  // Step 2: Retrieve the sensor, named "colour_sensor", from the robot.
  // Note that the sensor name may differ between robots.
  Camera *colorSensor = robot->getCamera("colour_sensor");

  int timeStep = (int)robot->getBasicTimeStep();

  colorSensor->enable(timeStep); // Step 3: Enable the sensor, using the timestep as the update rate

  // No separate priming step is needed: the loop condition below steps the
  // simulation before each frame is read (matching the Python example).
  while (robot->step(timeStep) != -1) {

    const unsigned char *image = colorSensor->getImage(); // Step 4: Retrieve the image frame.

    // Get the individual RGB color channels from the pixel (0,0).
    // Note that these functions require you to pass the width of the overall
    // image in pixels. Since this is a 1px by 1px color sensor, the width of
    // the image is just 1.
    int r = colorSensor->imageGetRed(image, 1, 0, 0);
    int g = colorSensor->imageGetGreen(image, 1, 0, 0);
    int b = colorSensor->imageGetBlue(image, 1, 0, 0);

    // '\n' instead of endl: no need to flush the stream every iteration.
    cout << "r: " << r << " g: " << g << " b: " << b << '\n';
  }

  delete robot;
  return 0;
}

Note: The dimensions of the frame, in pixels, can be found with the getWidth() and getHeight() functions:

   
1
2
# Frame dimensions in pixels (no trailing semicolons — this is Python,
# not C++; the original snippet carried them over from the C++ variant).
height = colorSensor.getHeight()
width = colorSensor.getWidth()
1
2
int height = colorSensor->getHeight(); // frame height in pixels
int width = colorSensor->getWidth();   // frame width in pixels

Example

This example demonstrates techniques to interface the camera frames from Erebus with third-party libraries to perform advanced image processing. The frame is fed into a data format usable by the OpenCV library, which then grayscales and displays the modified image.

   
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
from controller import Robot, Camera
import cv2 # Include OpenCV library
import numpy as np # For python, include numpy as well

robot = Robot()

camera = robot.getDevice("cameraName")

timestep = int(robot.getBasicTimeStep())

camera.enable(timestep)

while robot.step(timestep) != -1:

    # Convert the camera image into an OpenCV-compatible numpy array.
    # The Webots buffer is a flat byte string in BGRA order (4 channels),
    # so reshape it to (height, width, 4) before converting to BGR.
    image = camera.getImage()
    image = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
    frame = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

    cv2.imshow("frame", frame)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
    cv2.imshow("grayScale", frame)

    # cv2.threshold returns (retval, thresholded_image); the original code
    # discarded the result, so the "thresh" window showed the unmodified
    # grayscale frame instead of the thresholded one.
    _, frame = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
    cv2.imshow("thresh", frame)

    cv2.waitKey(1) # Render imshows on screen
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#include <opencv2/opencv.hpp> // Include OpenCV library
using namespace cv;

#include <webots/Robot.hpp>
#include <webots/Camera.hpp>

using namespace webots;

int main(int argc, char **argv) {

  Robot *robot = new Robot();

  Camera *cam = robot->getCamera("cameraName");

  int timeStep = (int)robot->getBasicTimeStep();

  cam->enable(timeStep);

  while (robot->step(timeStep) != -1) {

    /*
     * Wrap the camera buffer in an OpenCV Mat (no copy). Pass the height
     * and width of the camera in pixels, and CV_8UC4 since the Webots
     * buffer has 4 channels (BGRA). The buffer is owned by Webots and
     * returned const; use an explicit const_cast rather than a C-style
     * (void*) cast that silently removes const.
     */
    Mat frame(cam->getHeight(), cam->getWidth(), CV_8UC4,
              const_cast<unsigned char *>(cam->getImage()));

    imshow("frame", frame);

    // Use the 4-channel-aware conversion code to match the CV_8UC4 Mat.
    cvtColor(frame, frame, COLOR_BGRA2GRAY); // grayscale
    imshow("grayScale", frame);

    threshold(frame, frame, 80, 255, THRESH_BINARY); // threshold
    imshow("thresh", frame);

    waitKey(1); // Render imshows on screen
  }

  delete robot;
  return 0;
}