Something you realise quite early on when working with the Kinect Fusion component in the Microsoft Kinect SDK is that it’s really meant to integrate into a 2D UI: it takes care of rendering the volume that’s being mapped, and you simply have to integrate the generated bitmap somewhere into your app’s UI. The primary benefit of this approach is its low latency: it all happens very quickly and the only data you need to move from the runtime into your app is the bitmap itself.
With 3D systems such as AutoCAD, though, you really want to take the 3D data directly and integrate it into your 3D scene in some way, such as via a jig.
I haven’t been very happy with the approach I’ve so far taken to do this: I’ve been asking the Kinect Fusion runtime to calculate a mesh for a reconstruction volume and have then used its vertices to get point (XYZ) – and now colour (RGB) – data. This has always felt like a bit of a long-winded way of doing things, as we’re throwing away a bunch of data – we’re only using the vertices, not the triangles – but that’s what I was able to get working.
The reason for doing this is simple: AutoCAD is not architected to deal with huge, dynamically generated meshes (we have other products such as Maya and now Project Memento that are much better at this), so it’s a deliberate choice to just take the points.
This “point cloud generation” is then (hopefully) happening multiple times a second as we execute our jig before we then perform a more detailed query of the volume’s data to generate the final, full-colour point cloud.
Back when I first started working with Kinect Fusion, I identified this need to pass via a mesh object as a potential issue and raised it with the Kinect for Windows team. They very kindly went ahead and implemented a new API named ExportVolumeBlock(), which provides more direct access to the volume’s low-level data. You basically create an appropriately sized array of shorts that the method populates with the volume’s voxel data.
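To give a sense of the shape of the API, here’s a minimal sketch – the resolution variables are just illustrative names, and the real calls are in the full listing further down – showing how the array gets sized to match the block of voxels being exported before ExportVolumeBlock() fills it:

// Minimal sketch: xRes, yRes and zRes are illustrative names for the
// dimensions (in voxels) of the block we want to export from _volume,
// our ColorReconstruction - one short comes back per voxel. The final
// parameter (1, here) is the voxel step, which the full code uses to
// sample the volume at lower resolution
var voxels = new short[xRes * yRes * zRes];
_volume.ExportVolumeBlock(0, 0, 0, xRes, yRes, zRes, 1, voxels);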
When I first worked with this API I had trouble getting my head around it (the documentation on MSDN seemed to require a better understanding of voxels – in particular – than I had at the time), and so I eventually gave up and stuck with my existing approach of generating meshes.
This week I decided that in order to do my AU talk on Kinect Fusion justice, I really wanted to get this working and be able to compare the performance of these two approaches.
So that’s what I did: I finally worked out how to find out which of the voxels in the volume array was “on” (i.e. a point contributing to the point cloud) and then use that as an alternative approach for generating points from the Kinect Fusion reconstruction volume. This approach seemed to have some advantages, such as being able to allocate the voxel array once and reuse it across frames – rather than generating a new mesh object every time – so I was hopeful we’d see some performance gains.
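In case it helps to see it in isolation, here’s the core of that test, extracted from the GetPointCloud2() method in the listing below – it effectively boils down to checking whether the exported voxel value is non-negative:

// One short per voxel comes back from ExportVolumeBlock(); vox is the
// value at (x, y, z), using the pitch and slice strides from the full code
var vox = (int)(_voxels[z * slice + y * pitch + x]);
var v = (double)((vox | 0xFFFF) / 0xFFFF); // 1.0 when vox >= 0, otherwise 0.0
if (v > 0.0)
{
  // This voxel is "on", so it contributes a point to the cloud
}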
In order to help test this out, I modified my jig with the approach shown in this previous post to display FPS (frames per second) information during execution, as well as implementing some additional UI to choose which of these two approaches to use.
Here’s a snapshot of the jig in action with the FPS at the bottom-left corner of the screen:
It’s actually a handy tool for tweaking the various settings and bringing the FPS count up, which can help a great deal with the tracking between frames.
So how did it work out?
I ended up discovering that using the mesh calculation approach is actually pretty comparable to using ExportVolumeBlock(), performance-wise. It’s possible that using a parallel loop to crank through the results of the latter might help speed things up, but with a standard sequential loop the two approaches come out about even. I’m happy to have the code working, nonetheless.
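For anyone who fancies experimenting with that, here’s a rough, untested sketch of how the parallel loop might look. It reuses the local variables from GetPointCloud2() in the listing below (destResX/Y/Z, pitch, slice, res and the offsets), and collects per-slice results into separate lists, as I wouldn’t want to populate a Point3dCollection from multiple threads:

// Untested sketch: decode the exported voxels one Z slice per task
var results = new List<Point3d>[destResZ];
Parallel.For(
  0, destResZ,
  z =>
  {
    var local = new List<Point3d>();
    for (int y = 0; y < destResY; y++)
    {
      for (int x = 0; x < destResX; x++)
      {
        var vox = (int)(_voxels[z * slice + y * pitch + x]);
        if (((vox | 0xFFFF) / 0xFFFF) > 0)
        {
          local.Add(
            new Point3d(
              x / res + offx, z / res + offz, -(y / res + offy)
            )
          );
        }
      }
    }
    results[z] = local;
  }
);
// Merge the per-slice lists back on the calling thread
var pts = new Point3dCollection();
foreach (var list in results)
  foreach (var pt in list)
    pts.Add(pt);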
And just the act of bringing the frame rate up allows you to do pretty cool captures – here’s a dining room chair, as an example (which resulted in a 33MB, 1.7 million point PCG file).
Here’s the C# code allowing you to experiment with these two approaches:
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Diagnostics;
using System.Drawing;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Threading;
using Autodesk.AutoCAD.EditorInput;
using Autodesk.AutoCAD.Geometry;
using Autodesk.AutoCAD.Runtime;
using Microsoft.Kinect;
using Microsoft.Kinect.Toolkit.Fusion;
using Autodesk.AutoCAD.GraphicsInterface;
#pragma warning disable 162
namespace KinectSamples
{
public static class ColorUtils
{
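// Flatten a list of colored points into an AutoCAD Point3dCollection,
// dropping the color information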
public static Point3dCollection
Point3dFromColoredPointCollection(
IList<ColoredPoint3d> vecs
)
{
var pts = new Point3dCollection();
foreach (var vec in vecs)
{
pts.Add(new Point3d(vec.X, vec.Y, vec.Z));
}
return pts;
}
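// Convert Kinect Fusion mesh vertices to AutoCAD points, remapping
// each (X, Y, Z) to (X, Z, -Y)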
public static Point3dCollection
Point3dFromVertCollection(
ReadOnlyCollection<Vector3> vecs
)
{
var pts = new Point3dCollection();
foreach (var vec in vecs)
{
pts.Add(new Point3d(vec.X, vec.Z, -vec.Y));
}
return pts;
}
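// As above, but also unpacking each vertex's RGB color from the
// corresponding packed integer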
public static List<ColoredPoint3d>
ColoredPoint3FromVertCollection(
ReadOnlyCollection<Vector3> vecs, ReadOnlyCollection<int> cols
)
{
Debug.Assert(vecs.Count == cols.Count);
var pts = new List<ColoredPoint3d>();
for (int i = 0; i < vecs.Count; i++)
{
var vec = vecs[i];
var col = cols[i];
pts.Add(
new ColoredPoint3d(
vec.X, vec.Z, -vec.Y,
(col >> 16) & 255,
(col >> 8) & 255,
col & 255
)
);
}
return pts;
}
}
public class KinectFusionColorJig : KinectPointCloudJig
{
// Constants
private const int MaxTrackingErrors = 100;
private const ReconstructionProcessor ProcessorType =
ReconstructionProcessor.Amp;
private const int DeviceToUse = -1;
private const int ColorIntegrationInterval = 2;
private const bool AutoResetReconstructionWhenLost = true;
private const DepthImageFormat DepthFormat =
DepthImageFormat.Resolution640x480Fps30;
private const ColorImageFormat ColorFormat =
ColorImageFormat.RgbResolution640x480Fps30;
// Member variables
private Editor _ed;
private SynchronizationContext _ctxt;
private double _roomWidth;
private double _roomLength;
private double _roomHeight;
private int _lowResStep;
private bool _useMesh;
private int _voxelsPerMeter;
private FusionFloatImageFrame _depthFloatBuffer;
private Matrix4 _worldToCameraTransform;
private Matrix4 _defaultWorldToVolumeTransform;
private ColorReconstruction _volume;
private int _processedFrameCount;
private bool _lastTrackingAttemptSucceeded;
private int _trackingErrors;
private int _frameDataLength;
private bool _processing;
private bool _translateResetPoseByMinDepthThreshold = true;
private float _minDepthClip =
FusionDepthProcessor.DefaultMinimumDepth;
private float _maxDepthClip =
FusionDepthProcessor.DefaultMaximumDepth;
private DepthImagePixel[] _depthImagePixels;
private byte[] _colorImagePixels;
private FusionColorImageFrame _mappedColorFrame;
private ColorImagePoint[] _colorCoordinates;
private int[] _mappedColorPixels;
private CoordinateMapper _mapper;
private int _depthWidth = 0;
private int _depthHeight = 0;
private int _colorWidth = 0;
private int _colorHeight = 0;
private short[] _voxels = null;
private TextStyle _style;
// FPS tracking stuff
private const int FpsInterval = 2;
private DispatcherTimer _fpsTimer;
private DateTime _lastFPSTimestamp;
private double _currentFps;
// Constructor
public KinectFusionColorJig(
Editor ed, SynchronizationContext ctxt,
double width, double length, double height, int vpm, int step,
bool useMesh
)
{
_ed = ed;
_ctxt = ctxt;
_roomWidth = width;
_roomLength = length;
_roomHeight = height;
_voxelsPerMeter = vpm;
_lowResStep = step;
_useMesh = useMesh;
_processing = false;
_lastTrackingAttemptSucceeded = true;
_vecs = new List<ColoredPoint3d>();
}
private void PostToAutoCAD(SendOrPostCallback cb)
{
_ctxt.Post(cb, null);
System.Windows.Forms.Application.DoEvents();
}
// Get the depth image size from the input depth image format.
private static Size GetImageSize(DepthImageFormat imageFormat)
{
switch (imageFormat)
{
case DepthImageFormat.Resolution320x240Fps30:
return new Size(320, 240);
case DepthImageFormat.Resolution640x480Fps30:
return new Size(640, 480);
case DepthImageFormat.Resolution80x60Fps30:
return new Size(80, 60);
}
throw new ArgumentOutOfRangeException("imageFormat");
}
// Get the color image size from the input color image format.
private static Size GetImageSize(ColorImageFormat imageFormat)
{
switch (imageFormat)
{
case ColorImageFormat.RgbResolution640x480Fps30:
return new Size(640, 480);
case ColorImageFormat.RgbResolution1280x960Fps12:
return new Size(1280, 960);
case ColorImageFormat.InfraredResolution640x480Fps30:
return new Size(640, 480);
case ColorImageFormat.RawBayerResolution1280x960Fps12:
return new Size(1280, 960);
case ColorImageFormat.RawBayerResolution640x480Fps30:
return new Size(640, 480);
case ColorImageFormat.RawYuvResolution640x480Fps15:
return new Size(640, 480);
case ColorImageFormat.YuvResolution640x480Fps15:
return new Size(640, 480);
}
throw new ArgumentOutOfRangeException("imageFormat");
}
public override bool StartSensor()
{
if (_kinect != null)
{
_kinect.Start();
_kinect.ElevationAngle = 0;
_kinect.Stop();
Size depthImageSize = GetImageSize(DepthFormat);
_depthWidth = (int)depthImageSize.Width;
_depthHeight = (int)depthImageSize.Height;
Size colorImageSize = GetImageSize(ColorFormat);
_colorWidth = (int)colorImageSize.Width;
_colorHeight = (int)colorImageSize.Height;
_kinect.DepthStream.Enable(DepthFormat);
_kinect.ColorStream.Enable(ColorFormat);
_frameDataLength = _kinect.DepthStream.FramePixelDataLength;
_depthImagePixels = new DepthImagePixel[_frameDataLength];
try
{
// Allocate a volume
var volParam =
new ReconstructionParameters(
_voxelsPerMeter,
(int)(_voxelsPerMeter * _roomWidth),
(int)(_voxelsPerMeter * _roomHeight),
(int)(_voxelsPerMeter * _roomLength)
);
_worldToCameraTransform = Matrix4.Identity;
_volume =
ColorReconstruction.FusionCreateReconstruction(
volParam, ProcessorType, DeviceToUse,
_worldToCameraTransform
);
_defaultWorldToVolumeTransform =
_volume.GetCurrentWorldToVolumeTransform();
if (_translateResetPoseByMinDepthThreshold)
{
ResetReconstruction();
}
}
catch (InvalidOperationException ex)
{
_ed.WriteMessage("Invalid operation: " + ex.Message);
return false;
}
catch (DllNotFoundException ex)
{
_ed.WriteMessage("DLL not found: " + ex.Message);
return false;
}
catch (ArgumentException ex)
{
_ed.WriteMessage("Invalid argument: " + ex.Message);
return false;
}
catch (OutOfMemoryException ex)
{
_ed.WriteMessage("Out of memory: " + ex.Message);
return false;
}
_depthFloatBuffer =
new FusionFloatImageFrame(_depthWidth, _depthHeight);
_mappedColorFrame =
new FusionColorImageFrame(_depthWidth, _depthHeight);
int depthImageArraySize = _depthWidth * _depthHeight;
int colorImageArraySize =
_colorWidth * _colorHeight * sizeof(int);
_depthImagePixels =
new DepthImagePixel[depthImageArraySize];
_colorImagePixels = new byte[colorImageArraySize];
_colorCoordinates =
new ColorImagePoint[depthImageArraySize];
_mappedColorPixels = new int[depthImageArraySize];
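// Pre-allocate the voxel buffer used by ExportVolumeBlock() in
// GetPointCloud2(), sized for the low-resolution step, so it can be
// reused from frame to frame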
var res = _voxelsPerMeter / _lowResStep;
var destResX = (int)(_roomWidth * res);
var destResY = (int)(_roomHeight * res);
var destResZ = (int)(_roomLength * res);
var destRes = destResX * destResY * destResZ;
_voxels = new short[destRes];
// Initialize and start the FPS timer
_fpsTimer = new DispatcherTimer();
_fpsTimer.Tick += new EventHandler(FpsTimerTick);
_fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);
_fpsTimer.Start();
_lastFPSTimestamp = DateTime.UtcNow;
// Text style for our jig
_style = new TextStyle();
_style.Font =
new FontDescriptor("Calibri", false, false, 0, 0);
_style.TextSize = 14;
_kinect.Start();
return true;
}
_ed.WriteMessage(
"\nUnable to start Kinect sensor - " +
"are you sure it's plugged in?"
);
return false;
}
// Handler for FPS timer tick
private void FpsTimerTick(object sender, EventArgs e)
{
// Calculate time span from last calculation of FPS
double intervalSeconds =
(DateTime.UtcNow - _lastFPSTimestamp).TotalSeconds;
// Calculate and show fps on status bar
_currentFps =
(double)(_processedFrameCount / intervalSeconds);
// Reset frame counter
_processedFrameCount = 0;
_lastFPSTimestamp = DateTime.UtcNow;
}
override public void OnAllFramesReady(
object sender, AllFramesReadyEventArgs e
)
{
if (!_processing && !_finished)
{
using (var colorFrame = e.OpenColorImageFrame())
{
if (null != colorFrame)
{
// Copy color pixels from the image to a buffer
colorFrame.CopyPixelDataTo(_colorImagePixels);
}
}
using (var depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame != null)
{
// Copy depth pixels to buffer
depthFrame.CopyDepthImagePixelDataTo(_depthImagePixels);
// Stop other processing from happening until the
// background processing of this frame has completed
_processing = true;
// Process on a background thread
Dispatcher.CurrentDispatcher.BeginInvoke(
DispatcherPriority.Background,
(Action)(() => ProcessDepthData())
);
}
}
}
}
// Process the depth input
private void ProcessDepthData()
{
if (_finished)
return;
try
{
// Convert the depth image frame to depth float image frame
_volume.DepthToDepthFloatFrame(
_depthImagePixels,
_depthFloatBuffer,
FusionDepthProcessor.DefaultMinimumDepth,
FusionDepthProcessor.DefaultMaximumDepth,
false
);
bool trackingSucceeded = false;
bool integrateColor =
_processedFrameCount % ColorIntegrationInterval == 0;
if (integrateColor)
{
MapColorToDepth();
trackingSucceeded =
_volume.ProcessFrame(
_depthFloatBuffer,
_mappedColorFrame,
FusionDepthProcessor.DefaultAlignIterationCount,
FusionDepthProcessor.DefaultIntegrationWeight,
FusionDepthProcessor.DefaultColorIntegrationOfAllAngles,
_volume.GetCurrentWorldToCameraTransform()
);
}
else
{
trackingSucceeded =
_volume.ProcessFrame(
_depthFloatBuffer,
FusionDepthProcessor.DefaultAlignIterationCount,
FusionDepthProcessor.DefaultIntegrationWeight,
_volume.GetCurrentWorldToCameraTransform()
);
}
if (!trackingSucceeded)
{
_trackingErrors++;
PostToAutoCAD(
a =>
{
_ed.WriteMessage(
"\nTracking failure. Keep calm and carry on."
);
if (AutoResetReconstructionWhenLost)
{
_ed.WriteMessage(
" ({0}/{1})",
_trackingErrors, MaxTrackingErrors
);
}
else
{
_ed.WriteMessage(" {0}", _trackingErrors);
}
}
);
}
else
{
if (!_lastTrackingAttemptSucceeded)
{
PostToAutoCAD(
a => _ed.WriteMessage("\nWe're back on track!")
);
}
// Set the camera pose and reset tracking errors
_worldToCameraTransform =
_volume.GetCurrentWorldToCameraTransform();
_trackingErrors = 0;
}
_lastTrackingAttemptSucceeded = trackingSucceeded;
if (
AutoResetReconstructionWhenLost &&
!trackingSucceeded &&
_trackingErrors >= MaxTrackingErrors
)
{
PostToAutoCAD(
a =>
{
_ed.WriteMessage(
"\nReached error threshold: automatically resetting."
);
_vecs.Clear();
}
);
Console.Beep();
ResetReconstruction();
}
_points =
_useMesh ? GetPointCloud(true) : GetPointCloud2(true);
++_processedFrameCount;
}
catch (InvalidOperationException ex)
{
PostToAutoCAD(
a =>
{
_ed.WriteMessage(
"\nInvalid operation: {0}", ex.Message
);
}
);
}
finally
{
// We can now let other processing happen
_processing = false;
}
}
// Process the color and depth inputs, converting the color into
// the depth space
private unsafe void MapColorToDepth()
{
if (null == _mapper)
{
// Create a coordinate mapper
_mapper = new CoordinateMapper(_kinect);
}
_mapper.MapDepthFrameToColorFrame(
DepthFormat,
_depthImagePixels,
ColorFormat,
_colorCoordinates
);
// Here we make use of unsafe code to just copy the whole
// pixel as an int for performance reasons, as we do
// not need access to the individual rgba components
fixed (byte* ptrColorPixels = _colorImagePixels)
{
int* rawColorPixels = (int*)ptrColorPixels;
// Horizontal flip the color image as the standard depth
// image is flipped internally in Kinect Fusion
// to give a viewpoint as though from behind the Kinect
// looking forward by default
Parallel.For(
0,
_depthHeight,
y =>
{
int destIndex = y * _depthWidth;
int flippedDestIndex = destIndex + (_depthWidth - 1);
for (
int x = 0;
x < _depthWidth;
++x, ++destIndex, --flippedDestIndex
)
{
// Calculate index into depth array
int colorInDepthX = _colorCoordinates[destIndex].X;
int colorInDepthY = _colorCoordinates[destIndex].Y;
// Make sure the depth pixel maps to a valid point
// in color space
if (
colorInDepthX >= 0 &&
colorInDepthX < _colorWidth &&
colorInDepthY >= 0 &&
colorInDepthY < _colorHeight &&
_depthImagePixels[destIndex].Depth != 0
)
{
// Calculate index into color array- this will
// perform a horizontal flip as well
int sourceColorIndex =
colorInDepthX + (colorInDepthY * _colorWidth);
// Copy color pixel
_mappedColorPixels[flippedDestIndex] =
rawColorPixels[sourceColorIndex];
}
else
{
_mappedColorPixels[flippedDestIndex] = 0;
}
}
}
);
}
_mappedColorFrame.CopyPixelDataFrom(_mappedColorPixels);
}
// Reset the reconstruction to initial value
private void ResetReconstruction()
{
// Reset tracking error counter
_trackingErrors = 0;
// Set the world-view transform to identity, so the world
// origin is the initial camera location.
_worldToCameraTransform = Matrix4.Identity;
if (_volume != null)
{
// Translate the reconstruction volume location away from
// the world origin by an amount equal to the minimum depth
// threshold. This ensures that some depth signal falls
// inside the volume. If set false, the default world origin
// is set to the center of the front face of the volume,
// which has the effect of locating the volume directly in
// front of the initial camera position with the +Z axis
// into the volume along the initial camera direction of
// view.
if (_translateResetPoseByMinDepthThreshold)
{
Matrix4 worldToVolumeTransform =
_defaultWorldToVolumeTransform;
// Translate the volume in the Z axis by the
// minDepthThreshold distance
float minDist =
(_minDepthClip < _maxDepthClip) ?
_minDepthClip :
_maxDepthClip;
worldToVolumeTransform.M43 -= minDist * _voxelsPerMeter;
_volume.ResetReconstruction(
_worldToCameraTransform, worldToVolumeTransform
);
}
else
{
_volume.ResetReconstruction(_worldToCameraTransform);
}
}
}
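// Sampler for the jig: refresh the display points from any captured
// colored points and force a graphics update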
protected override SamplerStatus SamplerData()
{
if (_vecs.Count > 0)
{
_points.Clear();
foreach (var vec in _vecs)
{
_points.Add(
new Point3d(vec.X, vec.Y, vec.Z)
);
}
}
ForceMessage();
return SamplerStatus.OK;
}
protected override bool WorldDrawData(WorldDraw draw)
{
var wg = draw.Geometry;
// Push our transforms onto the stack
wg.PushOrientationTransform(
OrientationBehavior.Screen
);
wg.PushPositionTransform(
PositionBehavior.Screen,
new Point2d(30, 30)
);
// Draw our screen-fixed text
wg.Text(
new Point3d(0, 0, 0), // Position
new Vector3d(0, 0, 1), // Normal
new Vector3d(1, 0, 0), // Direction
String.Format("{0:F1} FPS", _currentFps), // Text
true, // Rawness
_style // TextStyle
);
// Remember to pop our transforms off the stack
wg.PopModelTransform();
wg.PopModelTransform();
return base.WorldDrawData(draw);
}
public override void AttachHandlers()
{
// Attach the event handlers
if (_kinect != null)
{
_kinect.AllFramesReady +=
new EventHandler<AllFramesReadyEventArgs>(
OnAllFramesReady
);
}
}
public override void RemoveHandlers()
{
// Detach the event handlers
if (_kinect != null)
{
_kinect.AllFramesReady -=
new EventHandler<AllFramesReadyEventArgs>(
OnAllFramesReady
);
}
}
public ColorMesh GetMesh()
{
return _volume.CalculateMesh(1);
}
// Get a point cloud from the vertices of a mesh
// (would be better to access the volume info directly)
public Point3dCollection GetPointCloud(bool lowRes = false)
{
using (var m = _volume.CalculateMesh(lowRes ? _lowResStep : 1))
{
return
m != null ?
ColorUtils.Point3dFromVertCollection(
m.GetVertices()
) :
new Point3dCollection();
}
}
public List<ColoredPoint3d> GetColoredPointCloud(int step)
{
using (var m = _volume.CalculateMesh(step))
{
return ColorUtils.ColoredPoint3FromVertCollection(
m.GetVertices(), m.GetColors()
);
}
}
// Get a point cloud from the volume directly
public Point3dCollection GetPointCloud2(bool lowRes = false)
{
var step = lowRes ? _lowResStep : 1;
var res = (double)(_voxelsPerMeter / step);
var destResX = (int)(_roomWidth * res);
var destResY = (int)(_roomHeight * res);
var destResZ = (int)(_roomLength * res);
var destRes = destResX * destResY * destResZ;
var offx = _roomWidth / -2.0;
var offy = _roomHeight / -2.0;
var offz = 0.0; //_roomLength / -2.0;
var pts = new Point3dCollection();
try
{
_volume.ExportVolumeBlock(
0, 0, 0, destResX, destResY, destResZ, step, _voxels
);
var pitch = destResX;
var slice = destResY * pitch;
for (int x = 0; x < destResX; x++)
{
for (int y = 0; y < destResY; y++)
{
for (int z = 0; z < destResZ; z++)
{
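// Voxel values of zero or above are treated as "on" and
// contribute a point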
var vox = (int)(_voxels[z * slice + y * pitch + x]);
var v = (double)((vox | 0xFFFF) / 0xFFFF);
if (v > 0.0)
{
pts.Add(
new Point3d(
x / res + offx,
z / res + offz,
-(y / res + offy)
)
);
}
}
}
}
}
catch { }
return pts;
}
public List<ColoredPoint3d> GetColoredPointCloud2(int step)
{
var res = (double)(_voxelsPerMeter / step);
var destResX = (int)(_roomWidth * res);
var destResY = (int)(_roomHeight * res);
var destResZ = (int)(_roomLength * res);
var destRes = destResX * destResY * destResZ;
var offx = _roomWidth / -2.0;
var offy = _roomHeight / -2.0;
var offz = 0.0; //_roomLength / -2.0;
var voxels = new short[destRes];
var colors = new int[destRes];
// This should return an array of voxels:
// these are currently all 0
var pts = new List<ColoredPoint3d>();
try
{
_volume.ExportVolumeBlock(
0, 0, 0, destResX, destResY, destResZ, step, voxels, colors
);
var pitch = destResX;
var slice = destResY * pitch;
for (int x = 0; x < destResX; x++)
{
for (int y = 0; y < destResY; y++)
{
for (int z = 0; z < destResZ; z++)
{
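// Same "on" test as in GetPointCloud2(), but we also pick up
// the voxel's packed color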
var idx = z * slice + y * pitch + x;
var vox = (int)voxels[idx];
var v = (double)((vox | 0xFFFF) / 0xFFFF);
if (v > 0.0)
{
int col = colors[idx];
pts.Add(
new ColoredPoint3d(
x / res + offx,
z / res + offz,
-(y / res + offy),
(col >> 16) & 255,
(col >> 8) & 255,
col & 255
)
);
}
}
}
}
}
catch { }
return pts;
}
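// Dispose of the various resources we've allocated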
public void CleanUp()
{
if (null != _depthFloatBuffer)
{
_depthFloatBuffer.Dispose();
_depthFloatBuffer = null;
}
if (null != _mappedColorFrame)
{
_mappedColorFrame.Dispose();
_mappedColorFrame = null;
}
if (null != _fpsTimer)
{
_fpsTimer.Stop();
_fpsTimer.Tick -= new EventHandler(FpsTimerTick);
}
if (null != _volume)
{
_volume.Dispose();
_volume = null;
}
if (null != _style)
{
_style.Dispose();
_style = null;
}
}
}
public class KinectFusionColorCommands
{
private const int RoomWidth = 3;
private const int RoomHeight = 2;
private const int RoomLength = 3;
private const int VoxelsPerMeter = 128;
private const int LowResStep = 4;
private const bool UseMesh = false;
private double _roomWidth = RoomWidth;
private double _roomLength = RoomLength;
private double _roomHeight = RoomHeight;
private int _voxelsPerMeter = VoxelsPerMeter;
private int _lowResStep = LowResStep;
private bool _useMesh = UseMesh;
[CommandMethod("ADNPLUGINS", "KINFUSCOL", CommandFlags.Modal)]
public void ImportFromKinectFusionWithColor()
{
var doc =
Autodesk.AutoCAD.ApplicationServices.
Application.DocumentManager.MdiActiveDocument;
var db = doc.Database;
var ed = doc.Editor;
// Ask the user for double information
var pdo = new PromptDoubleOptions("\nEnter width of volume");
pdo.AllowNegative = false;
pdo.AllowZero = false;
pdo.DefaultValue = _roomWidth;
pdo.UseDefaultValue = true;
var pdr = ed.GetDouble(pdo);
if (pdr.Status != PromptStatus.OK)
return;
_roomWidth = pdr.Value;
pdo.Message = "\nEnter length of volume";
pdo.DefaultValue = _roomLength;
pdr = ed.GetDouble(pdo);
if (pdr.Status != PromptStatus.OK)
return;
_roomLength = pdr.Value;
pdo.Message = "\nEnter height of volume";
pdo.DefaultValue = _roomHeight;
pdr = ed.GetDouble(pdo);
if (pdr.Status != PromptStatus.OK)
return;
_roomHeight = pdr.Value;
// Ask the user for integer information
var pio =
new PromptIntegerOptions("\nEnter voxels per meter");
pio.AllowNegative = false;
pio.AllowZero = false;
pio.DefaultValue = _voxelsPerMeter;
pio.UseDefaultValue = true;
var pir = ed.GetInteger(pio);
if (pir.Status != PromptStatus.OK)
return;
_voxelsPerMeter = pir.Value;
pio.Message = "\nEnter low-resolution sampling step";
pio.DefaultValue = _lowResStep;
pir = ed.GetInteger(pio);
if (pir.Status != PromptStatus.OK)
return;
_lowResStep = pir.Value;
// Ask the user for keyword information
var pko =
new PromptKeywordOptions(
"\nUse a mesh object to calculate points?"
);
pko.AllowNone = true;
pko.Keywords.Add("Yes");
pko.Keywords.Add("No");
pko.Keywords.Default = _useMesh ? "Yes" : "No";
var pkr = ed.GetKeywords(pko);
if (pkr.Status != PromptStatus.OK)
return;
_useMesh = (pkr.StringResult == "Yes");
// Create a form to set the sync context properly
using (var f1 = new Form1())
{
var ctxt = SynchronizationContext.Current;
if (ctxt == null)
{
throw
new System.Exception(
"Current sync context is null."
);
}
// Create our jig
var kj =
new KinectFusionColorJig(
ed, ctxt,
_roomWidth, _roomLength, _roomHeight,
_voxelsPerMeter, _lowResStep, _useMesh
);
if (!kj.StartSensor())
{
kj.StopSensor();
kj.CleanUp();
return;
}
var pr = ed.Drag(kj);
if (pr.Status != PromptStatus.OK && !kj.Finished)
{
kj.StopSensor();
kj.CleanUp();
return;
}
kj.PauseSensor();
try
{
ed.WriteMessage(
"\nCapture complete: examining points...\n"
);
System.Windows.Forms.Application.DoEvents();
var voxelStep = 1;
bool loop = false, cancel = false;
List<ColoredPoint3d> pts;
do
{
loop = false;
pts =
_useMesh ?
kj.GetColoredPointCloud(voxelStep) :
kj.GetColoredPointCloud2(voxelStep);
ed.WriteMessage(
"Extracted {0} points with a voxel step of {1}.\n",
pts.Count, voxelStep
);
var pio2 =
new PromptIntegerOptions(
"Enter new voxel step or accept default to continue"
);
pio2.AllowNegative = false;
pio2.AllowZero = false;
pio2.DefaultValue = voxelStep;
var pir2 = ed.GetInteger(pio2);
if (pir2.Status != PromptStatus.OK)
{
cancel = true;
}
else if (
pir2.Status == PromptStatus.OK &&
pir2.Value != voxelStep
)
{
voxelStep = pir2.Value;
loop = true;
}
} while (loop);
if (!cancel)
{
kj.WriteAndImportPointCloud(doc, pts);
}
}
catch (System.Exception ex)
{
ed.WriteMessage("\nException: {0}", ex.Message);
}
kj.StopSensor();
kj.CleanUp();
}
}
}
}