After this previous post, which updated my earlier implementation drawing polylines in AutoCAD to use the Microsoft Kinect SDK, it made sense to give the same treatment to this implementation, too.
This version of the code doesn’t really go beyond the OpenNI/NITE version – it’s very much a “flat” port, which means it comes with the same issues the other version had: the sweep is performed along a single, increasingly complex path, which means that it quickly slows down (as the path’s complexity increases with its length) and at some point just stops working. And along the way, there’s significant risk of self-intersection as you’re drawing the path, which will definitely cause the sweep operation to fail.
But anyway – the next step is to take this code and allow segments to be swept, reducing the complexity of any one chunk of the solid. We’ll take a look at that next week, with any luck.
Here’s the C# code that defines the updated KINEXT command that sweeps a circle of user-specified radius along a spline path defined by the tracked user’s gestures:
using Autodesk.AutoCAD.ApplicationServices;
using Autodesk.AutoCAD.DatabaseServices;
using Autodesk.AutoCAD.EditorInput;
using Autodesk.AutoCAD.Geometry;
using Autodesk.AutoCAD.Runtime;
using AcGi = Autodesk.AutoCAD.GraphicsInterface;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.IO;
using System;
using Microsoft.Research.Kinect.Nui;
namespace KinectIntegration
{
// Our own class duplicating the one implemented by nKinect
// to aid with porting: a point in space plus its colour
public class ColorVector3
{
    // Position components of the captured point
    public double X, Y, Z;
    // Colour components; assigned from byte values, so
    // effectively in the 0-255 range
    public int R, G, B;
}
public class KinectJig : DrawJig
{
    /// <summary>
    /// P/Invoke entry point into acad.exe, used to post a command
    /// string to AutoCAD (here to cancel the active jig from
    /// within Sampler() once the "finish" gesture is seen).
    /// </summary>
    [DllImport("acad.exe", CharSet = CharSet.Auto,
        CallingConvention = CallingConvention.Cdecl,
        EntryPoint = "?acedPostCommand@@YAHPB_W@Z"
    )]
    extern static private int acedPostCommand(string strExpr);

    // Draw our transient spline in red
    const short transPathColor = 1;

    // Our transient solids (cursor sphere & tube) are yellow
    const short transSolColor = 2;

    // Our final solids will be green
    const short finalSolColor = 3;

    // A transaction and database to add solids
    private Transaction _tr;
    private Document _doc;

    // We need our Kinect sensor
    private Runtime _kinect = null;

    // With the images collected by it (latest depth/video frames,
    // written by the sensor's event callbacks)
    private ImageFrame _depth = null;
    private ImageFrame _video = null;

    // A list of points captured by the sensor
    // (for eventual export)
    private List<ColorVector3> _vecs;

    // A list of points to be displayed
    // (we use this for the jig)
    private Point3dCollection _points;

    // A list of vertices to draw between
    // (we use this for the final polyline creation)
    private Point3dCollection _vertices;

    // The most recent vertex being captured/drawn
    private Point3d _curPt;
    private Entity _cursor;

    // Entities to create our solid
    private Entity _profile;   // A profile circle
    private Spline _path;      // A spline path
    private Solid3d _tube;     // The solid itself
    private bool _sweepBroken; // Can the sweep continue?

    // The sweep options for creating the solid
    private SweepOptions _sweepOpts;

    // The radius of the profile circle to create
    private double _profRad;

    // An offset value we use to move the mouse back
    // and forth by one screen unit
    private int _offset;

    // Flags to indicate Kinect gesture modes
    private bool _drawing;  // Drawing mode active
    private bool _finished; // Finished - want to exit

    /// <summary>
    /// True once the "finish" gesture (both hands brought
    /// together) has been detected - the command should exit.
    /// </summary>
    public bool Finished
    {
        get { return _finished; }
    }

    /// <summary>
    /// Creates the jig and wires up the Kinect sensor callbacks.
    /// </summary>
    /// <param name="doc">Document to draw into.</param>
    /// <param name="tr">Open transaction used to append solids.</param>
    /// <param name="profRad">Radius of the swept profile circle.</param>
    public KinectJig(Document doc, Transaction tr, double profRad)
    {
        // Initialise the various members
        _doc = doc;
        _tr = tr;
        _points = new Point3dCollection();
        _vertices = new Point3dCollection();
        _cursor = null;
        _offset = 1;
        _drawing = false;
        _finished = false;
        _profile = null;
        _sweepOpts = null;
        _path = null;
        _tube = null;
        _profRad = profRad;
        _sweepBroken = false;

        // Create our sensor object - we subscribe to
        // three events to receive various data:
        // - skeleton movement
        // - rgb data
        // - depth data
        _kinect = new Runtime();
        _kinect.SkeletonFrameReady +=
            new EventHandler<SkeletonFrameReadyEventArgs>(
                OnSkeletonFrameReady
            );
        _kinect.VideoFrameReady +=
            new EventHandler<ImageFrameReadyEventArgs>(
                OnVideoFrameReady
            );
        _kinect.DepthFrameReady +=
            new EventHandler<ImageFrameReadyEventArgs>(
                OnDepthFrameReady
            );
    }

    // Cache the most recent depth frame for point-cloud generation
    void OnDepthFrameReady(
        object sender, ImageFrameReadyEventArgs e
    )
    {
        _depth = e.ImageFrame;
    }

    // Cache the most recent video frame for colour lookup
    void OnVideoFrameReady(
        object sender, ImageFrameReadyEventArgs e
    )
    {
        _video = e.ImageFrame;
    }

    /// <summary>
    /// Gesture detection: drawing is active while the left hand is
    /// held in front of the left hip; bringing both hands within
    /// 0.1 units of each other signals that we're finished.
    /// While drawing, right-hand positions are appended as vertices
    /// (with a 0.2 minimum spacing to smooth out jitter).
    /// </summary>
    void OnSkeletonFrameReady(
        object sender, SkeletonFrameReadyEventArgs e
    )
    {
        SkeletonFrame s = e.SkeletonFrame;
        if (!_finished)
        {
            foreach (SkeletonData data in s.Skeletons)
            {
                if (SkeletonTrackingState.Tracked == data.TrackingState)
                {
                    Point3d leftHip =
                        PointFromVector(
                            data.Joints[JointID.HipLeft].Position
                        );
                    Point3d leftHand =
                        PointFromVector(
                            data.Joints[JointID.HandLeft].Position
                        );
                    Point3d rightHand =
                        PointFromVector(
                            data.Joints[JointID.HandRight].Position
                        );

                    // Left hand in front of the left hip => drawing
                    _drawing = (leftHand.Z < leftHip.Z);

                    // Both hands tracked (non-origin) and close
                    // together => finish gesture
                    if (
                        leftHand.DistanceTo(Point3d.Origin) > 0 &&
                        rightHand.DistanceTo(Point3d.Origin) > 0 &&
                        leftHand.DistanceTo(rightHand) < 0.1)
                    {
                        _drawing = false;
                        _finished = true;
                    }

                    if (_drawing)
                    {
                        // If we have at least one prior vertex...
                        if (_vertices.Count > 0)
                        {
                            // ... check whether we're a certain distance away
                            // from the last one before adding it (this smooths
                            // off the jitters of adding every point)
                            Point3d lastVert = _vertices[_vertices.Count - 1];
                            if (lastVert.DistanceTo(rightHand) > 0.2)
                            {
                                // Add the new vertex to our list
                                _vertices.Add(rightHand);
                            }
                        }
                        else
                        {
                            // Add the first vertex to our list
                            _vertices.Add(rightHand);
                        }
                    }

                    // Only process the first tracked skeleton
                    break;
                }
            }
        }
    }

    /// <summary>
    /// Initializes the sensor and opens the video and depth
    /// streams at 640x480. May throw if no sensor is attached.
    /// </summary>
    public void StartSensor()
    {
        if (_kinect != null)
        {
            _kinect.Initialize(
                RuntimeOptions.UseDepth |
                RuntimeOptions.UseColor |
                RuntimeOptions.UseSkeletalTracking
            );
            _kinect.VideoStream.Open(
                ImageStreamType.Video, 2,
                ImageResolution.Resolution640x480,
                ImageType.Color
            );
            _kinect.DepthStream.Open(
                ImageStreamType.Depth, 2,
                ImageResolution.Resolution640x480,
                ImageType.Depth
            );
        }
    }

    /// <summary>
    /// Shuts the sensor down and releases our reference to it.
    /// </summary>
    public void StopSensor()
    {
        if (_kinect != null)
        {
            _kinect.Uninitialize();
            _kinect = null;
        }
    }

    /// <summary>
    /// Disposes the transient path/profile/tube entities and
    /// clears the captured vertices, ready for a new sweep.
    /// NOTE(review): the _cursor sphere is intentionally kept
    /// alive across sweeps; it is never disposed here.
    /// </summary>
    public void Cleanup()
    {
        if (_path != null)
        {
            _path.Dispose();
            _path = null;
        }
        if (_profile != null)
        {
            _profile.Dispose();
            _profile = null;
        }
        if (_tube != null)
        {
            _tube.Dispose();
            _tube = null;
        }
        _sweepOpts = null;
        _vertices.Clear();
    }

    /// <summary>
    /// Captures a full-resolution, coloured point cloud from the
    /// latest depth/video frames (used once, just before export).
    /// </summary>
    public void UpdatePointCloud()
    {
        _vecs = GeneratePointCloud(1, true);
    }

    /// <summary>
    /// Builds a point cloud from the cached depth frame, optionally
    /// looking up per-pixel colour from the cached video frame.
    /// </summary>
    /// <param name="sampling">
    /// Take one of every 'sampling' depth pixels (1 = all).
    /// </param>
    /// <param name="withColor">
    /// Whether to map each depth pixel to its RGB colour (more
    /// expensive); otherwise points are white.
    /// </param>
    /// <returns>
    /// The captured points, or null if the depth and video images
    /// have different dimensions.
    /// </returns>
    private List<ColorVector3> GeneratePointCloud(
        int sampling, bool withColor = false
    )
    {
        // We will return a list of our ColorVector3 objects
        List<ColorVector3> res = new List<ColorVector3>();

        // Let's start by determining the dimensions of the
        // respective images
        int depHeight = _depth.Image.Height;
        int depWidth = _depth.Image.Width;
        int vidHeight = _video.Image.Height;
        int vidWidth = _video.Image.Width;

        // For the sake of this initial implementation, we
        // expect them to be the same size. But this should not
        // actually need to be a requirement
        if (vidHeight != depHeight || vidWidth != depWidth)
        {
            Application.DocumentManager.MdiActiveDocument.
            Editor.WriteMessage(
                "\nVideo and depth images are of different sizes."
            );
            return null;
        }

        // Depth and color data for each pixel
        Byte[] depthData = _depth.Image.Bits;
        Byte[] colorData = _video.Image.Bits;

        // Loop through the depth information - we process two
        // bytes at a time
        for (int i = 0; i < depthData.Length; i += (2 * sampling))
        {
            // The depth pixel is two bytes long - we shift the
            // upper byte by 8 bits (a byte) and "or" it with the
            // lower byte
            int depthPixel = (depthData[i + 1] << 8) | depthData[i];

            // The x and y positions can be calculated using modulus
            // division from the array index
            int x = (i / 2) % depWidth;
            int y = (i / 2) / depWidth;

            // The x and y we pass into DepthImageToSkeleton() need to
            // be normalised (between 0 and 1), so we divide by the
            // width and height of the depth image, respectively
            // As we're using UseDepth (not UseDepthAndPlayerIndex) in
            // the depth sensor settings, we also need to shift the
            // depth pixel by 3 bits
            Vector v =
                _kinect.SkeletonEngine.DepthImageToSkeleton(
                    ((float)x) / ((float)depWidth),
                    ((float)y) / ((float)depHeight),
                    (short)(depthPixel << 3)
                );

            // A zero value for Z means there is no usable depth for
            // that pixel
            if (v.Z > 0)
            {
                // Create a ColorVector3 to store our XYZ and RGB info
                // for a pixel (note Y and Z are swapped so that Z is
                // "up" in drawing space)
                ColorVector3 cv = new ColorVector3();
                cv.X = v.X;
                cv.Y = v.Z;
                cv.Z = v.Y;

                // Only calculate the colour when it's needed (as it's
                // now more expensive, albeit more accurate)
                if (withColor)
                {
                    // Get the colour indices for that particular depth
                    // pixel. We once again need to shift the depth pixel
                    // and also need to flip the x value (as UseDepth means
                    // it is mirrored on X) and do so on the basis of
                    // 320x240 resolution (so we divide by 2, assuming
                    // 640x480 is chosen earlier), as that's what this
                    // function expects. Phew!
                    int colorX, colorY;
                    _kinect.NuiCamera.
                    GetColorPixelCoordinatesFromDepthPixel(
                        _video.Resolution, _video.ViewArea,
                        320 - (x/2), (y/2), (short)(depthPixel << 3),
                        out colorX, out colorY
                    );

                    // Make sure both indices are within bounds
                    colorX = Math.Max(0, Math.Min(vidWidth - 1, colorX));
                    colorY = Math.Max(0, Math.Min(vidHeight - 1, colorY));

                    // Extract the RGB data from the appropriate place
                    // in the colour data (4 bytes per pixel, BGR order)
                    int colIndex = 4 * (colorX + (colorY * vidWidth));
                    cv.B = (byte)(colorData[colIndex + 0]);
                    cv.G = (byte)(colorData[colIndex + 1]);
                    cv.R = (byte)(colorData[colIndex + 2]);
                }
                else
                {
                    // If we don't need colour information, just set each
                    // pixel to white
                    cv.B = 255;
                    cv.G = 255;
                    cv.R = 255;
                }

                // Add our pixel data to the list to return
                res.Add(cv);
            }
        }
        return res;
    }

    /// <summary>
    /// Converts a Kinect skeleton-space vector to drawing space.
    /// </summary>
    private Point3d PointFromVector(Vector v)
    {
        // Rather than just return a point, we're effectively
        // transforming it to the drawing space: flipping the
        // Y and Z axes (which makes it consistent with the
        // point cloud, and makes sure Z is actually up - from
        // the Kinect's perspective Y is up), and reversing
        // the X axis (which is the result of choosing UseDepth
        // rather than UseDepthAndPlayerIndex)
        return new Point3d(-v.X, v.Z, v.Y);
    }

    /// <summary>
    /// Jig sampler: we don't care about the acquired point itself,
    /// we just need AutoCAD to keep calling us so we can refresh
    /// the jigged point cloud. We nudge the mouse by one pixel on
    /// alternate calls to keep the input loop alive.
    /// </summary>
    protected override SamplerStatus Sampler(JigPrompts prompts)
    {
        // We don't really need a point, but we do need some
        // user input event to allow us to loop, processing
        // for the Kinect input
        PromptPointResult ppr =
            prompts.AcquirePoint("\nClick to capture: ");
        if (ppr.Status == PromptStatus.OK)
        {
            if (_finished)
            {
                acedPostCommand("CANCELCMD");
                return SamplerStatus.Cancel;
            }

            // If not finished, but stopped drawing, add the
            // geometry that was previously drawn to the database
            if (!_drawing && (_path != null || _tube != null))
            {
                AddSolidOrPath();
            }

            // Generate a point cloud; best-effort, so any failure
            // here (e.g. a frame arriving mid-read) is ignored
            try
            {
                if (_depth != null && _video != null)
                {
                    // Use a sampling of one in 50 points for the jig
                    _vecs = GeneratePointCloud(50);

                    // We just need the point coordinates for jigging
                    // (no colours)
                    _points.Clear();
                    foreach (ColorVector3 vec in _vecs)
                    {
                        _points.Add(
                            new Point3d(vec.X, vec.Y, vec.Z)
                        );
                    }

                    // Let's move the mouse slightly to avoid having
                    // to do it manually to keep the input coming
                    System.Drawing.Point pt =
                        System.Windows.Forms.Cursor.Position;
                    System.Windows.Forms.Cursor.Position =
                        new System.Drawing.Point(
                            pt.X, pt.Y + _offset
                        );
                    _offset = -_offset;
                }
            }
            catch {}
            return SamplerStatus.OK;
        }
        return SamplerStatus.Cancel;
    }

    /// <summary>
    /// Draws the jigged point cloud and - while the drawing gesture
    /// is active - (re)creates and draws the spline path, the swept
    /// tube and the cursor sphere at the last captured vertex.
    /// </summary>
    protected override bool WorldDraw(AcGi.WorldDraw draw)
    {
        short origCol = draw.SubEntityTraits.Color;

        // This simply draws our points
        draw.Geometry.Polypoint(_points, null, null);

        // If we're currently drawing...
        if (_drawing)
        {
            try
            {
                // Let's start by creating our spline path, but only
                // when a new vertex has arrived since the last pass
                if ((_path == null && _vertices.Count > 1) ||
                    (_path != null &&
                     _vertices.Count > _path.NumFitPoints))
                {
                    if (_path != null)
                        _path.Dispose();
                    _path = new Spline(_vertices, 0, 0.0);

                    // And our sweep profile, if we don't have one
                    if (_profile != null)
                        _profile.Dispose();
                    _profile =
                        new Circle(
                            _vertices[0],
                            _vertices[1] - _vertices[0],
                            _profRad
                        );

                    // And our sweep options, if we don't have one
                    if (_sweepOpts == null)
                    {
                        SweepOptionsBuilder sob =
                            new SweepOptionsBuilder();

                        // Align the entity to sweep to the path
                        sob.Align =
                            SweepOptionsAlignOption.AlignSweepEntityToPath;

                        // The base point is the start of the path
                        sob.BasePoint = _path.StartPoint;

                        // The profile will rotate to follow the path
                        sob.Bank = true;
                        _sweepOpts = sob.ToSweepOptions();
                    }

                    // Finally create a blank solid, if it's null
                    if (_tube == null)
                        _tube = new Solid3d();

                    // And sweep our profile along our path
                    _tube.CreateSweptSolid(_profile, _path, _sweepOpts);
                }
            }
            catch (Autodesk.AutoCAD.Runtime.Exception ex)
            {
                _sweepBroken = true;

                // _tube may still be null here: the exception could
                // have been thrown by the Spline/Circle constructors
                // before the solid was created (or after it was
                // handed off by AddSolidOrPath), so guard the Dispose
                if (_tube != null)
                {
                    _tube.Dispose();
                    _tube = null;
                }
                _doc.Editor.WriteMessage(
                    "\nException: {0}", ex.Message
                );
            }

            // Draw our path, if we have one
            if (_path != null)
            {
                draw.SubEntityTraits.Color = transPathColor;
                _path.WorldDraw(draw);
            }

            // And our solid
            if (_tube != null)
            {
                draw.SubEntityTraits.Color = transSolColor;
                _tube.WorldDraw(draw);
            }

            if (_vertices.Count > 0)
            {
                // Get the last point (at which our cursor should
                // be located, if it exists)
                Point3d lastPt = _vertices[_vertices.Count - 1];
                if (_cursor == null)
                {
                    // Create a cursor sphere
                    _cursor = new Solid3d();
                    ((Solid3d)_cursor).CreateSphere(_profRad);
                    _curPt = Point3d.Origin;
                }

                // Move it to the current point
                _cursor.TransformBy(
                    Matrix3d.Displacement(lastPt - _curPt)
                );
                _curPt = lastPt;

                // Draw the cursor; red if the sweep has failed,
                // yellow otherwise
                draw.SubEntityTraits.Color =
                    (_sweepBroken ? transPathColor : transSolColor);
                _cursor.WorldDraw(draw);
            }
        }
        draw.SubEntityTraits.Color = origCol;
        return true;
    }

    /// <summary>
    /// Appends the swept solid (preferred) or the bare path to the
    /// current space and resets the jig state for the next sweep.
    /// </summary>
    public void AddSolidOrPath()
    {
        if (_tube != null || _path != null)
        {
            // We'll add the swept solid, if we have one, otherwise
            // we'll add the path
            Entity ent;
            if (_tube == null)
            {
                ent = _path;
                _path = null;
            }
            else
            {
                ent = _tube;
                _tube = null;
            }

            BlockTableRecord btr =
                (BlockTableRecord)_tr.GetObject(
                    _doc.Database.CurrentSpaceId,
                    OpenMode.ForWrite
                );
            ent.ColorIndex = finalSolColor;
            btr.AppendEntity(ent);
            _tr.AddNewlyCreatedDBObject(ent, true);
        }

        // Cleanup() disposes the remaining transients and clears
        // the vertex list, so no separate Clear() is needed here
        Cleanup();
        _sweepBroken = false;
    }

    /// <summary>
    /// Writes the captured point cloud to a text file, one
    /// "X, Y, Z, R, G, B" line per point. Does nothing if no
    /// point cloud has been captured.
    /// </summary>
    public void ExportPointCloud(string filename)
    {
        // _vecs is only assigned once frames have arrived (and
        // GeneratePointCloud can return null), so guard both cases
        if (_vecs != null && _vecs.Count > 0)
        {
            using (StreamWriter sw = new StreamWriter(filename))
            {
                // For each pixel, write a line to the text file:
                // X, Y, Z, R, G, B
                foreach (ColorVector3 pt in _vecs)
                {
                    sw.WriteLine(
                        "{0}, {1}, {2}, {3}, {4}, {5}",
                        pt.X, pt.Y, pt.Z, pt.R, pt.G, pt.B
                    );
                }
            }
        }
    }
}
public class Commands
{
    /// <summary>
    /// KINEXT command: runs the Kinect jig to sweep a circle of
    /// user-specified radius along gesture-defined spline paths,
    /// then exports the captured point cloud, converts it to LAS
    /// via the txt2las tool, and indexes/attaches it as a PCG.
    /// </summary>
    [CommandMethod("ADNPLUGINS", "KINEXT", CommandFlags.Modal)]
    public void ImportFromKinect()
    {
        Document doc =
            Autodesk.AutoCAD.ApplicationServices.
            Application.DocumentManager.MdiActiveDocument;
        Editor ed = doc.Editor;

        // Ask for the profile radius (default 0.05, must be > 0)
        PromptDoubleOptions pdo =
            new PromptDoubleOptions("\nEnter profile radius");
        pdo.AllowZero = false;
        pdo.AllowNegative = false;
        pdo.AllowNone = false;
        pdo.DefaultValue = 0.05;
        pdo.UseDefaultValue = true;

        PromptDoubleResult pdr = ed.GetDouble(pdo);
        if (pdr.Status != PromptStatus.OK)
            return;

        Transaction tr =
            doc.TransactionManager.StartTransaction();

        KinectJig kj = new KinectJig(doc, tr, pdr.Value);
        try
        {
            kj.StartSensor();
        }
        catch (System.Exception ex)
        {
            ed.WriteMessage(
                "\nUnable to start Kinect sensor: " + ex.Message
            );
            tr.Dispose();
            return;
        }

        // Run the jig until cancelled or the finish gesture is seen
        PromptResult pr = ed.Drag(kj);
        if (pr.Status != PromptStatus.OK && !kj.Finished)
        {
            kj.StopSensor();
            kj.Cleanup();
            tr.Dispose();
            return;
        }

        // Generate a final point cloud with color before stopping
        // the sensor
        kj.UpdatePointCloud();
        kj.StopSensor();

        kj.AddSolidOrPath();
        tr.Commit();

        // Manually dispose to avoid scoping issues with
        // other variables
        tr.Dispose();

        // We'll store most local files in the temp folder.
        // We get a temp filename, delete the file and
        // use the name for our folder
        string localPath = Path.GetTempFileName();
        File.Delete(localPath);
        Directory.CreateDirectory(localPath);
        localPath += "\\";

        // Paths for our temporary files
        string txtPath = localPath + "points.txt";
        string lasPath = localPath + "points.las";

        // Our PCG file will be stored under My Documents
        string outputPath =
            Environment.GetFolderPath(
                Environment.SpecialFolder.MyDocuments
            ) + "\\Kinect Point Clouds\\";

        if (!Directory.Exists(outputPath))
            Directory.CreateDirectory(outputPath);

        // We'll use the title as a base filename for the PCG,
        // but will use an incremented integer to get an unused
        // filename
        int cnt = 0;
        string pcgPath;
        do
        {
            pcgPath =
                outputPath + "Kinect" +
                (cnt == 0 ? "" : cnt.ToString()) + ".pcg";
            cnt++;
        }
        while (File.Exists(pcgPath));

        // The path to the txt2las tool will be the same as the
        // executing assembly (our DLL)
        string exePath =
            Path.GetDirectoryName(
                Assembly.GetExecutingAssembly().Location
            ) + "\\";

        if (!File.Exists(exePath + "txt2las.exe"))
        {
            ed.WriteMessage(
                "\nCould not find the txt2las tool: please make sure " +
                "it is in the same folder as the application DLL."
            );
            return;
        }

        // Export our point cloud from the jig
        ed.WriteMessage(
            "\nSaving TXT file of the captured points.\n"
        );
        kj.ExportPointCloud(txtPath);

        // Use the txt2las utility to create a .LAS
        // file from our text file
        ed.WriteMessage(
            "\nCreating a LAS from the TXT file.\n"
        );

        ProcessStartInfo psi =
            new ProcessStartInfo(
                exePath + "txt2las",
                "-i \"" + txtPath +
                "\" -o \"" + lasPath +
                "\" -parse xyzRGB"
            );
        psi.CreateNoWindow = false;
        psi.WindowStyle = ProcessWindowStyle.Hidden;

        // Wait up to 20 seconds for the process to exit
        // (best-effort: a failure here is detected below by the
        // absence of the LAS file)
        try
        {
            using (Process p = Process.Start(psi))
            {
                p.WaitForExit(20000);
            }
        }
        catch
        { }

        // If there's a problem, we return
        if (!File.Exists(lasPath))
        {
            ed.WriteMessage(
                "\nError creating LAS file."
            );
            return;
        }

        File.Delete(txtPath);

        ed.WriteMessage(
            "Indexing the LAS and attaching the PCG.\n"
        );

        // Index the .LAS file, creating a .PCG
        string lasLisp = lasPath.Replace('\\', '/'),
               pcgLisp = pcgPath.Replace('\\', '/');

        doc.SendStringToExecute(
            "(command \"_.POINTCLOUDINDEX\" \"" +
            lasLisp + "\" \"" +
            pcgLisp + "\")(princ) ",
            false, false, false
        );

        // Attach the .PCG file (WAITFORFILE blocks until the
        // indexing has finished writing it)
        doc.SendStringToExecute(
            "_.WAITFORFILE \"" +
            pcgLisp + "\" \"" +
            lasLisp + "\" " +
            "(command \"_.-POINTCLOUDATTACH\" \"" +
            pcgLisp +
            "\" \"0,0\" \"1\" \"0\")(princ) ",
            false, false, false
        );
        doc.SendStringToExecute(
            "_.-VISUALSTYLES _C _Conceptual ",
            false, false, false
        );
    }

    /// <summary>
    /// Returns whether a file can be opened for exclusive access
    /// (i.e. no other process is still writing to it).
    /// </summary>
    private bool IsFileAccessible(string filename)
    {
        // If the file can be opened for exclusive access it means
        // the file is accessible
        try
        {
            using (FileStream fs =
                File.Open(
                    filename, FileMode.Open,
                    FileAccess.Read, FileShare.None
                ))
            {
                return true;
            }
        }
        catch (IOException)
        {
            return false;
        }
    }

    /// <summary>
    /// A command which waits for a particular PCG file to exist
    /// and become accessible, then removes the temporary LAS file
    /// (and its folder) passed as the second argument.
    /// </summary>
    [CommandMethod(
        "ADNPLUGINS", "WAITFORFILE", CommandFlags.NoHistory
    )]
    public void WaitForFileToExist()
    {
        Document doc =
            Application.DocumentManager.MdiActiveDocument;
        Editor ed = doc.Editor;

        PromptResult pr = ed.GetString("Enter path to PCG: ");
        if (pr.Status != PromptStatus.OK)
            return;
        string pcgPath = pr.StringResult.Replace('/', '\\');

        pr = ed.GetString("Enter path to LAS: ");
        if (pr.Status != PromptStatus.OK)
            return;
        string lasPath = pr.StringResult.Replace('/', '\\');

        ed.WriteMessage(
            "\nWaiting for PCG creation to complete...\n"
        );

        // Check the write time for the PCG file...
        // if it hasn't been written to for at least half a second,
        // then we try to use a file lock to see whether the file
        // is accessible or not.
        // TimeSpan.Ticks are 100ns units, so half a second is
        // TicksPerSecond / 2 (the previous value of 50 ticks was
        // only 5 microseconds, which defeated the check)
        const long quietTicks = TimeSpan.TicksPerSecond / 2;
        TimeSpan diff;
        bool cancelled = false;

        // First loop is to see when writing has stopped
        // (better than always throwing exceptions)
        while (true)
        {
            if (File.Exists(pcgPath))
            {
                DateTime dt = File.GetLastWriteTime(pcgPath);
                diff = DateTime.Now - dt;
                if (diff.Ticks > quietTicks)
                    break;
            }
            System.Windows.Forms.Application.DoEvents();
            if (HostApplicationServices.Current.UserBreak())
            {
                cancelled = true;
                break;
            }
        }

        // Second loop will wait until file is finally accessible
        // (by calling a function that requests an exclusive lock)
        if (!cancelled)
        {
            int inacc = 0;
            while (true)
            {
                if (IsFileAccessible(pcgPath))
                    break;
                else
                    inacc++;
                System.Windows.Forms.Application.DoEvents();
                if (HostApplicationServices.Current.UserBreak())
                {
                    cancelled = true;
                    break;
                }
            }
            ed.WriteMessage("\nFile inaccessible {0} times.", inacc);

            // Best-effort cleanup of the temporary LAS folder
            try
            {
                CleanupTmpFiles(lasPath);
            }
            catch
            { }
        }
    }

    /// <summary>
    /// Deletes the given temporary file (if present) and the
    /// temporary folder that contains it.
    /// </summary>
    internal void CleanupTmpFiles(string txtPath)
    {
        if (File.Exists(txtPath))
            File.Delete(txtPath);
        Directory.Delete(
            Path.GetDirectoryName(txtPath)
        );
    }
}
}
Here are some sample results of the KINEXT command:
And just to prove it’s in 3D, here’s another viewing angle: