<!-- 337 lines · 10 KiB · HTML -->

<!DOCTYPE html>
<html lang="en">
<head>
<title>three.js - kinect</title>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0"
/>
</head>
<body>
<video
id="video"
loop
muted
crossorigin="anonymous"
playsinline
style="display: none"
>
<!-- <source
src="https://cdn.glitch.global/5bef24a3-b00f-440a-b7ad-33368d47b340/2022-07-12%2017-20-08.mp4?v=1737668838585"
type="video/mp4"
/> -->
<source
src="https://stream.vrcdn.live/live/ropebunny.live.mp4"
type="video/mp4"
/>
</video>
<script id="vs" type="x-shader/x-vertex">
// Vertex shader for one camera's point cloud.
// The video frame is an atlas: each camera owns a horizontal strip of width
// 1/numCameras, split vertically into thirds. The depth sampled here comes
// from the middle third (v offset 1/3); the fragment shader reads colour
// from another third. NOTE(review): layout inferred from the UV math below —
// confirm against the stream encoder.
uniform sampler2D map;
uniform float width;
uniform float height;
uniform float nearClipping, farClipping;
uniform float pointSize;
uniform float boxSize;
uniform float cameraIndex;
uniform float numCameras;
uniform mat4 c2wm;
varying vec2 vUv;
void main() {
// Normalised grid coordinate of this vertex within one capture tile.
vUv = vec2( position.x / width, position.y / height );
//vec4 color = texture2D( map, vUv*vec2(0.5,(1.0/3.0))+vec2(0,(1.0/3.0)));
// Sample this camera's depth sub-region of the atlas.
vec4 color = texture2D( map, vUv*vec2(1.0/numCameras,(1.0/3.0))+vec2(cameraIndex/numCameras,(1.0/3.0)));
// Depth is spread across r, g and b; averaging the channels recovers it.
float depth = ( color.r + color.g + color.b ) / 3.0;
// Projection code by @kcmic
// instead of building a matrix and multiplying by the matrix, math is being done to
// guess the projection.
// Undoes the perspective division
// Map normalised depth back into the [nearClipping, farClipping] range.
float z = depth * (farClipping-nearClipping) + nearClipping;
// Camera-local position: grid coords recentred to [-0.5, 0.5] and scaled.
vec4 pos = vec4(
( position.x / width - 0.5 )*boxSize,
( position.y / height - 0.5 )*boxSize,
z*boxSize*0.5,
1.0);
// vec4 pos2 = c2wm * pos;
// c2wm is named "camera-to-world" but its inverse is applied — verify which
// convention the streamed matrix actually uses.
// NOTE(review): inverse() requires GLSL ES 3.0 / WebGL2 — confirm target.
vec4 pos2 = inverse(c2wm) * pos;
// float(cameraIndex)
gl_PointSize = pointSize;
gl_Position = projectionMatrix * modelViewMatrix * pos2;
if (depth <.01) { // move this point out of the view box if the depth is nearly zero
gl_Position = vec4(-1., -1., -1., -1.);
}
}
</script>
<script id="fs" type="x-shader/x-fragment">
// Fragment shader: fetches this point's colour from the colour sub-region
// (v offset 0) of the camera's strip of the video atlas.
uniform sampler2D map;
uniform float cameraIndex;
uniform float numCameras;
uniform mat4 c2wm;
varying vec2 vUv;
void main() {
vec4 color = texture2D( map, vUv*vec2(1.0/numCameras,(1.0/3.0))+vec2(cameraIndex/numCameras,0.0));
// if (color.r < .2) { discard;}
// Debug tint: paint the whole cloud orange when one matrix element exceeds
// a threshold. NOTE(review): magic 0.26 on element [1][2] looks like a
// temporary diagnostic — confirm it is still wanted.
if (c2wm[1][2] > .26) {
gl_FragColor = vec4(1.0, 0.5, 0.0, 1.0);
}
else {
gl_FragColor = vec4( color.r, color.g, color.b, 1.0);
}
}
</script>
<script type="importmap">
{
"imports": {
"three": "https://threejs.org/build/three.module.js",
"three/addons/": "https://threejs.org/examples/jsm/"
}
}
</script>
<script type="module">
import * as THREE from "three";
import { GUI } from "three/addons/libs/lil-gui.module.min.js";
// Module-level rendering state; everything below is assigned in init().
let scene, camera, renderer;
let geometry; // shared point-grid geometry (one vertex per capture pixel)
let meshList = []; // one THREE.Points per decoded camera
let materialList = []; // one ShaderMaterial per decoded camera
let mouse, center; // pointer offset from viewport centre, and orbit target
let numCameras; // camera count decoded each frame from the video stream
const cameraFloats = []; // NOTE(review): unused except in a commented-out line
init();
function pixelArrayToUint32(imageData, canvasWidth, rowNumber) {
  // Decode one row of the readback image into a 32-bit integer: each of the
  // first 32 pixels encodes one bit in its red channel (bright = 1),
  // most-significant bit first. The result uses JS signed 32-bit semantics
  // (bit 31 set yields a negative number), matching what the float decoder
  // downstream expects.
  const rowOffset = rowNumber * canvasWidth * 4;
  let value = 0;
  for (let bit = 0; bit < 32; bit++) {
    if (imageData[rowOffset + bit * 4] > 128) {
      value |= 1 << (31 - bit);
    }
  }
  return value;
}
// convert unsigned integer 32 to float
function decodeUint32ToFloat(theInteger) {
  // Reinterpret the raw 32 bits of theInteger as an IEEE-754 single-precision
  // float. Writing and reading through the same typed-array buffer uses the
  // same byte order for both operations, so the result is a pure bit
  // reinterpretation regardless of platform endianness.
  const bits = new Uint32Array(1);
  bits[0] = theInteger; // negative inputs wrap via ToUint32, as before
  return new Float32Array(bits.buffer)[0];
}
function init() {
  // One-time setup: DOM, camera, scene, shared point-grid geometry, renderer
  // and input handlers. Per-camera materials/meshes are (re)built by
  // buildMaterialAndMeshList() whenever the camera count decoded from the
  // video stream changes.
  const container = document.createElement("div");
  document.body.appendChild(container);

  const info = document.createElement("div");
  info.id = "info";
  info.innerHTML =
    '<a href="https://threejs.org" target="_blank" rel="noopener">three.js</a> - kinect';
  document.body.appendChild(info);

  camera = new THREE.PerspectiveCamera(
    50,
    window.innerWidth / window.innerHeight,
    1,
    10000
  );
  camera.position.set(0, 0, 1000);

  scene = new THREE.Scene();
  center = new THREE.Vector3();
  center.z = 0;

  const video = document.getElementById("video");
  const texture = new THREE.VideoTexture(video);
  texture.minFilter = THREE.NearestFilter;
  texture.generateMipmaps = false;

  // Each camera occupies a 256x256 tile of the incoming video frame.
  const captureImageWidth = 256,
    captureImageHeight = 256;
  const nearClipping = 0.1,
    farClipping = 5,
    pointSize = 5,
    boxSize = 500;

  // Shared geometry: one vertex per pixel of a capture tile, storing its
  // (x, y) grid coordinate; the vertex shader displaces it by sampled depth.
  geometry = new THREE.BufferGeometry();
  const vertices = new Float32Array(
    captureImageWidth * captureImageHeight * 3
  );
  for (let i = 0, j = 0, l = vertices.length; i < l; i += 3, j++) {
    vertices[i] = j % captureImageWidth;
    vertices[i + 1] = Math.floor(j / captureImageWidth);
  }
  geometry.setAttribute("position", new THREE.BufferAttribute(vertices, 3));

  // play() returns a promise that rejects when autoplay is blocked; surface
  // the failure instead of leaving an unhandled rejection.
  video.play().catch((err) => {
    console.warn("video.play() failed:", err);
  });

  // (Re)creates one ShaderMaterial + THREE.Points pair per camera.
  function buildMaterialAndMeshList(numCameras) {
    // Tear down the previous generation. THREE.Points has no dispose()
    // method (the old code called one, which threw on every camera-count
    // change); removing the mesh from the scene and disposing its material
    // releases the GPU resources. The geometry is shared and kept alive.
    for (let i = 0; i < materialList.length; i++) {
      scene.remove(meshList[i]);
      materialList[i].dispose();
    }
    materialList = [];
    meshList = [];
    for (let i = 0; i < numCameras; i++) {
      const material = new THREE.ShaderMaterial({
        uniforms: {
          map: { value: texture },
          width: { value: captureImageWidth },
          height: { value: captureImageHeight },
          nearClipping: { value: nearClipping },
          farClipping: { value: farClipping },
          boxSize: { value: boxSize },
          pointSize: { value: pointSize },
          cameraIndex: { value: i },
          numCameras: { value: numCameras },
          c2wm: { value: new THREE.Matrix4() },
        },
        vertexShader: document.getElementById("vs").textContent,
        fragmentShader: document.getElementById("fs").textContent,
        blending: THREE.NormalBlending,
        depthTest: true,
        depthWrite: true,
        transparent: true,
      });
      const mesh = new THREE.Points(geometry, material);
      scene.add(mesh);
      materialList[i] = material;
      meshList[i] = mesh;
    }
  }

  // Per-frame: orbit the camera with the mouse, decode the camera count and
  // per-camera matrices from the video frame, then render.
  function animate() {
    camera.position.x = -mouse.x;
    camera.position.y = mouse.y;
    camera.lookAt(center);

    const canvas = document.getElementById("debugCanvas");
    const context = canvas.getContext("2d", { willReadFrequently: true });

    // Until the video has a decodable frame there is nothing to read back.
    if (video.readyState < video.HAVE_CURRENT_DATA) {
      renderer.render(scene, camera);
      return;
    }

    // The frame must be drawn to a 2D canvas so its pixels can be read.
    // Row 0 encodes numCameras as a pixel-encoded float.
    context.drawImage(texture.image, 0, 0);
    const header = context.getImageData(0, 0, canvas.width, 1);
    numCameras = decodeUint32ToFloat(
      pixelArrayToUint32(header.data, canvas.width, 0)
    );

    // A torn or blank frame can decode to NaN or garbage; skip this frame
    // rather than rebuilding the mesh list from bad data.
    if (!Number.isFinite(numCameras) || numCameras < 0) {
      renderer.render(scene, camera);
      return;
    }

    // Rebuild materials/meshes only when the camera count changes.
    if (numCameras != materialList.length) {
      console.log("got new camera count: " + numCameras);
      buildMaterialAndMeshList(numCameras);
    }

    // Rows 1..16 of each camera's tile hold its 4x4 matrix, one
    // pixel-encoded float per row. NOTE(review): only tile 0 is drawn to the
    // canvas above; reads at 256*i for i > 1 may fall outside the drawn
    // region — confirm how multi-camera frames land on the 512-wide canvas.
    for (let i = 0; i < numCameras; i++) {
      const d = context.getImageData(256 * i, 1, canvas.width, 16);
      const c2wm_array = new Array(16).fill(0);
      for (let rowNr = 0; rowNr < 16; rowNr++) {
        c2wm_array[rowNr] = decodeUint32ToFloat(
          pixelArrayToUint32(d.data, canvas.width, rowNr)
        );
      }
      materialList[i].uniforms.c2wm.value.fromArray(c2wm_array);
    }

    renderer.render(scene, camera);
  }

  renderer = new THREE.WebGLRenderer();
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  renderer.setAnimationLoop(animate);
  container.appendChild(renderer.domElement);

  mouse = new THREE.Vector3(0, 0, 1);
  document.addEventListener("mousemove", onDocumentMouseMove);
  window.addEventListener("resize", onWindowResize);
}
function onWindowResize() {
  // Keep the camera projection and the drawing buffer in sync with the
  // current viewport dimensions.
  const width = window.innerWidth;
  const height = window.innerHeight;
  camera.aspect = width / height;
  camera.updateProjectionMatrix();
  renderer.setSize(width, height);
}
function onDocumentMouseMove(event) {
  // Record the pointer position relative to the viewport centre; the render
  // loop uses it to offset the camera around the scene centre.
  const { clientX, clientY } = event;
  mouse.x = clientX - window.innerWidth / 2;
  mouse.y = clientY - window.innerHeight / 2;
}
</script>
<canvas id="debugCanvas" width="512" height="256"></canvas>
</body>
</html>