265 lines
8.7 KiB
HTML
265 lines
8.7 KiB
HTML
<!DOCTYPE html>
|
|
<html lang="en">
|
|
<head>
|
|
<title>Holoprojection browser endpoint</title>
|
|
<meta charset="utf-8" />
|
|
<meta
|
|
name="viewport"
|
|
content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0"
|
|
/>
|
|
</head>
|
|
<body>
|
|
<video
|
|
id="video"
|
|
loop
|
|
muted
|
|
crossorigin="anonymous"
|
|
playsinline
|
|
style="display: none"
|
|
>
|
|
<!-- <source
|
|
src="https://cdn.glitch.global/5bef24a3-b00f-440a-b7ad-33368d47b340/2022-07-12%2017-20-08.mp4?v=1737668838585"
|
|
type="video/mp4"
|
|
/> -->
|
|
<source
|
|
src="https://stream.vrcdn.live/live/ropebunny.live.mp4"
|
|
type="video/mp4"
|
|
/>
|
|
</video>
|
|
<div id="main-scene-container"></div>
|
|
<canvas id="debugCanvas" width="512" height="256"></canvas>
|
|
|
|
<script type="importmap">
|
|
{
|
|
"imports": {
|
|
"three": "https://threejs.org/build/three.module.js",
|
|
"three/addons/": "https://threejs.org/examples/jsm/"
|
|
}
|
|
}
|
|
</script>
|
|
<script type="module">
|
|
import * as THREE from "three";

// GUI is only referenced by the commented-out lil-gui controls in init();
// kept so they can be re-enabled without touching the imports.
import { GUI } from "three/addons/libs/lil-gui.module.min.js";

// Fetch both GLSL shader sources up front. Top-level await blocks module
// evaluation until the files have loaded, so init() can assume they exist.
let projection_vert_shader_source = await (await fetch('./projection.vert.glsl')).text();
let projection_frag_shader_source = await (await fetch('./projection.frag.glsl')).text();

// Core three.js objects, created in init().
let scene, camera, renderer;
// Point-cloud geometry shared by every per-camera mesh.
let geometry;
// One THREE.Points mesh and one ShaderMaterial per camera encoded in the
// video stream; rebuilt whenever the decoded camera count changes.
let meshList = [];
let materialList = [];
// mouse: pointer offset from the window centre (drives the view camera);
// center: the point the view camera looks at.
let mouse, center;

// Camera count decoded each frame from the first pixel row of the video.
let numCameras;

// NOTE(review): currently unused scratch buffer — see the commented-out
// line in animate(); presumably an earlier per-camera float cache.
const cameraFloats = [];


init();
|
|
|
|
/**
 * Decode one 32-bit integer from a horizontal run of 32 pixels in RGBA
 * image data. Pixel i's red channel encodes one bit (bright = 1), with
 * the leftmost pixel being the most significant bit.
 *
 * @param {Uint8ClampedArray|Uint8Array} imageData - raw RGBA bytes (4 per pixel)
 * @param {number} canvasWidth - width in pixels of one row of imageData
 * @param {number} rowNumber - zero-based row to decode
 * @returns {number} the decoded value as an unsigned 32-bit integer
 */
function pixelArrayToUint32(imageData, canvasWidth, rowNumber) {
  const rowOffset = rowNumber * canvasWidth * 4; // byte offset of the row start
  let result = 0;

  for (let i = 0; i < 32; i++) {
    // Red channel of pixel i; values above 128 encode a 1 bit. Threshold
    // rather than compare to 255 so video compression noise is tolerated.
    const thisBit = imageData[rowOffset + i * 4] > 128 ? 1 : 0;
    result |= thisBit << (31 - i);
  }

  // ">>> 0" reinterprets the signed 32-bit accumulator as unsigned: the
  // original returned a negative number whenever bit 31 was set.
  return result >>> 0;
}
|
|
|
|
/**
 * Reinterpret the raw bits of a 32-bit unsigned integer as an IEEE-754
 * single-precision float (big-endian byte order, matching the encoder).
 *
 * @param {number} theInteger - uint32 whose bit pattern is a float32
 * @returns {number} the decoded float value
 */
function decodeUint32ToFloat(theInteger) {
  const scratch = new DataView(new ArrayBuffer(4));
  scratch.setUint32(0, theInteger); // write the raw bit pattern...
  return scratch.getFloat32(0); // ...and read it back as a float32
}
|
|
|
|
/**
 * Build the scene, point-cloud geometry, video texture and renderer, and
 * start the per-frame animation loop. Populates the module-level
 * scene/camera/renderer/geometry/mouse/center variables.
 */
function init() {
  const container = document.getElementById("main-scene-container");

  camera = new THREE.PerspectiveCamera(
    50,
    window.innerWidth / window.innerHeight,
    1,
    10000
  );
  camera.position.set(0, 0, 200);

  scene = new THREE.Scene();
  center = new THREE.Vector3();
  center.z = 0;

  const video = document.getElementById("video");

  // Nearest-neighbour sampling with no mipmaps: the video carries
  // bit-encoded metadata pixels that must be read back unblended.
  const texture = new THREE.VideoTexture(video);
  texture.minFilter = THREE.NearestFilter;
  texture.magFilter = THREE.NearestFilter;
  texture.generateMipmaps = false;

  // Size of the per-camera capture region inside the video stream.
  const captureImageWidth = 256,
    captureImageHeight = 256;
  // Initial values for the projection shader's uniforms.
  const nearClipping = 0.1,
    farClipping = 5,
    pointSize = 5,
    boxSize = 100;

  geometry = new THREE.BufferGeometry();
  // One point per captured pixel; x/y hold the pixel coordinates and the
  // vertex shader displaces them from the video texture.
  const vertices = new Float32Array(captureImageWidth * captureImageHeight * 3);
  for (
    let i = 0, j = 0, l = vertices.length;
    i < l;
    i += 3, j++
  ) {
    vertices[i] = j % captureImageWidth; // pixels from left
    vertices[i + 1] = Math.floor(j / captureImageWidth); // pixels from bottom (vertical zero in shader land)
    vertices[i + 2] = 0;
  }

  // Treat the "position" attribute as vertices of three elements each.
  geometry.setAttribute(
    "position",
    new THREE.BufferAttribute(vertices, 3)
  );

  // play() returns a promise; log autoplay failures instead of leaving
  // the rejection unhandled (the element is muted, so autoplay should
  // normally be permitted).
  video.play().catch((err) => console.warn("video.play() failed:", err));

  /**
   * (Re)build one ShaderMaterial + Points mesh per camera.
   * Called whenever the camera count decoded from the video changes.
   */
  function buildMaterialAndMeshList(numCameras) {
    // First clean out the old mesh and material lists.
    // NOTE: THREE.Points (an Object3D) has no dispose() method — the
    // original called meshList[i].dispose(), which threw a TypeError on
    // every rebuild after the first. Materials are never scene children,
    // so scene.remove(material) was a no-op and is dropped. The geometry
    // is shared by all meshes and is intentionally kept alive.
    for (let i = 0; i < materialList.length; i++) {
      materialList[i].dispose();
      scene.remove(meshList[i]);
    }

    materialList = [];
    meshList = [];

    // Now reconstruct both lists, one entry per camera.
    for (let i = 0; i < numCameras; i++) {
      let material = new THREE.ShaderMaterial({
        uniforms: {
          map: { value: texture },
          width: { value: captureImageWidth },
          height: { value: captureImageHeight },
          nearClipping: { value: nearClipping },
          farClipping: { value: farClipping },
          boxSize: { value: boxSize },
          pointSize: { value: pointSize },
          cameraIndex: { value: i },
          numCameras: { value: numCameras },
          // camera-to-world matrix, refreshed every frame in animate()
          c2wm: { value: new THREE.Matrix4() }
        },
        vertexShader: projection_vert_shader_source,
        fragmentShader: projection_frag_shader_source,
        blending: THREE.NormalBlending,
        depthTest: true,
        depthWrite: true,
        transparent: true,
      });

      let mesh = new THREE.Points(geometry, material);
      scene.add(mesh);

      materialList[i] = material;
      meshList[i] = mesh;
    }
  }

  /**
   * Per-frame callback: decode the camera metadata rows from the video,
   * rebuild meshes if the camera count changed, update each camera's
   * matrix uniform, and render.
   */
  function animate() {
    camera.position.x = -mouse.x;
    camera.position.y = mouse.y;
    camera.lookAt(center);

    let canvas = document.getElementById("debugCanvas");
    let context = canvas.getContext('2d', { willReadFrequently: true });
    // The video frame must be drawn to a canvas before its pixels can be
    // read back with getImageData.
    context.drawImage(texture.image, 0, 0);

    // Row 0 encodes numCameras as a pixel-encoded float32.
    let d = context.getImageData(0, 0, canvas.width, 1);
    numCameras = decodeUint32ToFloat(pixelArrayToUint32(d.data, canvas.width, 0));

    // If the number of cameras changed, reconstruct the material and mesh
    // lists. Guard against garbage frames (NaN or non-integer decodes,
    // e.g. before the stream is ready) which would otherwise trigger a
    // rebuild attempt on every frame.
    if (Number.isInteger(numCameras) && numCameras >= 0 && numCameras != materialList.length) {
      console.log("got new camera count: " + numCameras);
      buildMaterialAndMeshList(numCameras);
    }

    // Update the camera-to-world matrix uniform of every mesh. Iterating
    // over materialList.length (== the last accepted camera count) rather
    // than the raw decoded value avoids indexing past the list when the
    // decode is momentarily garbage.
    for (let i = 0; i < materialList.length; i++) {
      // Rows 1..16 of the i-th 256-pixel-wide column hold the 16
      // pixel-encoded floats of the 4x4 camera-to-world matrix.
      let d = context.getImageData((256 * i), 1, canvas.width, 16);

      const c2wm_array = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
      for (let rowNr = 0; rowNr < 16; rowNr++) {
        c2wm_array[rowNr] = decodeUint32ToFloat(pixelArrayToUint32(d.data, canvas.width, rowNr));
      }
      materialList[i].uniforms.c2wm.value.fromArray(c2wm_array);
    }
    renderer.render(scene, camera);
  }

  renderer = new THREE.WebGLRenderer();
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  renderer.setAnimationLoop(animate);
  container.appendChild(renderer.domElement);

  // Pointer offset from window centre; updated by onDocumentMouseMove.
  mouse = new THREE.Vector3(0, 0, 1);

  document.addEventListener("mousemove", onDocumentMouseMove);

  window.addEventListener("resize", onWindowResize);
}
|
|
|
|
/**
 * Keep the camera's aspect ratio and the renderer's drawing-buffer size
 * in sync with the browser window after a resize.
 */
function onWindowResize() {
  const { innerWidth, innerHeight } = window;

  camera.aspect = innerWidth / innerHeight;
  camera.updateProjectionMatrix();

  renderer.setSize(innerWidth, innerHeight);
}
|
|
|
|
/**
 * Track the pointer as an offset from the window centre; animate() uses
 * the result to position the view camera.
 *
 * @param {MouseEvent} event - mousemove event with clientX/clientY
 */
function onDocumentMouseMove(event) {
  const { clientX, clientY } = event;

  mouse.x = clientX - window.innerWidth / 2;
  mouse.y = clientY - window.innerHeight / 2;
}
|
|
|
|
|
|
</script>
|
|
</body>
|
|
</html>
|