# Source: aim-uofa/AdelaiDepth (Python) — data loading, option parsing, test and train scripts
import os
import os.path
import json
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from PIL import Image
from imgaug import augmenters as iaa

from lib.configs.config import cfg


class MultiDataset(Dataset):
    def __init__(self, opt, dataset_name=None):
        super(MultiDataset, self).__init__()
        self.opt = opt
        self.root = opt.dataroot
        self.dataset_name = dataset_name
        self.dir_anno = os.path.join(cfg.ROOT_DIR, opt.dataroot, dataset_name, 'annotations',
                                     opt.phase_anno + '_annotations.json')
        self.dir_teacher_list = None
        (self.rgb_paths, self.depth_paths, self.disp_paths, self.sem_masks, self.ins_paths,
         self.cam_intrinsics, self.all_annos, self.curriculum_list) = self.getData()
        self.data_size = len(self.all_annos)
        self.focal_length_dict = {'diml_ganet': 1380.0 / 2.0, 'taskonomy': 512.0, 'online': 256.0,
                                  'apolloscape2': 2304.0 / 2.0, '3d-ken-burns': 512.0}

    def getData(self):
        with open(self.dir_anno, 'r') as load_f:
            all_annos = json.load(load_f)
        curriculum_list = list(np.random.choice(len(all_annos), len(all_annos), replace=False))

        rgb_paths = [
            os.path.join(cfg.ROOT_DIR, self.root, all_annos[i]['rgb_path'])
            for i in range(len(all_annos))
        ]
        depth_paths = [
            os.path.join(cfg.ROOT_DIR, self.root, all_annos[i]['depth_path'])
            if 'depth_path' in all_annos[i]
            else None
            for i in range(len(all_annos))
        ]
        disp_paths = [
            os.path.join(cfg.ROOT_DIR, self.root, all_annos[i]['disp_path'])
            if 'disp_path' in all_annos[i]
            else None
            for i in range(len(all_annos))
        ]
        mask_paths = [
            (
                os.path.join(cfg.ROOT_DIR, self.root, all_annos[i]['mask_path'])
                if all_annos[i]['mask_path'] is not None
                else None
            )
            if 'mask_path' in all_annos[i]
            else None
            for i in range(len(all_annos))
        ]
        ins_paths = [
            (
                os.path.join(cfg.ROOT_DIR, self.root, all_annos[i]['ins_planes_path'])
                if all_annos[i]['ins_planes_path'] is not None
                else None
            )
            if 'ins_planes_path' in all_annos[i]
            else None
            for i in range(len(all_annos))
        ]
        cam_intrinsics = [
            (
                all_annos[i]['cam_intrinsic_para']
                if 'cam_intrinsic_para' in all_annos[i]
                else None
            )
            for i in range(len(all_annos))
        ]  # [f, cx, cy]

        return rgb_paths, depth_paths, disp_paths, mask_paths, ins_paths, cam_intrinsics, all_annos, curriculum_list

    def __getitem__(self, anno_index):
        if 'train' in self.opt.phase:
            data = self.online_aug(anno_index)
        else:
            data = self.load_test_data(anno_index)
        return data

    def load_test_data(self, anno_index):
        """
        Load a test sample and its normalized ground-truth depth.
        Invalid regions of the depth map are set to 0.
        :param anno_index: data index.
        """
        rgb_path = self.rgb_paths[anno_index]
        rgb = cv2.imread(rgb_path)[:, :, ::-1]  # BGR -> RGB, H*W*C
        depth, sky_mask, mask_valid = self.load_depth(anno_index, rgb)
        rgb_resize = cv2.resize(rgb, (cfg.DATASET.CROP_SIZE[1], cfg.DATASET.CROP_SIZE[0]),
                                interpolation=cv2.INTER_LINEAR)

        # to torch, normalize
        rgb_torch = self.scale_torch(rgb_resize.copy())

        # normalize depth
        depth_normal = depth / (depth.max() + 1e-8)
        depth_normal[~mask_valid.astype(bool)] = 0

        data = {'rgb': rgb_torch, 'gt_depth': depth_normal}
        return data

    def online_aug(self, anno_index):
        """
        Augment data for training online randomly.
        :param anno_index: data index.
        """
        rgb_path = self.rgb_paths[anno_index]
        # depth_path = self.depth_paths[anno_index]

        rgb = cv2.imread(rgb_path)[:, :, ::-1]  # BGR -> RGB, H*W*C
        disp, depth, \
            invalid_disp, invalid_depth, \
            ins_planes_mask, sky_mask, \
            ground_mask, depth_path = self.load_training_data(anno_index, rgb)
        rgb_aug = self.rgb_aug(rgb)

        # resize rgb, depth, disp
        flip_flg, resize_size, crop_size, pad, resize_ratio = self.set_flip_resize_crop_pad(rgb_aug)

        # focal length: use the annotated intrinsics if available, otherwise a per-dataset
        # default, otherwise a generic fallback
        if self.cam_intrinsics[anno_index] is not None:
            cam_intrinsic = self.cam_intrinsics[anno_index]
        elif self.dataset_name.lower() in self.focal_length_dict:
            cam_intrinsic = [float(self.focal_length_dict[self.dataset_name.lower()]),
                             cfg.DATASET.CROP_SIZE[1] / 2, cfg.DATASET.CROP_SIZE[0] / 2]
        else:
            cam_intrinsic = [256.0, cfg.DATASET.CROP_SIZE[1] / 2, cfg.DATASET.CROP_SIZE[0] / 2]
        focal_length = cam_intrinsic[0] * (resize_size[0] + resize_size[1]) / ((cam_intrinsic[1] + cam_intrinsic[2]) * 2)
        focal_length = focal_length * resize_ratio
        focal_length = float(focal_length)

        rgb_resize = self.flip_reshape_crop_pad(rgb_aug, flip_flg, resize_size, crop_size, pad, 0)
        depth_resize = self.flip_reshape_crop_pad(depth, flip_flg, resize_size, crop_size, pad, -1,
                                                  resize_method='nearest')
        disp_resize = self.flip_reshape_crop_pad(disp, flip_flg, resize_size, crop_size, pad, -1,
                                                 resize_method='nearest')

        # resize sky_mask and invalid regions
        sky_mask_resize = self.flip_reshape_crop_pad(sky_mask.astype(np.uint8), flip_flg, resize_size,
                                                     crop_size, pad, 0, resize_method='nearest')
        invalid_disp_resize = self.flip_reshape_crop_pad(invalid_disp.astype(np.uint8), flip_flg, resize_size,
                                                         crop_size, pad, 0, resize_method='nearest')
        invalid_depth_resize = self.flip_reshape_crop_pad(invalid_depth.astype(np.uint8), flip_flg, resize_size,
                                                          crop_size, pad, 0, resize_method='nearest')

        # resize instance plane masks; the ground is added as one extra plane
        ins_planes_mask[ground_mask] = int(np.unique(ins_planes_mask).max() + 1)
        ins_planes_mask_resize = self.flip_reshape_crop_pad(ins_planes_mask.astype(np.uint8), flip_flg,
                                                            resize_size, crop_size, pad, 0,
                                                            resize_method='nearest')

        # normalize disp and depth
        depth_resize = depth_resize / (depth_resize.max() + 1e-8) * 10
        disp_resize = disp_resize / (disp_resize.max() + 1e-8) * 10

        # invalid regions are set to -1; sky regions are set to 0 in disp and 20 in depth
        disp_resize[invalid_disp_resize.astype(bool) | (disp_resize > 1e7) | (disp_resize < 0)] = -1
        depth_resize[invalid_depth_resize.astype(bool) | (depth_resize > 1e7) | (depth_resize < 0)] = -1
        disp_resize[sky_mask_resize.astype(bool)] = 0
        depth_resize[sky_mask_resize.astype(bool)] = 20

        # to torch, normalize
        rgb_torch = self.scale_torch(rgb_resize.copy())
        depth_torch = self.scale_torch(depth_resize)
        disp_torch = self.scale_torch(disp_resize)
        ins_planes = torch.from_numpy(ins_planes_mask_resize)
        focal_length = torch.tensor(focal_length)

        if ('taskonomy' in self.dataset_name.lower()) or ('3d-ken-burns' in self.dataset_name.lower()):
            quality_flg = np.array(3)
        elif 'diml' in self.dataset_name.lower():
            quality_flg = np.array(2)
        else:
            quality_flg = np.array(1)

        data = {'rgb': rgb_torch, 'depth': depth_torch, 'disp': disp_torch,
                'A_paths': rgb_path, 'B_paths': depth_path, 'quality_flg': quality_flg,
                'planes': ins_planes, 'focal_length': focal_length}
        return data

    def rgb_aug(self, rgb):
        # data augmentation for rgb
        img_aug = transforms.ColorJitter(brightness=0.0, contrast=0.3, saturation=0.1, hue=0)(Image.fromarray(rgb))
        rgb_aug_gray_compress = iaa.Sequential([iaa.MultiplyAndAddToBrightness(mul=(0.6, 1.25), add=(-20, 20)),
                                                iaa.Grayscale(alpha=(0.0, 1.0)),
                                                iaa.JpegCompression(compression=(0, 70)),
                                                ], random_order=True)
        rgb_aug_blur1 = iaa.AverageBlur(k=((0, 5), (0, 6)))
        rgb_aug_blur2 = iaa.MotionBlur(k=9, angle=[-45, 45])
        img_aug = rgb_aug_gray_compress(image=np.array(img_aug))
        blur_flg = np.random.uniform(0.0, 1.0)
        img_aug = rgb_aug_blur1(image=img_aug) if blur_flg > 0.7 else img_aug
        img_aug = rgb_aug_blur2(image=img_aug) if blur_flg < 0.3 else img_aug
        rgb_colorjitter = np.array(img_aug)
        return rgb_colorjitter

    def set_flip_resize_crop_pad(self, A):
        """
        Set flip, padding, reshaping, and cropping flags.
        :param A: Input image, [H, W, C]
        :return: Data augmentation parameters
        """
        # flip
        flip_prob = np.random.uniform(0.0, 1.0)
        flip_flg = True if flip_prob > 0.5 and 'train' in self.opt.phase else False

        # reshape: the output is always the configured crop size.
        # (Defined before the crop branch so the eval branch below can use it.)
        resize_size = [cfg.DATASET.CROP_SIZE[0], cfg.DATASET.CROP_SIZE[1]]

        # crop
        if 'train' in self.opt.phase:
            image_h, image_w = A.shape[:2]
            croph, cropw = np.random.randint(image_h // 2, image_h + 1), np.random.randint(image_w // 2, image_w + 1)
            h0 = np.random.randint(image_h - croph + 1)
            w0 = np.random.randint(image_w - cropw + 1)
            crop_size = [w0, h0, cropw, croph]
        else:
            croph, cropw = resize_size[0], resize_size[1]
            crop_size = [0, 0, resize_size[1], resize_size[0]]

        # # crop
        # start_y = 0 if resize_size[0] <= cfg.DATASET.CROP_SIZE[0] else np.random.randint(0, resize_size[0] - cfg.DATASET.CROP_SIZE[0])
        # start_x = 0 if resize_size[1] <= cfg.DATASET.CROP_SIZE[1] else np.random.randint(0, resize_size[1] - cfg.DATASET.CROP_SIZE[1])
        # crop_height = resize_size[0] if resize_size[0] <= cfg.DATASET.CROP_SIZE[0] else cfg.DATASET.CROP_SIZE[0]
        # crop_width = resize_size[1] if resize_size[1] <= cfg.DATASET.CROP_SIZE[1] else cfg.DATASET.CROP_SIZE[1]
        # crop_size = [start_x, start_y, crop_width, crop_height] if 'train' in self.opt.phase else [0, 0, resize_size[1], resize_size[0]]

        # # reshape
        # ratio_list = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
        # if 'train' in self.opt.phase:
        #     resize_ratio = ratio_list[np.random.randint(len(ratio_list))]
        # else:
        #     resize_ratio = 1.0
        # resize_size = [int(A.shape[0] * resize_ratio + 0.5),
        #                int(A.shape[1] * resize_ratio + 0.5)]  # [height, width]

        resize_ratio = (cfg.DATASET.CROP_SIZE[0] + cfg.DATASET.CROP_SIZE[1]) / (cropw + croph)

        # # pad
        # pad_height = 0 if resize_size[0] > cfg.DATASET.CROP_SIZE[0] else cfg.DATASET.CROP_SIZE[0] - resize_size[0]
        # pad_width = 0 if resize_size[1] > cfg.DATASET.CROP_SIZE[1] else cfg.DATASET.CROP_SIZE[1] - resize_size[1]
        # # [up, down, left, right]
        # pad = [pad_height, 0, pad_width, 0] if 'train' in self.opt.phase else [0, 0, 0, 0]
        pad = [0, 0, 0, 0]
        return flip_flg, resize_size, crop_size, pad, resize_ratio

    def flip_reshape_crop_pad(self, img, flip, resize_size, crop_size, pad, pad_value=0, resize_method='bilinear'):
        """
        Flip, crop, resize, and pad the image (in that order).
        :param img: input image, [H, W, C]
        :param flip: flip flag
        :param crop_size: crop size for the image, [x, y, width, height]
        :param pad: pad the image, [up, down, left, right]
        :param pad_value: padding value
        :return:
        """
        # Flip
        if flip:
            img = np.flip(img, axis=1)

        # Crop the image
        img_crop = img[crop_size[1]:crop_size[1] + crop_size[3],
                       crop_size[0]:crop_size[0] + crop_size[2]]

        # Resize the cropped image
        if resize_method == 'nearest':
            img_resize = cv2.resize(img_crop, (resize_size[1], resize_size[0]), interpolation=cv2.INTER_NEAREST)
        else:
            img_resize = cv2.resize(img_crop, (resize_size[1], resize_size[0]), interpolation=cv2.INTER_LINEAR)

        # Pad the resized image
        if len(img.shape) == 3:
            img_pad = np.pad(img_resize, ((pad[0], pad[1]), (pad[2], pad[3]), (0, 0)), 'constant',
                             constant_values=(pad_value, pad_value))
        else:
            img_pad = np.pad(img_resize, ((pad[0], pad[1]), (pad[2], pad[3])), 'constant',
                             constant_values=(pad_value, pad_value))

        # # Resize the raw image
        # if resize_method == 'bilinear':
        #     img_resize = cv2.resize(img, (resize_size[1], resize_size[0]), interpolation=cv2.INTER_LINEAR)
        # elif resize_method == 'nearest':
        #     img_resize = cv2.resize(img, (resize_size[1], resize_size[0]), interpolation=cv2.INTER_NEAREST)
        # else:
        #     raise ValueError
        # # Crop the resized image
        # img_crop = img_resize[crop_size[1]:crop_size[1] + crop_size[3], crop_size[0]:crop_size[0] + crop_size[2]]
        # # Pad the raw image
        # if len(img.shape) == 3:
        #     img_pad = np.pad(img_crop, ((pad[0], pad[1]), (pad[2], pad[3]), (0, 0)), 'constant',
        #                      constant_values=(pad_value, pad_value))
        # else:
        #     img_pad = np.pad(img_crop, ((pad[0], pad[1]), (pad[2], pad[3])), 'constant',
        #                      constant_values=(pad_value, pad_value))
        return img_pad

    # def depth_to_bins(self, depth):
    #     """
    #     Discretize depth into depth bins.
    #     Mark invalid padding area as cfg.MODEL.DECODER_OUTPUT_C + 1
    #     :param depth: 1-channel depth, [1, h, w]
    #     :return: depth bins [1, h, w]
    #     """
    #     invalid_mask = depth < 1e-8
    #     depth[depth < cfg.DATASET.DEPTH_MIN] = cfg.DATASET.DEPTH_MIN
    #     depth[depth > cfg.DATASET.DEPTH_MAX] = cfg.DATASET.DEPTH_MAX
    #     bins = ((torch.log10(depth) - cfg.DATASET.DEPTH_MIN_LOG) / cfg.DATASET.DEPTH_BIN_INTERVAL).to(torch.int)
    #     bins[invalid_mask] = cfg.MODEL.DECODER_OUTPUT_C + 1
    #     bins[bins == cfg.MODEL.DECODER_OUTPUT_C] = cfg.MODEL.DECODER_OUTPUT_C - 1
    #     depth[invalid_mask] = -1.0
    #     return bins

    def scale_torch(self, img):
        """
        Scale the image and output it as a torch.Tensor.
        :param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W]
        :return: img, [C, H, W]
        """
        if len(img.shape) == 2:
            img = img[np.newaxis, :, :]
        if img.shape[2] == 3:
            transform = transforms.Compose([transforms.ToTensor(),
                                            transforms.Normalize(cfg.DATASET.RGB_PIXEL_MEANS,
                                                                 cfg.DATASET.RGB_PIXEL_VARS)])
            img = transform(img)
        else:
            img = img.astype(np.float32)
            img = torch.from_numpy(img)
        return img

    def load_depth(self, anno_index, rgb):
        """
        Load the depth map and derive the sky and valid-depth masks.
        :return depth: normalized depth map, np.float32
                sky_mask: sky regions, bool
                mask_valid: valid depth regions, np.float32
        """
        # load depth
        depth = cv2.imread(self.depth_paths[anno_index], -1)
        depth, mask_valid = self.preprocess_depth(depth, self.depth_paths[anno_index])

        # load semantic mask, such as road, sky
        if len(self.rgb_paths) == len(self.sem_masks) and self.sem_masks[anno_index] is not None:
            sem_mask = cv2.imread(self.sem_masks[anno_index], -1).astype(np.uint8)
        else:
            sem_mask = np.zeros(depth.shape, dtype=np.uint8)
        sky_mask = sem_mask == 17

        return depth, sky_mask, mask_valid

    def load_training_data(self, anno_index, rgb):
        """
        Load disparity, depth, and mask maps.
        :return disp: disparity map, np.uint16
                depth: depth map, np.uint16
                invalid_disp, invalid_depth: invalid regions, bool
                ins_planes_mask: plane instance masks, np.uint8
                sky_mask, road_mask: semantic masks, bool
                depth_path: path of the loaded depth (or disparity) map
        """
        # load depth, rgb, disp
        if (self.depth_paths[anno_index] is not None) and (self.disp_paths[anno_index] is not None):
            # dataset has both depth and disp
            disp = cv2.imread(self.disp_paths[anno_index], -1)
            disp = (disp / (disp.max() + 1e-8) * 60000).astype(np.uint16)
            depth = cv2.imread(self.depth_paths[anno_index], -1)
            depth = (depth / (depth.max() + 1e-8) * 60000).astype(np.uint16)
            depth_path = self.depth_paths[anno_index]
        elif self.disp_paths[anno_index] is not None:
            # dataset only has disparity
            disp = cv2.imread(self.disp_paths[anno_index], -1)
            disp_mask = disp < 1e-8
            depth = 1 / (disp + 1e-8)
            depth[disp_mask] = 0
            depth = (depth / (depth.max() + 1e-8) * 60000).astype(np.uint16)
            depth_path = self.disp_paths[anno_index]
        elif self.depth_paths[anno_index] is not None:
            # dataset only has depth
            depth_path = self.depth_paths[anno_index]
            depth = cv2.imread(self.depth_paths[anno_index], -1)
            depth = (self.loading_check(depth, depth_path)).astype(np.uint16)
            depth_mask = depth < 1e-8
            disp = 1 / (depth + 1e-8)
            disp[depth_mask] = 0
            disp = (disp / (disp.max() + 1e-8) * 60000).astype(np.uint16)
        else:
            depth = np.full((rgb.shape[0], rgb.shape[1]), 0, dtype=np.uint16)
            disp = np.full((rgb.shape[0], rgb.shape[1]), 0, dtype=np.uint16)
            depth_path = 'None'

        # load semantic mask, such as road, sky
        if len(self.rgb_paths) == len(self.sem_masks) and self.sem_masks[anno_index] is not None:
            sem_mask = cv2.imread(self.sem_masks[anno_index], -1).astype(np.uint8)
        else:
            sem_mask = np.zeros(disp.shape, dtype=np.uint8)

        # load planes mask
        if len(self.rgb_paths) == len(self.ins_paths) and self.ins_paths[anno_index] is not None:
            ins_planes_mask = cv2.imread(self.ins_paths[anno_index], -1).astype(np.uint8)
        else:
            ins_planes_mask = np.zeros(disp.shape, dtype=np.uint8)

        sky_mask = sem_mask == 17
        road_mask = sem_mask == 49
        invalid_disp = disp < 1e-8
        invalid_depth = depth < 1e-8

        return disp, depth, invalid_disp, invalid_depth, ins_planes_mask, sky_mask, road_mask, depth_path
        # return disp, depth, sem_mask, depth_path, ins_planes_mask

    def preprocess_depth(self, depth, img_path):
        if 'diml' in img_path.lower():
            drange = 65535.0
        elif 'taskonomy' in img_path.lower():
            depth[depth > 23000] = 0
            drange = 23000.0
        else:
            # depth_filter1 = depth[depth > 1e-8]
            # drange = (depth_filter1.max() - depth_filter1.min())
            drange = depth.max()
        depth_norm = depth / (drange + 1e-8)
        mask_valid = (depth_norm > 1e-8).astype(np.float32)
        return depth_norm, mask_valid

    def loading_check(self, depth, depth_path):
        if 'taskonomy' in depth_path:
            # invalid regions in taskonomy are set to 65535 originally
            depth[depth >= 28000] = 0
        if '3d-ken-burns' in depth_path:
            # maybe sky regions
            depth[depth >= 47000] = 0
        return depth

    def __len__(self):
        return self.data_size

    # def name(self):
    #     return 'DiverseDepth'
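
# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of wiring MultiDataset into a PyTorch DataLoader. The option
# object is assumed to come from TrainOptions().parse() (see tools/parse_arg_train
# further below), and 'taskonomy' is just one example dataset name from this repo.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from tools.parse_arg_train import TrainOptions

    opt = TrainOptions().parse()  # e.g. --dataroot ./datasets --phase_anno train
    dataset = MultiDataset(opt, dataset_name='taskonomy')
    loader = DataLoader(dataset, batch_size=opt.batchsize, shuffle=True, num_workers=opt.thread)
    sample = dataset[0]  # dict with 'rgb', 'depth', 'disp', 'planes', 'focal_length', ...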
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
from tools.parse_arg_base import BaseOptions


class TrainOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='train', help='Training flag')
        parser.add_argument('--phase_anno', type=str, default='train', help='Annotations file name')
        self.isTrain = True
        return parser
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
from tools.parse_arg_base import BaseOptions


class ValOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='val', help='Validation flag')
        parser.add_argument('--phase_anno', type=str, default='val', help='Annotations file name')
        return parser
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
import os
import cv2
import json
import scipy.io as sio
import numpy as np
import torch
from torchvision.transforms import transforms
import matplotlib.pyplot as plt

from lib.utils.logging import setup_logging, SmoothedValue
from lib.models.multi_depth_model_auxiv2 import RelDepthModel
from lib.utils.net_tools import load_ckpt
from lib.utils.evaluate_depth_error import evaluate_rel_err, recover_metric_depth
from lib.configs.config import cfg, merge_cfg_from_file
from tools.parse_arg_test import TestOptions

logger = setup_logging(__name__)


def scale_torch(img, scale):
    """
    Scale the image and output it as a torch.Tensor.
    :param img: input image, [H, W, C]
    :param scale: the scale factor, float
    :return: img, [C, H, W]
    """
    img = np.transpose(img, (2, 0, 1))
    img = img.astype(np.float32)
    img /= scale
    img = torch.from_numpy(img.copy())
    img = transforms.Normalize(cfg.DATASET.RGB_PIXEL_MEANS, cfg.DATASET.RGB_PIXEL_VARS)(img)
    return img


if __name__ == '__main__':
    test_args = TestOptions().parse()
    test_args.thread = 1
    test_args.batchsize = 1

    # load model
    model = RelDepthModel()
    model.eval()

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)

    # base_dir = '/home/yvan/DeepLearning/all-datasets'
    # annos_path = os.path.join(base_dir, test_args.dataset_list[0], 'annotations/test_annotations.json')
    # f = open(annos_path)
    # annos = json.load(f)
    # f.close()

    imgs = sio.loadmat('../datasets/test.mat')
    rgbs = imgs['rgbs']
    depths = imgs['depths']
    test_datasize = rgbs.shape[0]
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)
    smoothed_silog2 = SmoothedValue(test_datasize)
    smoothed_log10 = SmoothedValue(test_datasize)
    smoothed_delta1 = SmoothedValue(test_datasize)
    smoothed_delta2 = SmoothedValue(test_datasize)
    smoothed_delta3 = SmoothedValue(test_datasize)
    smoothed_whdr = SmoothedValue(test_datasize)
    smoothed_criteria = {'err_absRel': smoothed_absRel, 'err_squaRel': smoothed_squaRel,
                         'err_rms': smoothed_rms, 'err_silog': smoothed_silog,
                         'err_logRms': smoothed_logRms, 'err_silog2': smoothed_silog2,
                         'err_delta1': smoothed_delta1, 'err_delta2': smoothed_delta2,
                         'err_delta3': smoothed_delta3, 'err_log10': smoothed_log10,
                         'err_whdr': smoothed_whdr}

    for i in range(test_datasize):
        if i % 100 == 0:
            logger.info('processing : ' + str(i) + ' / ' + str(test_datasize))
        rgb = rgbs[i].transpose((2, 1, 0))  # rgb
        depth = depths[i].transpose((1, 0))
        mask_invalid = depth < 1e-8
        mask_invalid[45:471, 41:601] = 1
        mask_invalid = mask_invalid.astype(bool)

        # resize input to [448, 448], same as the training setting
        rgb_resize = cv2.resize(rgb, (448, 448))
        img_torch = scale_torch(rgb_resize, 255)
        img_torch = img_torch[None, :, :, :].cuda()

        with torch.no_grad():
            pred_depth, pred_disp = model.module.depth_model(img_torch)
        pred_depth_resize = cv2.resize(pred_depth.cpu().numpy().squeeze(), (rgb.shape[1], rgb.shape[0]))

        # Recover metric depth
        pred_depth_metric = recover_metric_depth(pred_depth_resize, depth)

        # evaluate
        smoothed_criteria = evaluate_rel_err(pred_depth_metric, depth, smoothed_criteria)

        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation', cfg.MODEL.ENCODER, model_name + '_nyu')
        os.makedirs(image_dir, exist_ok=True)
        img_name = '%04d.png' % i
        plt.imsave(os.path.join(image_dir, img_name.replace('.png', '_pred.png')), pred_depth_metric, cmap='rainbow')
        cv2.imwrite(os.path.join(image_dir, img_name.replace('.png', '_rgb.png')), np.squeeze(rgb)[:, :, ::-1])
        plt.imsave(os.path.join(image_dir, img_name.replace('.png', '_gt.png')), np.squeeze(depth), cmap='rainbow')
        # cv2.imwrite(os.path.join(image_dir, img_name.replace('.png', '_gtraw.png')), (pred_depth_metric * 6000).astype(np.uint16))

    print("###############WHDR ERROR: %f" % smoothed_criteria['err_whdr'].GetGlobalAverageValue())
    print("###############absREL ERROR: %f" % smoothed_criteria['err_absRel'].GetGlobalAverageValue())
    print("###############silog ERROR: %f" % np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() - (
        smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2))
    print("###############log10 ERROR: %f" % smoothed_criteria['err_log10'].GetGlobalAverageValue())
    print("###############RMS ERROR: %f" % np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue()))
    print("###############delta_1 ERROR: %f" % smoothed_criteria['err_delta1'].GetGlobalAverageValue())
    print("###############delta_2 ERROR: %f" % smoothed_criteria['err_delta2'].GetGlobalAverageValue())
    print("###############delta_3 ERROR: %f" % smoothed_criteria['err_delta3'].GetGlobalAverageValue())
    print("###############squaRel ERROR: %f" % smoothed_criteria['err_squaRel'].GetGlobalAverageValue())
    print("###############logRms ERROR: %f" % np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue()))
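
# --- Invocation sketch (illustrative; paths are placeholders) ---
# This script loads the NYU test split from ../datasets/test.mat (see sio.loadmat
# above) and needs a trained checkpoint:
#
#   python <this_script>.py --dataroot ./datasets --load_ckpt ./outputs/model.pth
#
# Predictions and colorized depth maps are written under
# <ROOT_DIR>/evaluation/<ENCODER>/<ckpt_name>_nyu/.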
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
import argparse


class BaseOptions():
    def __init__(self):
        self.initialized = False

    def initialize(self, parser):
        parser.add_argument('--backbone', type=str, default='resnet50',
                            help='Select backbone type, resnet50 or resnext101')
        parser.add_argument('--batchsize', type=int, default=2, help='Batch size')
        parser.add_argument('--base_lr', type=float, default=0.001, help='Initial learning rate')
        parser.add_argument('--load_ckpt', help='Checkpoint path to load')
        parser.add_argument('--resume', action='store_true', help='Resume to train')
        parser.add_argument('--epoch', default=50, type=int, help='Total training epochs')
        parser.add_argument('--dataset_list', default=None, nargs='+', help='The names of multiple datasets')
        parser.add_argument('--loss_mode', default='_vnl_ssil_ranking_', help='Select loss to supervise, joint or ranking')
        parser.add_argument('--lr_scheduler_multiepochs', default=[10, 25, 40], nargs='+', type=int,
                            help='Learning rate scheduler step')
        parser.add_argument('--val_step', default=5000, type=int, help='Validation steps')
        parser.add_argument('--snapshot_iters', default=5000, type=int, help='Checkpoint save iters')
        parser.add_argument('--log_interval', default=10, type=int, help='Log print iters')
        parser.add_argument('--output_dir', type=str, default='./output', help='Output dir')
        parser.add_argument('--use_tfboard', action='store_true', help='Tensorboard to log training info')
        parser.add_argument('--dataroot', default='./datasets', required=True, help='Path to images')
        parser.add_argument('--dataset', default='multi', help='Dataset loader name')
        parser.add_argument('--scale_decoder_lr', type=float, default=1, help='Scale learning rate for the decoder')
        parser.add_argument('--thread', default=0, type=int, help='Thread for loading data')
        parser.add_argument('--start_step', default=0, type=int, help='Set start training steps')
        parser.add_argument('--sample_ratio_steps', default=10000, type=int, help='Step for increasing sample ratio')
        parser.add_argument('--sample_start_ratio', default=0.1, type=float, help='Start sample ratio')
        parser.add_argument('--local_rank', type=int, default=0, help='Rank ID for processes')
        parser.add_argument('--nnodes', type=int, default=1, help='Amount of nodes')
        parser.add_argument('--node_rank', type=int, default=0, help='Rank of current node')
        parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:22',
                            help='URL specifying how to initialize the process group')
        # parser.add_argument('--optim', default='SGD', help='Select optimizer, SGD or Adam')
        # parser.add_argument('--start_epoch', default=0, type=int, help='Set training epochs')
        # parser.add_argument('--results_dir', type=str, default='./evaluation', help='Output dir')
        # parser.add_argument('--diff_loss_weight', default=1, type=int, help='Step for increasing sample ratio')
        self.initialized = True
        return parser

    def parse(self):
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.parser = self.initialize(parser)
        self.opt = parser.parse_args()
        return self.opt


def print_options(opt, logger=None):
    message = ''
    message += '----------------- Options ---------------\n'
    for k, v in sorted(vars(opt).items()):
        message += '{:>25}: {}\n'.format(str(k), str(v))
    message += '----------------- End -------------------'
    logger.info(message)
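
# --- Illustrative usage sketch (not part of the original file) ---
# Parsing options with the class above; the command-line values are placeholders.
if __name__ == '__main__':
    import sys
    sys.argv = ['prog', '--dataroot', './datasets', '--batchsize', '4']
    opt = BaseOptions().parse()
    print(sorted(vars(opt).items()))  # inspect the parsed defaults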
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
from tools.parse_arg_base import BaseOptions


class TestOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='test', help='test flag')
        parser.add_argument('--phase_anno', type=str, default='test', help='eigen/eigen_test, Annotations file name')
        return parser
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
import math
import traceback
import errno
import os
import os.path as osp
import json
import torch.distributed as dist
import torch.multiprocessing as mp
from multiprocessing.sharedctypes import Value

from data.load_dataset_distributed import MultipleDataLoaderDistributed
from lib.models.multi_depth_model_auxiv2 import *
from lib.configs.config import cfg, merge_cfg_from_file, print_configs
from lib.utils.training_stats import TrainingStats
from lib.utils.evaluate_depth_error import validate_rel_depth_err, recover_metric_depth
from lib.utils.lr_scheduler_custom import make_lr_scheduler
from lib.utils.comm import is_pytorch_1_1_0_or_later, get_world_size
from lib.utils.net_tools import save_ckpt, load_ckpt
from lib.utils.logging import setup_distributed_logger, SmoothedValue
from tools.parse_arg_base import print_options
from tools.parse_arg_train import TrainOptions
from tools.parse_arg_val import ValOptions


def main_process(dist, rank) -> bool:
    return not dist or (dist and rank == 0)


def increase_sample_ratio_steps(step, base_ratio=0.1, step_size=10000):
    ratio = min(base_ratio * (int(step / step_size) + 1), 1.0)
    return ratio


def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that the process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        torch.distributed.reduce(all_losses, dst=0)
        if torch.distributed.get_rank() == 0:
            # only the main process accumulates, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses


def val(val_dataloader, model):
    """
    Validate the model.
    """
    print('validating...')
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_whdr = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {'err_absRel': smoothed_absRel, 'err_whdr': smoothed_whdr}
    for i, data in enumerate(val_dataloader):
        out = model.module.inference(data)
        pred_depth = torch.squeeze(out['pred_depth'])
        pred_depth_resize = cv2.resize(pred_depth.cpu().numpy(),
                                       (torch.squeeze(data['gt_depth']).shape[1],
                                        torch.squeeze(data['gt_depth']).shape[0]))
        pred_depth_metric = recover_metric_depth(pred_depth_resize, data['gt_depth'])
        smoothed_criteria = validate_rel_depth_err(pred_depth_metric, data['gt_depth'],
                                                   smoothed_criteria, scale=1.0)
    return {'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
            'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue()}


def do_train(train_dataloader, val_dataloader, train_args,
             model, save_to_disk,
             scheduler, optimizer, val_err,
             logger, tblogger=None):
    # training status for logging
    if save_to_disk:
        training_stats = TrainingStats(train_args, cfg.TRAIN.LOG_INTERVAL,
                                       tblogger if train_args.use_tfboard else None)

    dataloader_iterator = iter(train_dataloader)
    start_step = train_args.start_step
    total_iters = cfg.TRAIN.MAX_ITER
    train_datasize = train_dataloader.batch_sampler.sampler.total_sampled_size

    pytorch_1_1_0_or_later = is_pytorch_1_1_0_or_later()
    tmp_i = 0
    try:
        for step in range(start_step, total_iters):
            if step % train_args.sample_ratio_steps == 0 and step != 0:
                sample_ratio = increase_sample_ratio_steps(step,
                                                           base_ratio=train_args.sample_start_ratio,
                                                           step_size=train_args.sample_ratio_steps)
                train_dataloader, curr_sample_size = MultipleDataLoaderDistributed(train_args,
                                                                                   sample_ratio=sample_ratio)
                dataloader_iterator = iter(train_dataloader)
                logger.info('Sample ratio: %02f, current sampled datasize: %d' %
                            (sample_ratio, np.sum(curr_sample_size)))

            epoch = int(step * train_args.batchsize * train_args.world_size / train_datasize)
            if save_to_disk:
                training_stats.IterTic()

            # get the next data batch
            try:
                data = next(dataloader_iterator)
            except:
                dataloader_iterator = iter(train_dataloader)
                data = next(dataloader_iterator)

            out = model(data)
            losses_dict = out['losses']
            optimizer.optim(losses_dict)

            ################# Check data loading ######################
            # tmp_path_base = '/home/yvan/DeepLearning/Depth/DiverseDepth-github/DiverseDepth/datasets/x/'
            # rgb = data['A'][1, ...].permute(1, 2, 0).squeeze()
            # rgb = rgb * torch.tensor(cfg.DATASET.RGB_PIXEL_VARS)[None, None, :] + torch.tensor(cfg.DATASET.RGB_PIXEL_MEANS)[None, None, :]
            # rgb = rgb * 255
            # rgb = rgb.cpu().numpy().astype(np.uint8)
            # depth = (data['B'][1, ...].squeeze().cpu().numpy() * 1000)
            # depth[depth < 0] = 0
            # depth = depth.astype(np.uint16)
            # plt.imsave(tmp_path_base + '%04d_r.jpg' % tmp_i, rgb)
            # plt.imsave(tmp_path_base + '%04d_d.png' % tmp_i, depth, cmap='rainbow')
            # tmp_i += 1
            ###########################################################

            # reduce losses over all GPUs for logging purposes
            loss_dict_reduced = reduce_loss_dict(losses_dict)
            scheduler.step()

            if save_to_disk:
                training_stats.UpdateIterStats(loss_dict_reduced)
                training_stats.IterToc()
                training_stats.LogIterStats(step, epoch, optimizer.optimizer, val_err[0])

            # validate the model
            if step % cfg.TRAIN.VAL_STEP == 0 and val_dataloader is not None and step != 0:
                model.eval()
                val_err[0] = val(val_dataloader, model)
                # back to training mode
                model.train()

            # save checkpoint
            if step % cfg.TRAIN.SNAPSHOT_ITERS == 0 and step != 0 and save_to_disk:
                save_ckpt(train_args, step, epoch, model, optimizer.optimizer, scheduler, val_err[0])
    except (RuntimeError, KeyboardInterrupt):
        stack_trace = traceback.format_exc()
        print(stack_trace)
    finally:
        if train_args.use_tfboard and main_process(dist=train_args.distributed, rank=train_args.global_rank):
            tblogger.close()


def main_worker(local_rank: int, ngpus_per_node: int, train_args, val_args):
    train_args.global_rank = train_args.node_rank * ngpus_per_node + local_rank
    train_args.local_rank = local_rank
    val_args.global_rank = train_args.global_rank
    val_args.local_rank = local_rank
    merge_cfg_from_file(train_args)

    global logger
    # Set logger
    log_output_dir = cfg.TRAIN.LOG_DIR
    if log_output_dir:
        try:
            os.makedirs(log_output_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    logger = setup_distributed_logger("lib", log_output_dir, local_rank, cfg.TRAIN.RUN_NAME + '.txt')
    tblogger = None
    if train_args.use_tfboard and local_rank == 0:
        from tensorboardX import SummaryWriter
        tblogger = SummaryWriter(cfg.TRAIN.LOG_DIR)

    # init the distributed process group
    if train_args.distributed:
        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl',
                                init_method=train_args.dist_url,
                                world_size=train_args.world_size,
                                rank=train_args.global_rank)

    # load model
    model = RelDepthModel()
    if train_args.world_size > 1:
        assert is_pytorch_1_1_0_or_later(), \
            "SyncBatchNorm is only available in pytorch >= 1.1.0"
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        logger.info('Using SyncBN!')

    if train_args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model.cuda(), device_ids=[local_rank], output_device=local_rank)
    else:
        model = torch.nn.DataParallel(model.cuda())

    val_err = [{'abs_rel': 0, 'whdr': 0}]

    # Print configs and logs
    print_options(train_args, logger)

    # training and validation dataloaders
    train_dataloader, train_sample_size = MultipleDataLoaderDistributed(train_args)
    val_dataloader, val_sample_size = MultipleDataLoaderDistributed(val_args)
    cfg.TRAIN.LR_SCHEDULER_MULTISTEPS = np.array(train_args.lr_scheduler_multiepochs) * \
        math.ceil(np.sum(train_sample_size) / (train_args.world_size * train_args.batchsize))

    # Optimizer
    optimizer = ModelOptimizer(model)
    # lr_optim_lambda = lambda iter: (1.0 - iter / (float(total_iters))) ** 0.9
    # scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer.optimizer, lr_lambda=lr_optim_lambda)
    # lr_scheduler_step = 15000
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer.optimizer, step_size=lr_scheduler_step, gamma=0.9)
    scheduler = make_lr_scheduler(cfg=cfg, optimizer=optimizer.optimizer)

    if train_args.load_ckpt:
        load_ckpt(train_args, model, optimizer.optimizer, scheduler, val_err)

    # obtain the current sample ratio
    sample_ratio = increase_sample_ratio_steps(train_args.start_step,
                                               base_ratio=train_args.sample_start_ratio,
                                               step_size=train_args.sample_ratio_steps)
    # reconstruct the train_dataloader with the new sample_ratio
    train_dataloader, train_sample_size = MultipleDataLoaderDistributed(train_args,
                                                                        sample_ratio=sample_ratio)
    total_iters = math.ceil(np.sum(train_sample_size) /
                            (train_args.world_size * train_args.batchsize)) * train_args.epoch
    cfg.TRAIN.MAX_ITER = total_iters
    cfg.TRAIN.GPU_NUM = train_args.world_size
    print_configs(cfg)

    save_to_disk = main_process(train_args.distributed, local_rank)
    do_train(train_dataloader,
             val_dataloader,
             train_args,
             model,
             save_to_disk,
             scheduler,
             optimizer,
             val_err,
             logger,
             tblogger)


def main():
    # Train args
    train_opt = TrainOptions()
    train_args = train_opt.parse()

    # Validation args
    val_opt = ValOptions()
    val_args = val_opt.parse()
    val_args.batchsize = 1
    val_args.thread = 0

    # Keep only the validation datasets that actually provide readable depth maps
    val_datasets = []
    for dataset_name in val_args.dataset_list:
        val_annos_path = osp.join(cfg.ROOT_DIR, val_args.dataroot, dataset_name,
                                  'annotations', 'val_annotations.json')
        if not osp.exists(val_annos_path):
            continue
        with open(val_annos_path, 'r') as f:
            anno = json.load(f)[0]
        if 'depth_path' not in anno:
            continue
        depth_path_demo = osp.join(cfg.ROOT_DIR, val_args.dataroot, anno['depth_path'])
        depth_demo = cv2.imread(depth_path_demo, -1)
        if depth_demo is not None:
            val_datasets.append(dataset_name)
    val_args.dataset_list = val_datasets

    print('Using PyTorch version: ', torch.__version__, torch.version.cuda)
    ngpus_per_node = torch.cuda.device_count()
    train_args.world_size = ngpus_per_node * train_args.nnodes
    val_args.world_size = ngpus_per_node * train_args.nnodes
    train_args.distributed = ngpus_per_node > 1

    # Randomize args.dist_url to avoid conflicts on the same machine
    train_args.dist_url = train_args.dist_url + str(os.getpid() % 100).zfill(2)

    if train_args.distributed:
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, train_args, val_args))
    else:
        main_worker(0, ngpus_per_node, train_args, val_args)


if __name__ == '__main__':
    main()
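
# --- Launch sketch (illustrative; dataset names and flag values are examples) ---
# Single-node training. With more than one visible GPU, main() sets
# train_args.distributed = True and spawns one worker per GPU via mp.spawn:
#
#   python train.py --dataroot ./datasets --dataset_list taskonomy diml_ganet \
#       --backbone resnet50 --batchsize 4 --epoch 50 --use_tfboard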
{ "repo_name": "aim-uofa/AdelaiDepth", "stars": "926", "repo_language": "Python", "file_name": "train.py", "mime_type": "text/x-script.python" }
# Community Participation Guidelines

This repository is governed by Mozilla's code of conduct and etiquette guidelines. For more details, please read the [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/).

## How to Report

For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page.

<!-- ## Project Specific Etiquette

In some cases, there will be additional project etiquette i.e.: (https://bugzilla.mozilla.org/page.cgi?id=etiquette.html). Please update for your project. -->
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# dom-examples

Code examples that accompany various MDN DOM and Web API documentation pages.

* The "abort-api" directory contains an example of how to use the "Abort API" (aka [AbortSignal](https://dom.spec.whatwg.org/#interface-AbortSignal) and [AbortController](https://dom.spec.whatwg.org/#interface-abortcontroller)). [Run the example live](https://mdn.github.io/dom-examples/abort-api/).
* The "audiocontext-setsinkid" directory contains an example of how to use the [`AudioContext.setSinkId()`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId) method and related features. [Run the example live](https://mdn.github.io/dom-examples/audiocontext-setsinkid/).
* The "auxclick" directory contains a simple example demonstrating the new <code>auxclick</code> event type. See [GlobalEventHandlers.auxclick](https://developer.mozilla.org/en-US/docs/Web/API/GlobalEventHandlers/onauxclick) for more details, or [run the example live](https://mdn.github.io/dom-examples/auxclick/).
* The "canvas" directory contains an example "chroma-keying" demonstrating how to use the Canvas API to manipulate videos: see [Manipulating video using canvas](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Manipulating_video_using_canvas) or [run the example live](https://mdn.github.io/dom-examples/canvas/chroma-keying/).
* The "channel-messaging-basic" directory contains a simple example demonstrating the basics of channel messaging; see [Channel Messaging API](https://developer.mozilla.org/en-US/docs/Web/API/Channel_Messaging_API) or [run the example live](https://mdn.github.io/dom-examples/channel-messaging-basic/).
* The "channel-messaging-multimessage" directory contains another channel messaging demo, showing how multiple messages can be sent between browsing contexts. See [Channel Messaging API](https://developer.mozilla.org/en-US/docs/Web/API/Channel_Messaging_API) for more details. [Run the demo live](https://mdn.github.io/dom-examples/channel-messaging-multimessage/).
* The "css-painting" directory contains examples demonstrating the [CSS Painting API](https://developer.mozilla.org/en-US/docs/Web/API/CSS_Painting_API). See the [examples live](https://mdn.github.io/dom-examples/css-painting).
* The "drag-and-drop" directory is for examples and demos of the [HTML Drag and Drop](https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API) standard.
* The "fullscreen-api" directory is for examples and demos of the [Fullscreen API](https://wiki.developer.mozilla.org/en-US/docs/Web/API/Fullscreen_API). Run the [example live](https://mdn.github.io/dom-examples/fullscreen-api/).
* The "indexeddb-api" directory contains a demo for the [IndexedDB API](https://mdn.github.io/dom-examples/indexeddb-api/index.html).
* The "insert-adjacent" directory contains simple demos for [insertAdjacentElement](https://mdn.github.io/dom-examples/insert-adjacent/insertAdjacentElement.html) and [insertAdjacentText](https://mdn.github.io/dom-examples/insert-adjacent/insertAdjacentText.html).
* The "matchmedia" directory contains a simple demo to test matchMedia functionality. See [Window.matchMedia](https://developer.mozilla.org/en-US/docs/Web/API/Window/matchMedia) for more details. [Run the demo live](https://mdn.github.io/dom-examples/matchmedia/).
* The "mediaquerylist" directory contains a simple demo to test more advanced matchMedia/mediaquerylist functionality. See [MediaQueryList](https://developer.mozilla.org/en-US/docs/Web/API/MediaQueryList) for more details. [Run the demo live](https://mdn.github.io/dom-examples/mediaquerylist/index.html).
* The "media" directory contains examples and demos showing how to use HTML and DOM [media elements and APIs](https://developer.mozilla.org/en-US/docs/Web/Media).
* The "payment-request" directory contains examples of the [Payment Request API](https://developer.mozilla.org/en-US/docs/Web/API/Payment_Request_API).
* The "pointerevents" directory is for examples and demos of the [Pointer Events](https://developer.mozilla.org/en-US/docs/Web/API/Pointer_events) standard.
* The "pointer-lock" directory contains a simple demo to show usage of the Pointer Lock API. You can find more explanation of how the demo works at the main MDN [Pointer Lock API](https://developer.mozilla.org/en-US/docs/Web/API/Pointer_Lock_API) page. [Run the demo live](https://mdn.github.io/dom-examples/pointer-lock/).
* The "popover-api" directory is for examples and demos of the [Popover API](https://developer.mozilla.org/en-US/docs/Web/API/Popover_API) standard. Go to the [Popover API demo index](popover-api/) to see what's available.
* The "reporting-api" directory contains a couple of simple demos to show usage of the Reporting API. You can find more explanation of how the API works in the main MDN [Reporting API](https://developer.mozilla.org/en-US/docs/Web/API/Reporting_API) docs. [Run the deprecation report demo live](https://mdn.github.io/dom-examples/reporting-api/deprecation_report.html).
* The "resize-event" directory contains a simple demo to show how you can use the [resize event](https://developer.mozilla.org/en-US/docs/Web/API/Window/resize_event). Resize the browser window either by height or width to see the size of your current window. [Run the demo live](https://mdn.github.io/dom-examples/resize-event).
* The "screenleft-screentop" directory contains a demo to show how you could use the [Window.screenLeft](https://developer.mozilla.org/en-US/docs/Web/API/Window/screenLeft) and [Window.screenTop](https://developer.mozilla.org/en-US/docs/Web/API/Window/screenTop) properties to draw a circle on a canvas that always stays in the same physical place on the screen when you move your browser window. [Run the demo live](https://mdn.github.io/dom-examples/screenleft-screentop/).
* The "scrolltooptions" directory contains a demo to show how you could use the [ScrollToOptions](https://developer.mozilla.org/en-US/docs/Web/API/ScrollToOptions) dictionary along with the [Window.ScrollTo()](https://developer.mozilla.org/en-US/docs/Web/API/Window/scrollTo) method to programmatically scroll a web page. [Run the demo live](https://mdn.github.io/dom-examples/scrolltooptions/).
* The "server-sent-events" directory contains a very simple SSE demo that uses PHP to create the server. You can find more information in our [Using server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) article. To run the demo you'll need to serve the files from a server that supports PHP; [MAMP](https://www.mamp.info/en/) is a good PHP test server environment.
* The "streams" directory contains demos of the Streams API for using low-level I/O processing.
* The "touchevents" directory is for examples and demos of the [Touch Events](https://developer.mozilla.org/en-US/docs/Web/API/Touch_events) standard.
* The "web-animations-api" directory contains [Web Animation API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Animations_API) demos. See the [web animations README](web-animations-api/README.md) for more information.
* The "web-storage" directory contains a simple demo to show usage of the Web Storage API. For more detail on how it works, read [Using the Web Storage API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Storage_API/Using_the_Web_Storage_API). [View the demo live](https://mdn.github.io/dom-examples/web-storage/).
* The "view-transitions" directory contains a simple demo to show usage of the [View Transitions API](https://developer.mozilla.org/en-US/docs/Web/API/View_Transitions_API). [View the demo live](https://mdn.github.io/dom-examples/view-transitions/).
* The "web-share" directory contains a simple demo to show usage of the [Web Share API](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/share). [View the demo live](https://mdn.github.io/dom-examples/web-share/).
* The "web-workers" directory contains a simple web worker to demonstrate how [Web Workers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API) work. [View the demo live](https://mdn.github.io/dom-examples/web-workers/simple-web-worker/).
* The ["webgl-examples"](webgl-examples/README.md) directory contains a number of WebGL examples that demonstrate the [WebGL API](https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API), which is used for 2D and 3D graphics on the web.
* The "webgpu-compute-demo" directory contains an example that demonstrates basic usage of the [WebGPU API](https://developer.mozilla.org/en-US/docs/Web/API/WebGPU_API) compute pipeline, which is used for performing general computation on the GPU. [View the demo live](https://mdn.github.io/dom-examples/webgpu-compute-demo/).
* The "webgpu-render-demo" directory contains an example that demonstrates basic usage of the [WebGPU API](https://developer.mozilla.org/en-US/docs/Web/API/WebGPU_API) render pipeline, which is used for rendering high-performance graphics via the GPU. [View the demo live](https://mdn.github.io/dom-examples/webgpu-render-demo/).
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE HTML>
<html lang="en-US">
  <head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=380">
    <script src="scripts/todo.js"></script>
    <title>To-do list with Notifications</title>
    <!-- Icon originated from design by Sabine Wollender: http://thenounproject.com/wosamo/ -->
    <link rel="icon" type="image/png" href="img/icon-128.png">
    <link href='https://fonts.googleapis.com/css?family=Donegal+One|Lily+Script+One' rel='stylesheet' type='text/css'>
    <link href="style/style.css" type="text/css" rel="stylesheet">
  </head>
  <body>
    <h1>To-do list</h1>
    <div class="task-box">
      <ul id="task-list">
      </ul>
    </div>
    <div class="form-box">
      <h2>Add new to-do item.</h2>
      <form id="task-form" action="index.html">
        <div class="full-width"><label for="title">Task title:</label><input type="text" id="title" required></div>
        <div class="half-width"><label for="deadline-hours">Hours (hh):</label><input type="number" id="deadline-hours" required></div>
        <div class="half-width"><label for="deadline-minutes">Mins (mm):</label><input type="number" id="deadline-minutes" required></div>
        <div class="third-width"><label for="deadline-day">Day:</label>
          <select id="deadline-day" required>
            <option value="01">01</option>
            <option value="02">02</option>
            <option value="03">03</option>
            <option value="04">04</option>
            <option value="05">05</option>
            <option value="06">06</option>
            <option value="07">07</option>
            <option value="08">08</option>
            <option value="09">09</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
          </select>
        </div>
        <div class="third-width"><label for="deadline-month">Month:</label>
          <select id="deadline-month" required>
            <option value="January">January</option>
            <option value="February">February</option>
            <option value="March">March</option>
            <option value="April">April</option>
            <option value="May">May</option>
            <option value="June">June</option>
            <option value="July">July</option>
            <option value="August">August</option>
            <option value="September">September</option>
            <option value="October">October</option>
            <option value="November">November</option>
            <option value="December">December</option>
          </select>
        </div>
        <div class="third-width"><label for="deadline-year">Year:</label>
          <select id="deadline-year" required>
            <option value="2025">2025</option>
            <option value="2024">2024</option>
            <option value="2023">2023</option>
            <option value="2022">2022</option>
            <option value="2021">2021</option>
            <option value="2020">2020</option>
            <option value="2019">2019</option>
            <option value="2018">2018</option>
          </select>
        </div>
        <div><input type="submit" id="submit" value="Add Task"></div>
        <div></div>
      </form>
    </div>
    <div id="toolbar">
      <ul id="notifications">
      </ul>
      <button id="enable">Enable notifications</button>
    </div>
  </body>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
to-do-notifications
===================

This is an enhanced version of my basic to-do app, which stores to-do items via IndexedDB, and then also aims to provide notifications when to-do item deadlines are up, via the Notification and Vibration APIs.

The IndexedDB and Notification API functionality all works on Firefox desktop, Firefox Android, Firefox OS, Chrome, and IE 10+. The Vibration API stuff works on Firefox OS and Firefox for Android.

You can [try it out live](https://mdn.github.io/dom-examples/to-do-notifications/).
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
window.onload = () => { const MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']; // Hold an instance of a db object for us to store the IndexedDB data in let db; // Create a reference to the notifications list in the bottom of the app; we will write database messages into this list by // appending list items as children of this element const note = document.getElementById('notifications'); // All other UI elements we need for the app const taskList = document.getElementById('task-list'); const taskForm = document.getElementById('task-form'); const title = document.getElementById('title'); const hours = document.getElementById('deadline-hours'); const minutes = document.getElementById('deadline-minutes'); const day = document.getElementById('deadline-day'); const month = document.getElementById('deadline-month'); const year = document.getElementById('deadline-year'); const notificationBtn = document.getElementById('enable'); // Do an initial check to see what the notification permission state is if (Notification.permission === 'denied' || Notification.permission === 'default') { notificationBtn.style.display = 'block'; } else { notificationBtn.style.display = 'none'; } note.appendChild(createListItem('App initialised.')); // Let us open our database const DBOpenRequest = window.indexedDB.open('toDoList', 4); // Register two event handlers to act on the database being opened successfully, or not DBOpenRequest.onerror = (event) => { note.appendChild(createListItem('Error loading database.')); }; DBOpenRequest.onsuccess = (event) => { note.appendChild(createListItem('Database initialised.')); // Store the result of opening the database in the db variable. This is used a lot below db = DBOpenRequest.result; // Run the displayData() function to populate the task list with all the to-do list data already in the IndexedDB displayData(); }; // This event handles the event whereby a new version of the database needs to be created // Either one has not been created before, or a new version number has been submitted via the // window.indexedDB.open line above //it is only implemented in recent browsers DBOpenRequest.onupgradeneeded = (event) => { db = event.target.result; db.onerror = (event) => { note.appendChild(createListItem('Error loading database.')); }; // Create an objectStore for this database const objectStore = db.createObjectStore('toDoList', { keyPath: 'taskTitle' }); // Define what data items the objectStore will contain objectStore.createIndex('hours', 'hours', { unique: false }); objectStore.createIndex('minutes', 'minutes', { unique: false }); objectStore.createIndex('day', 'day', { unique: false }); objectStore.createIndex('month', 'month', { unique: false }); objectStore.createIndex('year', 'year', { unique: false }); objectStore.createIndex('notified', 'notified', { unique: false }); note.appendChild(createListItem('Object store created.')); }; function displayData() { // First clear the content of the task list so that you don't get a huge long list of duplicate stuff each time // the display is updated. 
while (taskList.firstChild) { taskList.removeChild(taskList.lastChild); } // Open our object store and then get a cursor list of all the different data items in the IDB to iterate through const objectStore = db.transaction('toDoList').objectStore('toDoList'); objectStore.openCursor().onsuccess = (event) => { const cursor = event.target.result; // Check if there are no (more) cursor items to iterate through if (!cursor) { // No more items to iterate through, we quit. note.appendChild(createListItem('Entries all displayed.')); return; } // Check which suffix the deadline day of the month needs const { hours, minutes, day, month, year, notified, taskTitle } = cursor.value; const ordDay = ordinal(day); // Build the to-do list entry and put it into the list item. const toDoText = `${taskTitle} — ${hours}:${minutes}, ${month} ${ordDay} ${year}.`; const listItem = createListItem(toDoText); if (notified === 'yes') { listItem.style.textDecoration = 'line-through'; listItem.style.color = 'rgba(255, 0, 0, 0.5)'; } // Put the item item inside the task list taskList.appendChild(listItem); // Create a delete button inside each list item, const deleteButton = document.createElement('button'); listItem.appendChild(deleteButton); deleteButton.textContent = 'X'; // Set a data attribute on our delete button to associate the task it relates to. deleteButton.setAttribute('data-task', taskTitle); // Associate action (deletion) when clicked deleteButton.onclick = (event) => { deleteItem(event); }; // continue on to the next item in the cursor cursor.continue(); }; }; // Add listener for clicking the submit button taskForm.addEventListener('submit', addData, false); function addData(e) { // Prevent default, as we don't want the form to submit in the conventional way e.preventDefault(); // Stop the form submitting if any values are left empty. // This should never happen as there is the required attribute if (title.value === '' || hours.value === null || minutes.value === null || day.value === '' || month.value === '' || year.value === null) { note.appendChild(createListItem('Data not submitted — form incomplete.')); return; } // Grab the values entered into the form fields and store them in an object ready for being inserted into the IndexedDB const newItem = [ { taskTitle: title.value, hours: hours.value, minutes: minutes.value, day: day.value, month: month.value, year: year.value, notified: 'no' }, ]; // Open a read/write DB transaction, ready for adding the data const transaction = db.transaction(['toDoList'], 'readwrite'); // Report on the success of the transaction completing, when everything is done transaction.oncomplete = () => { note.appendChild(createListItem('Transaction completed: database modification finished.')); // Update the display of data to show the newly added item, by running displayData() again. 
displayData();
  };

  // Handler for any unexpected error
  transaction.onerror = () => {
    note.appendChild(createListItem(`Transaction not opened due to error: ${transaction.error}`));
  };

  // Call an object store that's already been added to the database
  const objectStore = transaction.objectStore('toDoList');
  console.log(objectStore.indexNames);
  console.log(objectStore.keyPath);
  console.log(objectStore.name);
  console.log(objectStore.transaction);
  console.log(objectStore.autoIncrement);

  // Make a request to add our newItem object to the object store
  const objectStoreRequest = objectStore.add(newItem[0]);

  objectStoreRequest.onsuccess = (event) => {
    // Report the success of our request
    // (to detect whether it has been successfully
    // added to the database, you'd look at transaction.oncomplete)
    note.appendChild(createListItem('Request successful.'));

    // Clear the form, ready for adding the next entry
    title.value = '';
    hours.value = null;
    minutes.value = null;
    day.value = '01';
    month.value = 'January';
    year.value = 2020;
  };
};

function deleteItem(event) {
  // Retrieve the name of the task we want to delete
  const dataTask = event.target.getAttribute('data-task');

  // Open a database transaction and delete the task, finding it by the name we retrieved above
  const transaction = db.transaction(['toDoList'], 'readwrite');
  transaction.objectStore('toDoList').delete(dataTask);

  // Report that the data item has been deleted
  transaction.oncomplete = () => {
    // Delete the parent of the button, which is the list item, so it is no longer displayed
    event.target.parentNode.parentNode.removeChild(event.target.parentNode);
    note.appendChild(createListItem(`Task "${dataTask}" deleted.`));
  };
};

// Checks whether the deadline for each task has passed, and responds appropriately
function checkDeadlines() {
  // First of all check whether notifications are enabled or denied
  if (Notification.permission === 'denied' || Notification.permission === 'default') {
    notificationBtn.style.display = 'block';
  } else {
    notificationBtn.style.display = 'none';
  }

  // Grab the current time and date
  const now = new Date();

  // From the now variable, store the current minutes, hours, day of the month, month and year
  const minuteCheck = now.getMinutes();
  const hourCheck = now.getHours();
  const dayCheck = now.getDate(); // Do not use getDay(), which returns the day of the week, 0 to 6
  const monthCheck = now.getMonth();
  const yearCheck = now.getFullYear(); // Do not use getYear(), which is deprecated.

  // Open a new transaction
  const objectStore = db.transaction(['toDoList'], 'readwrite').objectStore('toDoList');

  // Open a cursor to iterate through all the data items in the IndexedDB
  objectStore.openCursor().onsuccess = (event) => {
    const cursor = event.target.result;
    if (!cursor) return;

    const { hours, minutes, day, month, year, notified, taskTitle } = cursor.value;

    // Convert the month names we have stored in the IDB into a month number that JavaScript will understand.
    // The JavaScript Date object creates month values as a number between 0 and 11.
    const monthNumber = MONTHS.indexOf(month);
    if (monthNumber === -1) throw new Error('Incorrect month entered in database.');

    // Check if the current hours, minutes, day, month and year values match the stored values for each task.
    // The parseInt() function transforms the value from a string to a number for comparison
    // (ignoring any leading whitespace and leading zeros in the string).
let matched = parseInt(hours) === hourCheck; matched &&= parseInt(minutes) === minuteCheck; matched &&= parseInt(day) === dayCheck; matched &&= parseInt(monthNumber) === monthCheck; matched &&= parseInt(year) === yearCheck; if (matched && notified === 'no') { // If the numbers all do match, run the createNotification() function to create a system notification // but only if the permission is set if (Notification.permission === 'granted') { createNotification(taskTitle); } } // Move on to the next cursor item cursor.continue(); }; }; // Ask for permission when the 'Enable notifications' button is clicked function askNotificationPermission() { // Function to actually ask the permissions function handlePermission(permission) { // Whatever the user answers, we make sure Chrome stores the information if (!Reflect.has(Notification, 'permission')) { Notification.permission = permission; } // Set the button to shown or hidden, depending on what the user answers if (Notification.permission === 'denied' || Notification.permission === 'default') { notificationBtn.style.display = 'block'; } else { notificationBtn.style.display = 'none'; } }; // Check if the browser supports notifications if (!Reflect.has(window, 'Notification')) { console.log('This browser does not support notifications.'); } else { if (checkNotificationPromise()) { Notification.requestPermission().then(handlePermission); } else { Notification.requestPermission(handlePermission); } } }; // Check whether browser supports the promise version of requestPermission() // Safari only supports the old callback-based version function checkNotificationPromise() { try { Notification.requestPermission().then(); } catch(e) { return false; } return true; }; // Wire up notification permission functionality to 'Enable notifications' button notificationBtn.addEventListener('click', askNotificationPermission); function createListItem(contents) { const listItem = document.createElement('li'); listItem.textContent = contents; return listItem; }; // Create a notification with the given title function createNotification(title) { // Create and show the notification const img = '/to-do-notifications/img/icon-128.png'; const text = `HEY! 
Your task "${title}" is now overdue.`;
  const notification = new Notification('To do list', { body: text, icon: img });

  // We need to update the value of notified to 'yes' in this particular data object, so the
  // notification won't be set off on it again

  // First open up a transaction
  const objectStore = db.transaction(['toDoList'], 'readwrite').objectStore('toDoList');

  // Get the to-do list object that has this title as its title
  const objectStoreTitleRequest = objectStore.get(title);

  objectStoreTitleRequest.onsuccess = () => {
    // Grab the data object returned as the result
    const data = objectStoreTitleRequest.result;

    // Update the notified value in the object to 'yes'
    data.notified = 'yes';

    // Create another request that inserts the item back into the database
    const updateTitleRequest = objectStore.put(data);

    // When this new request succeeds, run the displayData() function again to update the display
    updateTitleRequest.onsuccess = () => {
      displayData();
    };
  };
};

// Using a setInterval to run the checkDeadlines() function every second
setInterval(checkDeadlines, 1000);
}

// Helper function returning the day of the month followed by an ordinal suffix (st, nd, rd, or th)
function ordinal(day) {
  const n = day.toString();
  const last = n.slice(-1);
  if (last === '1' && n !== '11') return `${n}st`;
  if (last === '2' && n !== '12') return `${n}nd`;
  if (last === '3' && n !== '13') return `${n}rd`;
  return `${n}th`;
};
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
/* Basic set up + sizing for containers */ html, body { margin: 0; } html { width: 100%; height: 100%; font-size: 10px; font-family: Georgia, "Times New Roman", Times, serif; background: #111; } body { width: 50rem; position: relative; background: #d88; margin: 0 auto; border-left: 2px solid #d33; border-right: 2px solid #d33; } h1, h2 { text-align: center; background: #d88; font-family: Arial, Helvetica, sans-serif; } h1 { font-size: 6rem; margin: 0; background: #d66; } h2 { font-size: 2.4rem; } /* Bottom toolbar styling */ #toolbar { position: relative; height: 6rem; width: 100%; background: #d66; border-top: 2px solid #d33; border-bottom: 2px solid #d33; } #enable, input[type="submit"] { line-height: 1.8; font-size: 1.3rem; border-radius: 5px; border: 1px solid black; color: black; text-shadow: 1px 1px 1px black; border: 1px solid rgba(0, 0, 0, 0.1); box-shadow: inset 0px 5px 3px rgba(255, 255, 255, 0.2), inset 0px -5px 3px rgba(0, 0, 0, 0.2); } #enable { position: absolute; bottom: 0.3rem; right: 0.3rem; } #notifications { margin: 0; position: relative; padding: 0.3rem; background: #ddd; position: absolute; top: 0rem; left: 0rem; height: 5.4rem; width: 50%; overflow: auto; line-height: 1.2; } #notifications li { margin-left: 1.5rem; } /* New item form styling */ .form-box { background: #d66; width: 85%; padding: 1rem; margin: 2rem auto; box-shadow: 2px 2px 5px rgba(0, 0, 0, 0.7); } form div { margin-bottom: 1rem; } form .full-width { margin: 1rem auto 2rem; width: 100%; } form .half-width { width: 50%; float: left; } form .third-width { width: 33%; float: left; } form div label { width: 10rem; float: left; padding-right: 1rem; font-size: 1.6rem; line-height: 1.6; } form .full-width input { width: 30rem; } form .half-width input { width: 8.75rem; } form .third-width select { width: 13.5rem; } form div input[type="submit"] { clear: both; width: 20rem; display: block; height: 3rem; margin: 0 auto; position: relative; top: 0.5rem; } /* || tasks box */ .task-box { width: 85%; padding: 1rem; margin: 2rem auto; font-size: 1.8rem; } .task-box ul { margin: 0; padding: 0; } .task-box li { list-style-type: none; padding: 1rem; border-bottom: 2px solid #d33; } .task-box li:last-child { border-bottom: none; } .task-box li:last-child { margin-bottom: 0rem; } .task-box button { margin-left: 2rem; font-size: 1.6rem; border: 1px solid #eee; border-radius: 5px; box-shadow: inset 0 -2px 5px rgba(0, 0, 0, 0.5) 1px 1px 1px black; } /* setting cursor for interactive controls */ button, input[type="submit"], select { cursor: pointer; } /* media query for small screens */ @media (max-width: 32rem) { body { width: 100%; border-left: none; border-right: none; } form div { clear: both; } form .full-width { margin: 1rem auto; } form .half-width { width: 100%; float: none; } form .third-width { width: 100%; float: none; } form div label { width: 36%; padding-left: 1rem; } form input, form select, form label { line-height: 2.5rem; font-size: 2rem; } form .full-width input { width: 50%; } form .half-width input { width: 50%; } form .third-width select { width: 50%; } #enable { right: 1rem; } }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Web Speech API</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Pick your test</h1> <ul> <li><a href="phrase-matcher/index.html">Phrase matcher</a></li> <li> <a href="speak-easy-synthesis/index.html">Speak easy synthesis</a> </li> <li> <a href="speech-color-changer/index.html">Speech color changer</a> </li> </ul> <h2>More information</h2> <ul> <li> <a href="https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API" >Web Speech API</a > </li> <li> <a href="https://github.com/mdn/dom-examples/tree/main/web-speech-api" >The code used on these pages.</a > </li> </ul> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# web-speech-api

Code for demos illustrating features of the Web Speech API. See [Web_Speech_API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API) for more details.

> For the latest browser support, please have a look at the browser compatibility table here: [SpeechRecognition](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition#browser_compatibility) and [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API#browser_compatibility).

## Speech color changer demo

[Run recognition demo live](https://mdn.github.io/dom-examples/web-speech-api/speech-color-changer/)

Tap the screen then say a colour — the grammar string contains a large number of HTML color keywords to choose from, although we've removed most of the multiple-word colors to reduce ambiguity. We did keep goldenrod, because, well.

This currently works only on Chrome/Chrome Mobile. To get this code running successfully, you'll need to run the code through a web server (localhost will work).

## Phrase matcher demo

Speak the phrase and then see if the recognition engine successfully recognises it — this is another demo that relies on speech recognition, written for a research team at the University of Nebraska at Kearney.

This currently works only on Chrome/Chrome Mobile. To get this code running successfully, you'll need to run the code through a web server (localhost will work).

[Run phrase matcher demo live](https://mdn.github.io/dom-examples/web-speech-api/phrase-matcher/)

## Speak easy synthesis demo

[Run synthesis demo live](https://mdn.github.io/dom-examples/web-speech-api/speak-easy-synthesis/)

Type words in the input then submit the form to hear them spoken. You can also select the different voices available on the system, and alter the rate and pitch. This currently works in Chrome, Firefox and Safari.
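For reference, both recognition demos are built around the same core pattern, shown here as a minimal sketch (the prefixed `webkitSpeechRecognition` fallback is part of why they are currently Chrome-only):

```js
// Minimal sketch of the recognition pattern used by both demos.
// Chrome still exposes the constructor with a webkit prefix, hence the fallback.
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

const recognition = new SpeechRecognition();
recognition.lang = 'en-US';
recognition.interimResults = false;
recognition.maxAlternatives = 1;

recognition.onresult = (event) => {
  // results[0][0] is the top alternative for the first (and only) result
  console.log(`Heard: ${event.results[0][0].transcript}`);
};

// Should be triggered from a user gesture, e.g. a click handler
recognition.start();
```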
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Phrase matcher</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Phrase matcher</h1> <p>Press the button then say the phrase to test the recognition.</p> <button>Start new test</button> <div> <p class="phrase">Phrase...</p> <p class="result">Right or wrong?</p> <p class="output">...diagnostic messages</p> </div> <script src="script.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
body, html { margin: 0; } html { height: 100%; background-color: teal; } body { height: inherit; overflow: hidden; } h1, p { font-family: sans-serif; text-align: center; } div p { padding: 20px; background-color: rgba(0,0,0,0.2); } div { overflow: auto; position: absolute; bottom: 0; right: 0; left: 0; } button { margin: 0 auto; display: block; font-size: 1.1rem; width: 170px; line-height: 2; margin-top: 30px; } @media all and (max-height: 410px) { div { position: static; } } .phrase { font-weight: bold; } .output { font-style: italic; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition; var SpeechGrammarList = SpeechGrammarList || webkitSpeechGrammarList; var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent; var phrases = [ 'I love to sing because it\'s fun', 'where are you going', 'can I call you tomorrow', 'why did you talk while I was talking', 'she enjoys reading books and playing games', 'where are you going', 'have a great day', 'she sells seashells on the seashore' ]; var phrasePara = document.querySelector('.phrase'); var resultPara = document.querySelector('.result'); var diagnosticPara = document.querySelector('.output'); var testBtn = document.querySelector('button'); function randomPhrase() { var number = Math.floor(Math.random() * phrases.length); return number; } function testSpeech() { testBtn.disabled = true; testBtn.textContent = 'Test in progress'; var phrase = phrases[randomPhrase()]; // To ensure case consistency while checking with the returned output text phrase = phrase.toLowerCase(); phrasePara.textContent = phrase; resultPara.textContent = 'Right or wrong?'; resultPara.style.background = 'rgba(0,0,0,0.2)'; diagnosticPara.textContent = '...diagnostic messages'; var grammar = '#JSGF V1.0; grammar phrase; public <phrase> = ' + phrase +';'; var recognition = new SpeechRecognition(); var speechRecognitionList = new SpeechGrammarList(); speechRecognitionList.addFromString(grammar, 1); recognition.grammars = speechRecognitionList; recognition.lang = 'en-US'; recognition.interimResults = false; recognition.maxAlternatives = 1; recognition.start(); recognition.onresult = function(event) { // The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object // The SpeechRecognitionResultList object contains SpeechRecognitionResult objects. // It has a getter so it can be accessed like an array // The first [0] returns the SpeechRecognitionResult at position 0. // Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects that contain individual results. // These also have getters so they can be accessed like arrays. // The second [0] returns the SpeechRecognitionAlternative at position 0. // We then return the transcript property of the SpeechRecognitionAlternative object var speechResult = event.results[0][0].transcript.toLowerCase(); diagnosticPara.textContent = 'Speech received: ' + speechResult + '.'; if(speechResult === phrase) { resultPara.textContent = 'I heard the correct phrase!'; resultPara.style.background = 'lime'; } else { resultPara.textContent = 'That didn\'t sound right.'; resultPara.style.background = 'red'; } console.log('Confidence: ' + event.results[0][0].confidence); } recognition.onspeechend = function() { recognition.stop(); testBtn.disabled = false; testBtn.textContent = 'Start new test'; } recognition.onerror = function(event) { testBtn.disabled = false; testBtn.textContent = 'Start new test'; diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error; } recognition.onaudiostart = function(event) { //Fired when the user agent has started to capture audio. console.log('SpeechRecognition.onaudiostart'); } recognition.onaudioend = function(event) { //Fired when the user agent has finished capturing audio. console.log('SpeechRecognition.onaudioend'); } recognition.onend = function(event) { //Fired when the speech recognition service has disconnected. 
console.log('SpeechRecognition.onend'); } recognition.onnomatch = function(event) { //Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold. console.log('SpeechRecognition.onnomatch'); } recognition.onsoundstart = function(event) { //Fired when any sound — recognisable speech or not — has been detected. console.log('SpeechRecognition.onsoundstart'); } recognition.onsoundend = function(event) { //Fired when any sound — recognisable speech or not — has stopped being detected. console.log('SpeechRecognition.onsoundend'); } recognition.onspeechstart = function (event) { //Fired when sound that is recognised by the speech recognition service as speech has been detected. console.log('SpeechRecognition.onspeechstart'); } recognition.onstart = function(event) { //Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition. console.log('SpeechRecognition.onstart'); } } testBtn.addEventListener('click', testSpeech);
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width" />
    <title>Speech synthesiser</title>

    <link rel="stylesheet" href="style.css" />
  </head>

  <body>
    <h1>Speech synthesiser</h1>

    <p>
      Enter some text in the input below and press return or the "play" button
      to hear it. Change voices using the dropdown menu.
    </p>

    <form>
      <label for="txt">Enter text</label>
      <input id="txt" type="text" class="txt" />

      <div>
        <label for="rate">Rate</label>
        <input type="range" min="0.5" max="2" value="1" step="0.1" id="rate" />
        <div class="rate-value">1</div>
        <div class="clearfix"></div>
      </div>

      <div>
        <label for="pitch">Pitch</label>
        <input type="range" min="0" max="2" value="1" step="0.1" id="pitch" />
        <div class="pitch-value">1</div>
        <div class="clearfix"></div>
      </div>

      <select></select>

      <div class="controls">
        <button id="play" type="submit">Play</button>
      </div>
    </form>

    <script src="script.js"></script>
  </body>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
body, html { margin: 0; } html { height: 100%; } body { height: 90%; max-width: 800px; margin: 0 auto; } h1, p { font-family: sans-serif; text-align: center; padding: 20px; } .txt, select, form > div { display: block; margin: 0 auto; font-family: sans-serif; font-size: 16px; padding: 5px; } .txt { width: 80%; } select { width: 83%; } form > div { width: 81%; } .txt, form > div { margin-bottom: 10px; overflow: auto; } .clearfix { clear: both; } label { float: left; width: 10%; line-height: 1.5; } .rate-value, .pitch-value { float: right; width: 5%; line-height: 1.5; } #rate, #pitch { float: right; width: 81%; } .controls { text-align: center; margin-top: 10px; } .controls button { padding: 10px; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const synth = window.speechSynthesis; const inputForm = document.querySelector("form"); const inputTxt = document.querySelector(".txt"); const voiceSelect = document.querySelector("select"); const pitch = document.querySelector("#pitch"); const pitchValue = document.querySelector(".pitch-value"); const rate = document.querySelector("#rate"); const rateValue = document.querySelector(".rate-value"); let voices = []; function populateVoiceList() { voices = synth.getVoices().sort(function (a, b) { const aname = a.name.toUpperCase(); const bname = b.name.toUpperCase(); if (aname < bname) { return -1; } else if (aname == bname) { return 0; } else { return +1; } }); const selectedIndex = voiceSelect.selectedIndex < 0 ? 0 : voiceSelect.selectedIndex; voiceSelect.innerHTML = ""; for (let i = 0; i < voices.length; i++) { const option = document.createElement("option"); option.textContent = `${voices[i].name} (${voices[i].lang})`; if (voices[i].default) { option.textContent += " -- DEFAULT"; } option.setAttribute("data-lang", voices[i].lang); option.setAttribute("data-name", voices[i].name); voiceSelect.appendChild(option); } voiceSelect.selectedIndex = selectedIndex; } populateVoiceList(); if (speechSynthesis.onvoiceschanged !== undefined) { speechSynthesis.onvoiceschanged = populateVoiceList; } function speak() { if (synth.speaking) { console.error("speechSynthesis.speaking"); return; } if (inputTxt.value !== "") { const utterThis = new SpeechSynthesisUtterance(inputTxt.value); utterThis.onend = function (event) { console.log("SpeechSynthesisUtterance.onend"); }; utterThis.onerror = function (event) { console.error("SpeechSynthesisUtterance.onerror"); }; const selectedOption = voiceSelect.selectedOptions[0].getAttribute("data-name"); for (let i = 0; i < voices.length; i++) { if (voices[i].name === selectedOption) { utterThis.voice = voices[i]; break; } } utterThis.pitch = pitch.value; utterThis.rate = rate.value; synth.speak(utterThis); } } inputForm.onsubmit = function (event) { event.preventDefault(); speak(); inputTxt.blur(); }; pitch.onchange = function () { pitchValue.textContent = pitch.value; }; rate.onchange = function () { rateValue.textContent = rate.value; }; voiceSelect.onchange = function () { speak(); };
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Speech color changer</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Speech color changer</h1> <p class="hints"></p> <div> <p class="output"><em>...diagnostic messages</em></p> </div> <script src="script.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
body, html { margin: 0; } html { height: 100%; } body { height: inherit; overflow: hidden; max-width: 800px; margin: 0 auto; } h1, p { font-family: sans-serif; text-align: center; padding: 20px; } div { height: 100px; overflow: auto; position: absolute; bottom: 0px; right: 0; left: 0; background-color: rgba(255,255,255,0.2); } ul { margin: 0; } .hints span { text-shadow: 0px 0px 6px rgba(255,255,255,0.7); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition var SpeechGrammarList = SpeechGrammarList || window.webkitSpeechGrammarList var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent var colors = [ 'aqua' , 'azure' , 'beige', 'bisque', 'black', 'blue', 'brown', 'chocolate', 'coral', 'crimson', 'cyan', 'fuchsia', 'ghostwhite', 'gold', 'goldenrod', 'gray', 'green', 'indigo', 'ivory', 'khaki', 'lavender', 'lime', 'linen', 'magenta', 'maroon', 'moccasin', 'navy', 'olive', 'orange', 'orchid', 'peru', 'pink', 'plum', 'purple', 'red', 'salmon', 'sienna', 'silver', 'snow', 'tan', 'teal', 'thistle', 'tomato', 'turquoise', 'violet', 'white', 'yellow']; var recognition = new SpeechRecognition(); if (SpeechGrammarList) { // SpeechGrammarList is not currently available in Safari, and does not have any effect in any other browser. // This code is provided as a demonstration of possible capability. You may choose not to use it. var speechRecognitionList = new SpeechGrammarList(); var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + colors.join(' | ') + ' ;' speechRecognitionList.addFromString(grammar, 1); recognition.grammars = speechRecognitionList; } recognition.continuous = false; recognition.lang = 'en-US'; recognition.interimResults = false; recognition.maxAlternatives = 1; var diagnostic = document.querySelector('.output'); var bg = document.querySelector('html'); var hints = document.querySelector('.hints'); var colorHTML= ''; colors.forEach(function(v, i, a){ console.log(v, i); colorHTML += '<span style="background-color:' + v + ';"> ' + v + ' </span>'; }); hints.innerHTML = 'Tap/click then say a color to change the background color of the app. Try ' + colorHTML + '.'; document.body.onclick = function() { recognition.start(); console.log('Ready to receive a color command.'); } recognition.onresult = function(event) { // The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object // The SpeechRecognitionResultList object contains SpeechRecognitionResult objects. // It has a getter so it can be accessed like an array // The first [0] returns the SpeechRecognitionResult at the last position. // Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects that contain individual results. // These also have getters so they can be accessed like arrays. // The second [0] returns the SpeechRecognitionAlternative at position 0. // We then return the transcript property of the SpeechRecognitionAlternative object var color = event.results[0][0].transcript; diagnostic.textContent = 'Result received: ' + color + '.'; bg.style.backgroundColor = color; console.log('Confidence: ' + event.results[0][0].confidence); } recognition.onspeechend = function() { recognition.stop(); } recognition.onnomatch = function(event) { diagnostic.textContent = "I didn't recognise that color."; } recognition.onerror = function(event) { diagnostic.textContent = 'Error occurred in recognition: ' + event.error; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>screenX/screenTop and ScreenY/ScreenLeft example</title> <style> canvas { display: block; margin: 50px auto; } </style> </head> <body> <p></p> <canvas> <p>Your browser doesn't support canvas. Boo hoo!</p> </canvas> <script> const canvasElem = document.querySelector('canvas'); let width = canvasElem.width = 640; let height = canvasElem.height = 480 let ctx = canvasElem.getContext('2d'); function degToRad(degrees) { return degrees * Math.PI / 180; }; const pElem = document.querySelector('p'); let initialTop, initialLeft; if(!window.screenLeft) { window.screenLeft = window.screenX; window.screenTop = window.screenY; } initialLeft = window.screenLeft + canvasElem.offsetLeft; initialTop = window.screenTop + canvasElem.offsetTop; function positionElem() { let newLeft = window.screenLeft + canvasElem.offsetLeft; let newTop = window.screenTop + canvasElem.offsetTop; let leftUpdate = initialLeft - newLeft; let topUpdate = initialTop - newTop; ctx.fillStyle = 'rgb(0, 0, 0)'; ctx.fillRect(0, 0, width, height); ctx.fillStyle = 'rgb(0, 0, 255)'; ctx.beginPath(); ctx.arc(leftUpdate + (width/2), topUpdate + (height/2) + 35, 50, degToRad(0), degToRad(360), false); ctx.fill(); pElem.textContent = 'Window.screenLeft: ' + window.screenLeft + ', Window.screenTop: ' + window.screenTop; window.requestAnimationFrame(positionElem); } window.requestAnimationFrame(positionElem); </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <title>insertAdjacentElement() demo</title> <style> div { width: 50px; height: 50px; margin: 3px; border: 3px solid black; display: inline-block; background-color: red; } </style> </head> <body> <p>Click colored box to select it, then use the first two buttons below to insert elements before and after your selection.</p> <section> <div></div><div></div><div></div><div></div> </section> <button class="before">Insert before</button> <button class="after">Insert after</button> <button class="reset">Reset demo</button> </body> <script> const beforeBtn = document.querySelector('.before'); const afterBtn = document.querySelector('.after'); const resetBtn = document.querySelector('.reset'); const container = document.querySelector('section'); let activeElem; resetBtn.addEventListener('click', () => { while (container.firstChild) { container.removeChild(container.firstChild); } for (let i = 0; i <=3; i++) { let tempDiv = document.createElement('div'); container.appendChild(tempDiv); setListener(tempDiv); } }); beforeBtn.addEventListener('click', function() { let tempDiv = document.createElement('div'); tempDiv.style.backgroundColor = randomColor(); if (activeElem) { activeElem.insertAdjacentElement('beforebegin',tempDiv); } setListener(tempDiv); }); afterBtn.addEventListener('click', function() { let tempDiv = document.createElement('div'); tempDiv.style.backgroundColor = randomColor(); if (activeElem) { activeElem.insertAdjacentElement('afterend',tempDiv); } setListener(tempDiv); }); function setListener(elem) { elem.addEventListener('click', function() { const allElems = document.querySelectorAll('section div'); for (let i = 0; i < allElems.length; i++) { allElems[i].style.border = '3px solid black'; } elem.style.border = '3px solid aqua'; activeElem = elem; }) }; function randomColor() { function random() { const result = Math.floor(Math.random() * 255); return result; } return 'rgb(' + random() + ',' + random() + ',' + random() + ')'; } function init() { const initElems = document.querySelectorAll('section div'); for (let i = 0; i < initElems.length; i++) { setListener(initElems[i]); } }; init(); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en">
  <head>
    <title>insertAdjacentText() demo</title>
    <style>
      section p {
        font-size: 1.1rem;
        color: blue;
      }

      input {
        margin-bottom: 0.5rem;
      }
    </style>
  </head>

  <body>
    <p>Enter some text to add, then use the first two buttons below to insert your text before or after the existing text.</p>

    <section>
      <p>This is my text</p>
    </section>

    <input type="text"><br>

    <button class="before">Insert before</button>
    <button class="after">Insert after</button>
    <button class="reset">Reset demo</button>
  </body>

  <script>
    var beforeBtn = document.querySelector('.before');
    var afterBtn = document.querySelector('.after');
    var resetBtn = document.querySelector('.reset');
    var para = document.querySelector('section p');
    var initContent = para.textContent;
    var textInput = document.querySelector('input');

    resetBtn.addEventListener('click', function() {
      para.textContent = initContent;
      textInput.value = '';
    });

    beforeBtn.addEventListener('click', function() {
      para.insertAdjacentText('afterbegin', textInput.value);
    });

    afterBtn.addEventListener('click', function() {
      para.insertAdjacentText('beforeend', textInput.value);
    });
  </script>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>HTMLDialogElement example</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> </head> <body> <!-- Simple pop-up dialog box, containing a form --> <dialog id="favDialog"> <form method="dialog"> <p> <label for="favAnimal">Favorite animal:</label> <select id="favAnimal" name="favAnimal"> <option></option> <option>Brine shrimp</option> <option>Red panda</option> <option>Spider monkey</option> </select> </p> <div> <button id="cancel" type="reset">Cancel</button> <button type="submit">Confirm</button> </div> </form> </dialog> <div> <button id="updateDetails">Update details</button> </div> <script> (function() { const updateButton = document.getElementById('updateDetails'); const cancelButton = document.getElementById('cancel'); const dialog = document.getElementById('favDialog'); dialog.returnValue = 'favAnimal'; function openCheck(dialog) { if(dialog.open) { console.log('Dialog open'); } else { console.log('Dialog closed'); } } // Update button opens a modal dialog updateButton.addEventListener('click', function() { dialog.showModal(); openCheck(dialog); }); // Form cancel button closes the dialog box cancelButton.addEventListener('click', function() { dialog.close('animalNotChosen'); openCheck(dialog); }); })(); </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en-US"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Channel messaging demo</title> </head> <body> <h1>Channel messaging demo</h1> <p class="output">My body</p> <iframe src="page2.html" width="480" height="320"></iframe> <script> const channel = new MessageChannel(); const output = document.querySelector('.output'); const iframe = document.querySelector('iframe'); // Wait for the iframe to load iframe.addEventListener("load", onLoad); function onLoad() { // Listen for messages on port1 channel.port1.onmessage = onMessage; // Transfer port2 to the iframe iframe.contentWindow.postMessage("Hello from the main page!", "*", [ channel.port2, ]); } // Handle messages received on port1 function onMessage(e) { output.innerHTML = e.data; } </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en-US">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width" />
    <title>My page title</title>
  </head>

  <body>
    <p class="output">iFrame body</p>

    <script>
      const output = document.querySelector(".output");

      window.addEventListener("message", onMessage);

      function onMessage(e) {
        output.innerHTML = e.data;

        // Use the transferred port to post a message back to the main frame
        e.ports[0].postMessage("Message back from the IFrame");
      }
    </script>
  </body>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width"> <title>Channel messaging demo</title> <link href="http://fonts.googleapis.com/css?family=Open+Sans+Condensed:300|Lobster+Two" rel="stylesheet" type="text/css" /> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Channel messaging demo</h1> <p id="message-output">Message not yet sent</p> <form> <label for="message-input">Send a message</label> <input type="text" id="message-input" autofocus /> <button>Send Message</button> </form> <iframe src="page2.html" width="480" height="320"></iframe> <script> const input = document.getElementById("message-input"); const output = document.getElementById("message-output"); const button = document.querySelector("button"); const iframe = document.querySelector("iframe"); const channel = new MessageChannel(); const port1 = channel.port1; // Wait for the iframe to load iframe.addEventListener("load", onLoad); function onLoad() { // Listen for button clicks button.addEventListener("click", onClick); // Listen for messages on port1 port1.onmessage = onMessage; // Transfer port2 to the iframe iframe.contentWindow.postMessage("init", "*", [channel.port2]); } // Post a message on port1 when the button is clicked function onClick(e) { e.preventDefault(); port1.postMessage(input.value); } // Handle messages received on port1 function onMessage(e) { output.innerHTML = e.data; input.value = ""; } </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width">
    <title>My page title</title>

    <link href="http://fonts.googleapis.com/css?family=Open+Sans+Condensed:300|Lobster+Two" rel="stylesheet" type="text/css" />
    <link rel="stylesheet" href="style.css" />
  </head>

  <body>
    <ul></ul>

    <script>
      const list = document.querySelector("ul");
      let port2;

      // Listen for the initial port transfer message
      window.addEventListener("message", initPort);

      // Set up the transferred port
      function initPort(e) {
        port2 = e.ports[0];
        port2.onmessage = onMessage;
      }

      // Handle messages received on port2
      function onMessage(e) {
        const listItem = document.createElement("li");
        listItem.textContent = e.data;
        list.appendChild(listItem);
        port2.postMessage('Message received by IFrame: "' + e.data + '"');
      }
    </script>
  </body>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html,body { margin: 0; font-family: 'Open Sans Condensed', sans-serif; } body { width: 90%; max-width: 480px; margin: 0 auto; } form { width: 60%; margin: 10px auto; } form input { width: 55%; } form label { width: 40%; margin-right: 5%; } form button { width: 60%; display: block; margin: 10px auto 0; } p { margin: 10px 0; } h1, p { text-align: center; } h1 { font-family: 'Lobster Two', cursive; } ul { width: 90%; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# fetch-examples A repository of Fetch examples. See [https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API) for the corresponding documentation. See below for links to the live versions of each example: - [Basic fetch example](https://mdn.github.io/dom-examples/fetch/basic-fetch/) - [Fetch array buffer example](https://mdn.github.io/dom-examples/fetch/fetch-array-buffer/) - [Fetch JSON example](https://mdn.github.io/dom-examples/fetch/fetch-json/) - [Basic Fetch example with Request object](https://mdn.github.io/dom-examples/fetch/fetch-request/) - [Basic Fetch example with Request object and Init object](https://mdn.github.io/dom-examples/fetch/fetch-request-with-init/) - [Basic Fetch example with returned Response properties](https://mdn.github.io/dom-examples/fetch/fetch-response/) - [Fetch cloned Response example](https://mdn.github.io/dom-examples/fetch/fetch-response-clone/) - [Fetch text example](https://mdn.github.io/dom-examples/fetch/fetch-text/) - [Fetch example with Request object and Init object](https://mdn.github.io/dom-examples/fetch/fetch-with-init-then-request/) - [object-fit gallery with fetched images](https://mdn.github.io/dom-examples/fetch/object-fit-gallery-fetch/)
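All of the examples share the same core pattern: check `response.ok`, pick a body-reading method (`blob()`, `json()`, `text()`, or `arrayBuffer()`), and handle failures in a `catch`. As a minimal sketch:

```js
fetch("flowers.jpg")
  .then((response) => {
    // fetch() only rejects on network failure, so check the HTTP status ourselves
    if (!response.ok) {
      throw new Error(`HTTP error, status = ${response.status}`);
    }
    return response.blob(); // or response.json(), response.text(), ...
  })
  .then((blob) => {
    // Do something with the body, e.g. display it via an object URL
    document.querySelector("img").src = URL.createObjectURL(blob);
  })
  .catch((error) => {
    console.error(`Fetch failed: ${error.message}`);
  });
```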
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Object-fit gallery Fetch()</title> <link rel="stylesheet" href="style.css" /> </head> <body> <div class="top-row"> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> </div> <div class="second-row"> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> </div> <div class="third-row"> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> </div> <div class="bottom-row"> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> <img class="thumb" /> </div> <img class="main" /> </body> <script src="main.js"></script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html { margin: 0; background: black; height: 100%; } body { margin: 0; width: 100%; height: inherit; } /* the three main rows going down the page */ body > div { height: 25%; } .thumb { float: left; width: 25%; height: 100%; object-fit: cover; } .main { display: none; } .blowup { display: block; position: absolute; object-fit: contain; object-position: center; top: 0; left: 0; width: 100%; height: 100%; z-index: 2000; } .darken { opacity: 0.4; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const thumbs = document.querySelectorAll(".thumb"); const mainImg = document.querySelector(".main"); thumbs.forEach((thumb, index) => { const requestObj = `images/pic${index + 1}.jpg`; fetch(requestObj) .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.blob(); }) .then((blob) => displayImage(thumb, blob)) .catch((error) => { thumb.title = `Image load failed: ${error.message}`; }); }); function displayImage(currentThumb, blob) { const objectURL = URL.createObjectURL(blob); currentThumb.setAttribute("src", objectURL); currentThumb.onclick = () => { mainImg.setAttribute("src", objectURL); mainImg.className = "blowup"; for (const thumb of thumbs) { thumb.className = "thumb darken"; } }; } mainImg.onclick = () => { mainImg.className = "main"; for (const thumb of thumbs) { thumb.className = "thumb"; } };
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width" />
    <title>Fetch basic response example</title>
    <link rel="stylesheet" href="" />
  </head>

  <body>
    <h1>Fetch basic response example</h1>
    <img src="" />
  </body>

  <script>
    const myImage = document.querySelector("img");
    const myRequest = new Request("flowers.jpg");

    fetch(myRequest)
      .then((response) => {
        console.log('response.type =', response.type);
        console.log('response.url =', response.url);
        console.log('response.redirected =', response.redirected);
        console.log('response.status =', response.status);
        console.log('response.ok =', response.ok);
        console.log('response.statusText =', response.statusText);
        console.log('response.headers =', response.headers);
        if (!response.ok) {
          throw new Error(`HTTP error, status = ${response.status}`);
        }
        return response.blob();
      })
      .then((myBlob) => {
        const objectURL = URL.createObjectURL(myBlob);
        myImage.src = objectURL;
      })
      .catch((error) => {
        const p = document.createElement("p");
        p.appendChild(document.createTextNode(`Error: ${error.message}`));
        document.body.insertBefore(p, myImage);
      });

    const myBlob = new Blob();
    const options = { status: 200, statusText: "SuperSmashingGreat!" };
    const myResponse = new Response(myBlob, options);
  </script>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch Request example with init</title> <link rel="stylesheet" href="" /> </head> <body> <h1>Fetch Request with init example</h1> <img src="" /> </body> <script> const myImage = document.querySelector("img"); const myHeaders = new Headers(); const myOptions = { method: "GET", headers: myHeaders, mode: "cors", cache: "default", }; const myRequest = new Request("flowers.jpg", myOptions); fetch(myRequest) .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.blob(); }) .then((blob) => { const objectURL = URL.createObjectURL(blob); myImage.src = objectURL; }) .catch((error) => { const p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error.message}`)); document.body.insertBefore(p, myImage); }); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width" />
    <title>Fetch arrayBuffer example</title>
    <link rel="stylesheet" href="" />
  </head>

  <body>
    <h1>Fetch arrayBuffer example</h1>
    <button class="play">Play</button>
    <button class="stop">Stop</button>
    <span class="error"></span>
    <pre></pre>
  </body>

  <script>
    const pre = document.querySelector("pre");
    const myScript = document.querySelector("script");
    const play = document.querySelector(".play");
    const stop = document.querySelector(".stop");
    const errorDisplay = document.querySelector(".error");

    // Keep a reference to the currently playing source so the stop button can stop it
    let source;

    // Use fetch to load an audio track, and decodeAudioData to decode it
    // and stick it in a buffer. Then we put the buffer into the source.
    function getData() {
      const audioCtx = new AudioContext();

      return fetch("viper.ogg")
        .then((response) => {
          if (!response.ok) {
            throw new Error(`HTTP error, status = ${response.status}`);
          }
          return response.arrayBuffer();
        })
        .then((buffer) => audioCtx.decodeAudioData(buffer))
        .then((decodedData) => {
          const newSource = new AudioBufferSourceNode(audioCtx);
          newSource.buffer = decodedData;
          newSource.connect(audioCtx.destination);
          return newSource;
        });
    }

    // Wire up buttons to stop and play audio
    play.onclick = () => {
      getData()
        .then((newSource) => {
          source = newSource;
          errorDisplay.innerHTML = "";
          source.start(0);
          play.disabled = true;
        })
        .catch((error) => {
          errorDisplay.appendChild(document.createTextNode(`Error: ${error.message}`));
        });
    };

    stop.onclick = () => {
      if (source) {
        source.stop(0);
      }
      play.disabled = false;
    };

    // Dump script to pre element
    pre.innerHTML = myScript.innerHTML;
  </script>
</html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch basic example</title> <link rel="stylesheet" href="" /> </head> <body> <h1>Fetch basic example</h1> <img src="" class="my-image" /> </body> <script> const myImage = document.querySelector(".my-image"); fetch("flowers.jpg") .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.blob(); }) .then((myBlob) => { const objectURL = URL.createObjectURL(myBlob); myImage.src = objectURL; }) .catch((error) => { const p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error.message}`)); document.body.insertBefore(p, myImage); }); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch Request example</title> </head> <body> <h1>Fetch Request example</h1> <img src="" /> </body> <script> const myImage = document.querySelector("img"); const myRequest = new Request("flowers.jpg"); fetch(myRequest) .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.blob(); }) .then((myBlob) => { const objectURL = URL.createObjectURL(myBlob); myImage.src = objectURL; }) .catch((error) => { const p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error.message}`)); document.body.insertBefore(p, myImage); }); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch with init then Request example</title> <link rel="stylesheet" href="" /> </head> <body> <h1>Fetch with init then Request example</h1> <img src="" /> </body> <script> const myImage = document.querySelector("img"); const myHeaders = new Headers(); myHeaders.append("Content-Type", "image/jpeg"); const myOptions = { method: "GET", headers: myHeaders, mode: "cors", cache: "default", }; const myRequest = new Request("flowers.jpg"); fetch(myRequest, myOptions) .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.blob(); }) .then((blob) => { const objectURL = URL.createObjectURL(blob); myImage.src = objectURL; }) .catch((error) => { var p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error.message}`)); document.body.insertBefore(p, myImage); }); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch text example</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Fetch text example</h1> <ul> <li><a data-page="page1">Page 1</a></li> <li><a data-page="page2">Page 2</a></li> <li><a data-page="page3">Page 3</a></li> </ul> <article></article> </body> <script> const myArticle = document.querySelector("article"); const myLinks = document.querySelectorAll("ul a"); for (const link of myLinks) { link.onclick = (e) => { e.preventDefault(); const linkData = e.target.getAttribute("data-page"); getData(linkData); }; } function getData(pageId) { console.log(pageId); const myRequest = new Request(`${pageId}.txt`); fetch(myRequest) .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.text(); }) .then((text) => { myArticle.innerText = text; }) .catch((error) => { myArticle.innerText = `Error: ${error.message}`; }); } </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
Page 3: The end unseen?

A kilometer or so more, and the cottage was now in sight — our hero could see lights shining in the windows, and a figure shuffling around within! This was the moment he had been waiting for, surely! He tiptoed up to the door, and carefully tried the handle. Finding it unlocked, he burst open the door and shouted "Your time has come, scoundrel!" He was slightly disappointed to come face to face with a slightly scared dog.
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html { font-family: sans-serif; } h1, h2 { text-align: center; } article { width: 400px; min-height: 480px; margin: 0 auto; padding: 10px; background-image: repeating-linear-gradient( to bottom, transparent 1px, transparent 20px, rgb(0, 0, 150) 21px ), linear-gradient(to bottom right, white, #ccc); border-radius: 20px; box-shadow: 5px 5px 10px rgba(0, 0, 0, 0.7); line-height: 1.5; } ul { list-style-type: none; padding-left: 0; width: 480px; margin: 0 auto; padding-bottom: 30px; } li { float: left; width: 33%; } ul li a { display: block; text-align: center; color: blue; text-decoration: underline; cursor: pointer; } ul li a:hover, ul li a:focus { text-decoration: none; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
Page 2: Light at the end of the tunnel

Brambles tugged at his clothes, and the rain lashed down, leaving our hero bruised and cold. When hope was all but lost, he noticed a dim light shining through the trees — this must be the woodcutter's cottage. This very sight spurred him to continue.
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
Page 1: A desperate journey

This is the first exciting part of our story! Once upon a time, a man was creeping into a dark forest to try to find the woodcutter's cottage. Somewhere, an owl hooted. A feeling of lingering dread spread all over his body, but he pressed on regardless.
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch json example</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Fetch json example</h1> <ul></ul> </body> <script> const myList = document.querySelector("ul"); fetch("products.json") .then((response) => { if (!response.ok) { throw new Error(`HTTP error, status = ${response.status}`); } return response.json(); }) .then((data) => { for (const product of data.products) { const listItem = document.createElement("li"); const nameElement = document.createElement("strong"); nameElement.textContent = product.Name; const priceElement = document.createElement("strong"); priceElement.textContent = `£${product.Price}`; listItem.append( nameElement, ` can be found in ${product.Location}. Cost: `, priceElement, ); myList.appendChild(listItem); } }) .catch((error) => { const p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error.message}`)); document.body.insertBefore(p, myList); }); </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html { font-family: sans-serif; } ul { list-style-type: none; display: flex; flex-flow: column; align-items: flex-start; } li { margin-bottom: 10px; background-color: pink; font-size: 150%; border-top: 3px solid pink; border-bottom: 3px solid pink; box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.7); } strong { background-color: purple; color: white; padding: 0 8px; border-top: 3px solid purple; border-bottom: 3px solid purple; text-shadow: 2px 2px 1px black; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Fetch response clone example</title> <style> img { width: 300px; border: 2px solid black; box-shadow: 4px 4px 4px rgba(0, 0, 0, 0.9); margin: 0 10px; } </style> </head> <body> <h1>Fetch response clone example</h1> <img src="" class="img1" /> <img src="" class="img2" /> </body> <script> const image1 = document.querySelector(".img1"); const image2 = document.querySelector(".img2"); fetch("flowers.jpg").then((response1) => { if (!response1.ok) { showError(image1, `HTTP error, status = ${response1.status}`); } else { const response2 = response1.clone(); useResponse(response1, image1); useResponse(response2, image2); } }); function useResponse(response, image) { response .blob() .then((myBlob) => { const objectURL = URL.createObjectURL(myBlob); image.src = objectURL; }) .catch((error) => { const p = document.createElement("p"); p.appendChild(document.createTextNode(`Error: ${error}`)); document.body.insertBefore(p, image); }); } </script> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
@font-face { font-family: 'zillaslab'; src: url('https://s3-us-west-2.amazonaws.com/s.cdpn.io/858/ZillaSlab.woff2') format('woff2'); } :root { --black: hsl(0, 0%, 16%); --white: hsl(0,0%,97%); --blue: hsl(198, 100%, 66%); --teal: hsl(198, 43%, 42%); --lightYellow: hsl(43, 100%, 92%); --grey: hsl(0, 0%, 80%); --unit: 1.2rem; } body { padding: var(--unit); background-color: var(--white); font-family: 'Arial', sans-serif; font-size: 100%; color: var(--black); line-height: 1.3; } /* page partials */ footer { padding: var(--unit); margin-top: calc(var(--unit)*2); border-top: 1px solid var(--grey); } footer p { margin: 0px; text-align: center; } /* base styles */ h1, h2 { font-family: "zillaslab", serif; } h2 { padding: calc(var(--unit)/2); background-color: var(--black); color: var(--white); font-weight: normal; } p {} a { border: 1px solid var(--teal); border-width: 0px 0px 1px 0px; color: var(--teal); text-decoration: none; } a:hover { border-width: 1px 0px 0px 0px; } nav ul { display: flex; justify-content: space-between; margin: 0px; padding: 0px; list-style: none; } nav li {margin: 0px; padding: 0px;} dl {display: flex; flex-wrap: wrap;} dt, dd {padding: 2%; box-sizing: border-box;} dt {width: 30%; font-weight: bold; text-align: right;} dd {width: 66%; margin: 0px;} code { background-color: var(--lightYellow); font-family:monospace; font-size:110%; letter-spacing:0.5px; } pre { padding: var(--unit); background-color: var(--grey); border-left: 4px solid var(--teal); white-space: pre-wrap; overflow-wrap: break-word; tab-size: 4; font-size: 86%; } pre code { background: none; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en" > <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="description" content="A media session API demo for MDN"> <title>Media Session Demo</title> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css"> <!-- my styles --> <link rel="stylesheet" href="moz.css"> <style> #status { display: grid; grid-template-columns: 1fr 2fr; } #status h3 {grid-area: 1 / 1 / 2 / 3;} #status dl {grid-area: 2 / 2 / 3 / 3;} #status img { max-width: 300px; grid-area: 2 / 1 / 3 / 2; } </style> <script src="script.js" defer></script> </head> <body> <header> <h1>Media Session Demo</h1> </header> <article> <header> <h2>Here's a playlist.</h2> </header> <p>When you press play, if media session is supported, controls should be available through the UA.</p> <section id="playlist"> <audio controls src="tracks/secret.mp3"></audio> </section> <section id="status"> <h3>No track is playing</h3> <dl> <dt>Track:</dt> <dd>N/A</dd> <dt>Artist:</dt> <dd>N/A</dd> <dt>Album:</dt> <dd>N/A</dd> </dl> <img src="" alt="" /> </section> </article> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
Exposes metadata about currently available media within web pages and apps. Allows playing, pausing, seeking (via the scrubber), & skipping tracks. There are lots of different integrated OS-level UIs for media control. Think audio controls on a smartwatch, or picture-in-picture controls on a desktop. Even hardware buttons, like play and pause keys on a laptop... This API looks to provide consistent data for media, so

<!-- This is taken directly from Francois -->

Examples: a user pressing the next-track button on a keyboard, a user seeking back 30 secs whilst listening to a podcast, or using a media hub to control media playing in browser tabs.

There are two interfaces: MediaMetadata, which specifies info about the media, and MediaSession, which handles the controls.

Let's take a moment to clarify the terms for controlling media. Note the difference between seeking and skipping. Seeking is finding a specific point in a track or video, whereas skipping is moving between tracks, either to the one before or after in the playlist.

It's up to the UA to provide the interface; however, we can use these interfaces to enhance that experience. When a website plays audio, Chrome gives notifications (how does Firefox handle this?). There's only one metadata 'slot', so only one piece of media can be shown, or added to the interface, at any time.

Let's make a data object with all the metadata we will need for our playlist. For the purposes of the code demo I'm using one large and one small image; however, you can add in as many sizes as you want. The data also allows for blob & data URLs. You must use the same context - so if something is playing in an iframe, its metadata must be set in the code used in the iframe.

When playback ends, any media notifications will be removed from the interface. Remember there is one slot, so if the next media in a playlist starts, the metadata will need to reflect that new data.

- update metadata function?

## Media Session Actions

A media session action is much like an event. They include interactions such as 'play' and 'pause' and are implemented by setting handlers on appropriate objects. Use a try...catch when setting actions as some may not be supported.

- take through each action

Media action controls won't be shown unless the proper handler is set.

## Play and Pause

Automatically handled by the browser. Can be overridden. Need to update the playback state if we do.
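A minimal sketch tying the above together - setting the one metadata slot, and registering an action handler inside a try...catch. The element ID and metadata values here are placeholders, not from the demo:

```js
const audio = document.querySelector("#player"); // placeholder <audio> element

if ("mediaSession" in navigator) {
  // One metadata 'slot': setting this replaces whatever was shown before
  navigator.mediaSession.metadata = new MediaMetadata({
    title: "Track title",
    artist: "Artist name",
    album: "Album name",
    artwork: [{ src: "art512.jpg", sizes: "512x512", type: "image/jpeg" }],
  });

  // Wrap each handler in try...catch: some actions may not be supported
  try {
    navigator.mediaSession.setActionHandler("play", async () => {
      await audio.play();
      // play/pause are handled by the browser by default; if we override
      // them we need to update the playback state ourselves
      navigator.mediaSession.playbackState = "playing";
    });
  } catch (error) {
    console.log('The "play" action is not supported.');
  }
}
```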
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
console.clear();

// set up a faux status - to mimic what a UA would implement
const statusEl = document.querySelector('#status');

function updateStatus(track, playState = 'Stopped') {
  // update header
  statusEl.querySelector('h3').innerText = playState;
  // update track info
  const info = statusEl.querySelectorAll('dd');
  info[0].innerText = track.title;
  info[1].innerText = track.artist;
  info[2].innerText = track.album;
  // update image
  statusEl.querySelector('img').src = track.artwork[1].src;
}

// specify our data for our three tracks
// NB we include src, which isn't part of the metadata interface, so we can load the requested file
const allMeta = [
  {
    src: 'tracks/secret.mp3',
    title: 'Secret Garden',
    artist: 'Eugenio Mininni',
    album: 'Mellow',
    artwork: [
      { src: 'tracks/secret96.jpg', sizes: '96x96', type: 'image/jpeg' },
      { src: 'tracks/secret512.jpg', sizes: '512x512', type: 'image/jpeg' },
    ]
  },
  {
    src: 'tracks/alignment.mp3',
    title: 'Alignment',
    artist: 'Arulo',
    album: 'Atmospheres',
    artwork: [
      { src: 'tracks/align96.jpg', sizes: '96x96', type: 'image/jpeg' },
      { src: 'tracks/align512.jpg', sizes: '512x512', type: 'image/jpeg' },
    ]
  },
  {
    src: 'tracks/okay.mp3',
    title: "We'll Be Okay",
    artist: 'Michael Ramir C',
    album: 'Dramatic',
    artwork: [
      { src: 'tracks/okay96.jpg', sizes: '96x96', type: 'image/jpeg' },
      { src: 'tracks/okay512.jpg', sizes: '512x512', type: 'image/jpeg' },
    ]
  }
];
// all tracks sourced from https://mixkit.co for use under creative commons license

let isSupported = false;

// check for compatibility & set the first track metadata
if ("mediaSession" in navigator) {
  isSupported = true;
  navigator.mediaSession.metadata = new MediaMetadata(allMeta[0]);
} else {
  // NB use allMeta[0] here: index isn't declared until further down
  updateStatus(allMeta[0], 'Media Session is not supported ☹️');
}

// grab our audio element
const audioEl = document.querySelector('audio');

// set first track index
let index = 0;

// function for updating metadata on track change (see actions below)
function updateMetaData() {
  const track = allMeta[index];
  navigator.mediaSession.metadata = new MediaMetadata(track);
  updatePositionState();
}

// function to update position state, used when metadata is updated and seeking is actioned (see below)
function updatePositionState() {
  navigator.mediaSession.setPositionState({
    duration: audioEl.duration,
    playbackRate: audioEl.playbackRate,
    position: audioEl.currentTime
  });
}

// create a generic play audio function which also updates which track we are playing
function playAudio() {
  audioEl.src = allMeta[index].src;
  audioEl.play()
    .then(_ => updateMetaData())
    .catch(error => console.log(error));
}

// let's declare all action handlers in an array and we'll loop through them with a try ... catch below
// NB play/pause should be active by default, however other actions need to be set

// set a skip time (seconds) for seeking back/forward
const defaultSkipTime = 10;

const actionHandlers = [
  [
    'play',
    async function() {
      // play our audio
      await audioEl.play();
      // set playback state
      navigator.mediaSession.playbackState = "playing";
      // update our status element
      updateStatus(allMeta[index], 'Action: play | Track is playing...');
    }
  ],
  [
    'pause',
    () => {
      // pause our audio
      audioEl.pause();
      // set playback state
      navigator.mediaSession.playbackState = "paused";
      // update our status element
      updateStatus(allMeta[index], 'Action: pause | Track has been paused...');
    }
  ],
  [
    'previoustrack',
    () => {
      // if it's the first track start from the beginning of the track (there is no previous)
      let statusMess = 'Previous track is now playing...';
      if (index === 0) {
        statusMess = 'No previous track, first track is now playing...';
      } else {
        index = index - 1;
      }
      // play
      playAudio();
      // update our status element
      updateStatus(allMeta[index], 'Action: previoustrack | ' + statusMess);
    }
  ],
  [
    'nexttrack',
    () => {
      // if it's the last track, stop playback and reset the playlist
      let statusMess = 'Next track is now playing...';
      if (index === (allMeta.length - 1)) {
        statusMess = 'No next track, playlist finished...';
        // empty metadata
        navigator.mediaSession.metadata = null;
        audioEl.pause();
        index = 0;
      } else {
        index = index + 1;
        // play
        playAudio();
      }
      // update our status element
      updateStatus(allMeta[index], 'Action: nexttrack | ' + statusMess);
    }
  ],
  [
    'stop',
    () => {
      // there is no stop functionality on a media element, so let's pause the audio and reset the track to be the first
      audioEl.pause();
      index = 0;
      // update our status element
      updateStatus(allMeta[index], 'Action: stop | Playlist has stopped.');
    }
  ],
  [
    'seekbackward',
    (details) => {
      // details are: seekOffset, seekTime, fastSeek
      // get our skip time and set audio to that time, clamping at the start of the track
      const skipTime = details.seekOffset || defaultSkipTime;
      audioEl.currentTime = Math.max(audioEl.currentTime - skipTime, 0);
      // update our position
      updatePositionState();
      // update our status element
      updateStatus(allMeta[index], 'Action: seekbackward | Track has moved backward...');
    }
  ],
  [
    'seekforward',
    (details) => {
      // details are: seekOffset, seekTime, fastSeek
      // get our skip time and set audio to that time, clamping at the end of the track
      const skipTime = details.seekOffset || defaultSkipTime;
      audioEl.currentTime = Math.min(audioEl.currentTime + skipTime, audioEl.duration);
      // update our position
      updatePositionState();
      // update our status element
      updateStatus(allMeta[index], 'Action: seekforward | Track has moved forward...');
    }
  ],
  [
    'seekto',
    (details) => {
      // set our audio el to the correct time; prefer fastSeek() where it's requested and available
      if (details.fastSeek && 'fastSeek' in audioEl) {
        audioEl.fastSeek(details.seekTime);
      } else {
        audioEl.currentTime = details.seekTime;
      }
      // update our position
      updatePositionState();
      // update our status element
      updateStatus(allMeta[index], 'Action: seekto | Track has moved...');
    }
  ],
  // NB there is also a 'skipad' action
];

// TODO update playback state in certain actions

for (const [action, handler] of actionHandlers) {
  try {
    navigator.mediaSession.setActionHandler(action, handler);
  } catch (error) {
    console.log(`The media session action "${action}" is not supported yet.`);
  }
}

// add a couple of event listeners to the audio el to move to the next track and update page info on first play
audioEl.addEventListener('ended', function() {
  // Play next track
  if (index < allMeta.length - 1) {
    index++;
    playAudio();
  }
});

// on first play show correct info
let first = true;
audioEl.addEventListener('play', function() {
  if (first) {
    const message = isSupported ? "MediaSession active" : "MediaSession not supported";
    updateStatus(allMeta[index], message);
    first = false;
  }
});
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!doctype html> <html lang="en"> <head> <title>Fullscreen API Example</title> <script src="main.js"></script> <link href="main.css" rel="stylesheet"> </head> <body> <div id="contents"> <p>A quick example to demonstrate usage of the <a href="https://developer.mozilla.org/en-US/docs/Web/API/Fullscreen_API">Fullscreen API</a>. Press <kbd>Enter</kbd> to toggle fullscreen mode.</p> <video src="assets/bigbuckbunny.mp4" id="video" controls width="480"></video> <div id="credits"><a href="https://peach.blender.org/download/">Video by Blender</a>; <a href="https://peach.blender.org/about/">licensed CC-BY 3.0</a></div> </div> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
#credits { padding: 0 0 10px 0; font: italic 12px "Open Sans", sans-serif; } #video { max-width: 100%; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
window.addEventListener("load", startup, false); function startup() { // Get the reference to video const video = document.getElementById("video"); // On pressing ENTER call toggleFullScreen method document.addEventListener("keypress", function(e) { if (e.key === 'Enter') { toggleFullScreen(video); } }, false); } function toggleFullScreen(video) { if (!document.fullscreenElement) { // If the document is not in full screen mode // make the video full screen video.requestFullscreen(); } else { // Otherwise exit the full screen if (document.exitFullscreen) { document.exitFullscreen(); } } }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# Fullscreen API example

This example allows you to toggle the video in and out of fullscreen mode by pressing the <kbd>Enter</kbd> key.

[See the example live](https://mdn.github.io/dom-examples/fullscreen-api/).
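The heart of `main.js` is a toggle built on `document.fullscreenElement`:

```js
function toggleFullScreen(video) {
  if (!document.fullscreenElement) {
    // Not currently fullscreen: make the video fullscreen
    video.requestFullscreen();
  } else if (document.exitFullscreen) {
    // Otherwise leave fullscreen
    document.exitFullscreen();
  }
}
```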
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const first = document.querySelector("#number1"); const second = document.querySelector("#number2"); const result1 = document.querySelector(".result1"); if (!!window.SharedWorker) { const myWorker = new SharedWorker("worker.js"); first.onchange = function () { myWorker.port.postMessage([first.value, second.value]); console.log("Message posted to worker"); }; second.onchange = function () { myWorker.port.postMessage([first.value, second.value]); console.log("Message posted to worker"); }; myWorker.port.onmessage = function (e) { result1.textContent = e.data; console.log("Message received from worker"); console.log(e.lastEventId); }; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const squareNumber = document.querySelector("#number3"); const result2 = document.querySelector(".result2"); if (!!window.SharedWorker) { const myWorker = new SharedWorker("worker.js"); squareNumber.onchange = function () { myWorker.port.postMessage([squareNumber.value, squareNumber.value]); console.log("Message posted to worker"); }; myWorker.port.onmessage = function (event) { result2.textContent = event.data; console.log("Message received from worker"); }; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Shared Workers basic example</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Shared<br />Workers<br />basic<br />example</h1> <div class="controls" tabindex="0"> <form> <div> <label for="number1">Multiply number 1: </label> <input type="text" id="number1" value="0" /> </div> <div> <label for="number2">Multiply number 2: </label> <input type="text" id="number2" value="0" /> </div> </form> <p class="result1">Result: 0</p> <p><a href="index2.html" target="_blank">Go to second worker page</a></p> </div> <script src="multiply.js"></script> <script src="nosubmit.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html { background-color: #7d2663; font-family: sans-serif; } h1 { margin: 0; font-size: 15vw; letter-spacing: -0.2rem; position: absolute; top: 0; z-index: -1; } p { margin: 0 0 1rem 0; } .controls { padding: 4vw; width: 75%; margin: 3vw auto; background-color: rgba(255, 255, 255, 0.7); border: 5px solid black; opacity: 0.3; transition: 1s all; } .controls:hover, .controls:focus { opacity: 1; } .controls label, .controls p, .controls input { font-size: 3vw; } .controls div { padding-bottom: 1rem; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
onconnect = function (event) { const port = event.ports[0]; port.onmessage = function (e) { const workerResult = `Result: ${e.data[0] * e.data[1]}`; port.postMessage(workerResult); }; };
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const form = document.querySelector("form"); form.onsubmit = function (event) { event.preventDefault(); };
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Shared Workers basic example</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Shared<br />Workers<br />basic<br />example</h1> <div class="controls" tabindex="0"> <form> <div> <label for="number3">Square number: </label> <input type="text" id="number3" value="0" /> </div> </form> <p class="result2">Result: 0</p> </div> <script src="square.js"></script> <script src="nosubmit.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# simple-shared-worker

A simple demo to show shared worker basics.

[View the example live](https://mdn.github.io/dom-examples/web-workers/simple-shared-worker/).
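In a nutshell, both pages talk to the worker through its `port` (assigning `port.onmessage` implicitly starts the port), and the worker answers each connecting script via `onconnect`. A condensed sketch of the pattern used here:

```js
// page script
const myWorker = new SharedWorker("worker.js");
myWorker.port.postMessage([2, 21]);
myWorker.port.onmessage = (e) => console.log(e.data); // "Result: 42"

// worker.js - each connecting page/script gets its own port via onconnect
onconnect = (event) => {
  const port = event.ports[0];
  port.onmessage = (e) => port.postMessage(`Result: ${e.data[0] * e.data[1]}`);
};
```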
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>OffscreenCanvas API</title> <link rel="stylesheet" href="style.css" /> </head> <body> <header> <h1>OffscreenCanvas and worker threads</h1> <p> <b>Note:</b> your browser must support <a href="https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas" ><code>OffscreenCanvas</code></a >. </p> <p>This example has two canvases with incrementing counters.</p> <p> Canvas A is being drawn to on the main thread. Canvas B uses an <a href="https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas" ><code>OffscreenCanvas</code></a > so that we can draw to it in a worker thread. The purpose of this example is to show how a worker thread can keep the rest of our UI responsive. </p> <p> The button below each canvas creates <b>blocking work</b> on either the main thread (Canvas A) or a worker thread (Canvas B). When a thread is blocked, incrementing the related counter from that thread is also blocked. </p> </header> <main> <div class="canvases"> <div> <span class="canvas-title">Canvas A</span> <canvas id="main" width="200" height="200"></canvas> <div> <button id="main-button" onclick="slowMainThread()"> Block main thread </button> </div> </div> <div> <span class="canvas-title">Canvas B</span> <canvas id="worker" width="200" height="200"></canvas> <div> <button id="worker-button" onclick="slowdownWorker()"> Block worker thread </button> </div> </div> </div> <div id="canvas-description"> <p> <b>When the main thread is blocked</b>, all UI elements are frozen.<br /> The hover effect on buttons is also blocked: </p> <button>Example button</button> <p> <b>When the worker thread is blocked</b>, the main thread is free to do work such as the element hover effects and animations. Blocking the worker thread will still prevent Canvas B's counter from being updated, but the rest of the UI stays responsive while this is true. </p> </div> <footer> <p> This demo is based on an example by <a href="https://glitch.com/@PicchiKevin">Kevin Picchi</a>. </p> </footer> </main> <script> const canvasA = document.getElementById("main"); const canvasB = document.getElementById("worker"); const ctxA = canvasA.getContext("2d"); const canvasWidth = ctxA.width; const canvasHeight = ctxA.height; // Create a counter for Canvas A let counter = 0; setInterval(() => { redrawCanvasA(); counter++; }, 100); // Redraw Canvas A counter function redrawCanvasA() { ctxA.clearRect(0, 0, canvasA.width, canvasA.height); ctxA.font = "24px Verdana"; ctxA.textAlign = "center"; ctxA.fillText(counter, canvasA.width / 2, canvasA.height / 2); } // This function creates heavy (blocking) work on a thread function fibonacci(num) { if (num <= 1) { return 1; } return fibonacci(num - 1) + fibonacci(num - 2); } // Call our Fibonacci function on the main thread function slowMainThread() { fibonacci(42); } // Set up a worker thread to render Canvas B const worker = new Worker("worker.js"); // Use the OffscreenCanvas API and send to the worker thread const canvasWorker = canvasB.transferControlToOffscreen(); worker.postMessage({ canvas: canvasWorker }, [canvasWorker]); // A 'slowDown' message we can catch in the worker to start heavy work function slowdownWorker() { worker.postMessage("slowDown"); } </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html, body { font-family: sans-serif; margin: 2rem; } main, footer { display: flex; align-content: left; justify-content: left; flex-direction: column; flex-wrap: wrap; text-align: left; margin-bottom: 4rem; } header { font-size: 1em; min-height: 1em; line-height: 1.5em; margin: 1rem; max-width: 50%; } button, canvas { border-radius: 10px; } canvas { border: 2px solid black; margin: 2rem; } button { background: #797676; padding: 10px; margin: 10px; border: 0; color: white; font-size: 1em; max-width: 150px; } /* Hover effect for buttons */ button:hover, canvas:hover { background: rgb(242, 134, 102); transition: transform 0.5s; transform: scale(1.3, 1.3); color: black; } #canvas-description { max-width: 50%; text-align: left; } .ui-example { margin: 2rem; } .ui-example button:hover { transition: transform 0.5s; transform: scale(1.3, 1.3); } .canvas-title { display: block; font-size: 1.4em; font-weight: bold; margin-bottom: 1rem; } .canvases { text-align: center; display: flex; flex-wrap: wrap; justify-content: left; align-content: left; padding: 2rem; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
let canvasB = null; let ctxWorker = null; // Waiting to receive the OffScreenCanvas self.onmessage = (event) => { if (event.data === "slowDown") { fibonacci(42); } else { canvasB = event.data.canvas; ctxWorker = canvasB.getContext("2d"); startCounting(); } }; // Fibonacci function to add some delay to the thread function fibonacci(num) { if (num <= 1) { return 1; } return fibonacci(num - 1) + fibonacci(num - 2); } // Start the counter for Canvas B let counter = 0; function startCounting() { setInterval(() => { redrawCanvasB(); counter++; }, 100); } // Redraw Canvas B text function redrawCanvasB() { ctxWorker.clearRect(0, 0, canvasB.width, canvasB.height); ctxWorker.font = "24px Verdana"; ctxWorker.textAlign = "center"; ctxWorker.fillText(counter, canvasB.width / 2, canvasB.height / 2); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# offscreen-canvas-worker

This example shows how to use an `OffscreenCanvas` with a web worker.

[View this demo live](https://mdn.github.io/dom-examples/web-workers/offscreen-canvas-worker/).

To run this code locally you'll need to serve it. If you have [node](https://nodejs.org/) installed, navigate to the folder containing the code and run `lite-server`:

```bash
cd web-workers/offscreen-canvas-worker
npx lite-server
```
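The core technique, as used by `index.html` and `worker.js` here: transfer control of the on-page canvas to the worker, then draw to it there with an ordinary 2D context:

```js
// Main thread: hand the canvas over to a worker
const worker = new Worker("worker.js");
const offscreen = document.querySelector("canvas").transferControlToOffscreen();
worker.postMessage({ canvas: offscreen }, [offscreen]); // transfer, don't copy

// worker.js: draw off the main thread with a regular 2D context
onmessage = (event) => {
  const ctx = event.data.canvas.getContext("2d");
  ctx.fillText("Hello from a worker", 10, 50);
};
```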
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
self.onmessage = function (event) { const userNum = Number(event.data); fibonacci(userNum); }; function fibonacci(num) { let a = 1; let b = 0; let temp; while (num >= 0) { temp = a; a = a + b; b = temp; num--; } self.postMessage(b); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <title>Fibonacci number generator</title> <style> body { width: 500px; } div, p { margin-bottom: 20px; } </style> </head> <body> <form> <div> <label for="number" >Enter a number that is an index position in the fibonacci sequence to see what number is in that position (e.g. enter 5 and you'll get a result of 8 — fibonacci index position 5 is 8).</label > <input type="number" id="number" /> </div> <div> <input type="submit" /> </div> </form> <p id="result"></p> <script> const form = document.querySelector("form"); const input = document.querySelector('input[type="number"]'); const result = document.querySelector("p#result"); const worker = new Worker("fibonacci.js"); worker.onmessage = function (event) { result.textContent = event.data; console.log("Got: " + event.data + "\n"); }; worker.onerror = function (error) { console.log(`Worker error: ${error.message} \n`); throw error; }; form.onsubmit = function (event) { event.preventDefault(); worker.postMessage(input.value); input.value = ""; }; </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# fibonacci-worker

A web worker that calculates Fibonacci numbers.

[See it live](https://mdn.github.io/dom-examples/web-workers/fibonacci-worker/).
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>Web Workers basic example</title> <link rel="stylesheet" href="style.css" /> </head> <body> <h1>Web<br />Workers<br />basic<br />example</h1> <div class="controls" tabindex="0"> <form> <div> <label for="number1">Multiply number 1: </label> <input type="text" id="number1" value="0" /> </div> <div> <label for="number2">Multiply number 2: </label> <input type="text" id="number2" value="0" /> </div> </form> <p class="result">Result: 0</p> </div> <script src="main.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html { background-color: #7D2663; font-family: sans-serif; } h1 { margin: 0; font-size: 20vmin; letter-spacing: -0.2rem; position: absolute; top: 0; z-index: -1; } p { margin: 0; } .controls { padding: 4vw; width: 75%; margin: 10vw auto; background-color: rgba(255,255,255,0.7); border: 5px solid black; opacity: 0.3; transition: 1s all; } .controls:hover, .controls:focus { opacity: 1; } .controls label, .controls p, .controls input { font-size: 3vw; } .controls div { padding-bottom: 1rem; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const first = document.querySelector('#number1'); const second = document.querySelector('#number2'); const result = document.querySelector('.result'); if (window.Worker) { const myWorker = new Worker("worker.js"); [first, second].forEach(input => { input.onchange = function() { myWorker.postMessage([first.value, second.value]); console.log('Message posted to worker'); } }) myWorker.onmessage = function(e) { result.textContent = e.data; console.log('Message received from worker'); } } else { console.log('Your browser doesn\'t support web workers.'); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
onmessage = function(e) { console.log('Worker: Message received from main script'); const result = e.data[0] * e.data[1]; if (isNaN(result)) { postMessage('Please write two numbers'); } else { const workerResult = 'Result: ' + result; console.log('Worker: Posting message back to main script'); postMessage(workerResult); } }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
# simple-web-worker

A simple web worker test.

[View this demo live](https://mdn.github.io/dom-examples/web-workers/simple-web-worker/).

To run this code locally you'll need to serve it. For example, if you have [node](https://nodejs.org/) installed, navigate to the folder containing the code and run: `npx lite-server`
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>ScrollToOptions example</title> <style> html { font-family: sans-serif; } body { width: 60%; margin: 0; } p { width: 3000px; font-size: 2rem; letter-spacing: 1px; line-height: 1.5; } form { background: white; width: 300px; position: fixed; top: 20px; left: 20px; } div { display: flex; justify-content: space-between; margin-bottom: 10px; } div:first-of-type { margin-top: 10px; } fieldset, legend { border-radius: 5px; } legend { background: black; color: white; padding: 5px; } </style> <script defer src="main.js"></script> </head> <body> <section> <form> <fieldset> <legend>Choose position to scroll to!</legend> <div><label for="left">Left (number)</label> <input id="left" name="left" type="number"></div> <div><label for="top">Top (number)</label> <input id="top" name="top" type="number"></div> <div><label for="scroll">Smooth scroll?</label> <input id="scroll" name="scroll" type="checkbox"></div> <div> <button>Scroll</button> </div> </fieldset> </form> </section> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam sodales, mauris in interdum dictum, libero libero tristique lacus, eget rutrum sem est tincidunt dui. In hac habitasse platea dictumst. Ut est tortor, consequat sit amet leo ut, maximus sagittis ante. Sed at lectus mauris. In enim diam, sodales non gravida vitae, ullamcorper gravida leo. Aenean pulvinar dictum ipsum non tristique. Curabitur quis tortor suscipit, dictum libero a, sagittis odio. Interdum et malesuada fames ac ante ipsum primis in faucibus.</p> <p>Donec quis felis libero. Fusce sit amet pellentesque quam. Integer quis volutpat dui. Sed dictum viverra dictum. Nullam placerat et felis ut varius. Curabitur mollis vehicula commodo. Suspendisse viverra sit amet nulla sit amet malesuada.</p> <p>Suspendisse potenti. Nunc commodo vel elit vel malesuada. Nulla non ante nec arcu malesuada congue. In ut condimentum nulla. Mauris suscipit est sit amet metus rhoncus, eget porttitor mauris convallis. Nulla sit amet cursus mi, et tincidunt dolor. Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce molestie felis at justo sollicitudin, non condimentum orci pellentesque. Quisque in ante at mauris porttitor ornare nec et velit. Aliquam mi orci, volutpat sit amet fringilla sed, mollis nec lorem. Nunc sit amet convallis turpis, eu auctor dui. Etiam auctor mi in lorem faucibus, vel feugiat libero tempus. Nulla facilisi. Proin facilisis libero nisl, eu mollis est ullamcorper in. Nam ullamcorper mollis odio, eu vehicula magna ornare vitae. Mauris nec pharetra augue.</p> <p>Aliquam interdum, augue ullamcorper posuere consequat, dolor libero vehicula tellus, et dapibus orci ipsum vel leo. Maecenas molestie fringilla magna mollis luctus. Donec ultricies eleifend feugiat. Curabitur quis venenatis metus, tincidunt vestibulum augue. Integer non mollis mauris. Aenean bibendum consectetur varius. Duis eget odio quis magna tristique tristique ac vitae neque. Nulla facilisi. Praesent eu facilisis ipsum, eu convallis nisl. Pellentesque congue tempor massa at aliquam.</p> <p>Ut pulvinar ex at ante dictum, vitae iaculis dui pretium. Suspendisse rhoncus condimentum tortor ac mollis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aliquam vulputate tristique purus quis bibendum. Donec feugiat mattis sodales. Suspendisse eu auctor ligula. Donec quis justo odio. Praesent sed suscipit est. Nulla posuere nisl ac placerat vestibulum. 
Integer vulputate, sapien non luctus ultrices, nibh enim ullamcorper lorem, ut semper ligula tellus quis turpis. Etiam interdum lectus ac auctor rhoncus. Sed nulla nulla, ullamcorper pulvinar nisl vitae, varius dapibus mi. Sed augue nulla, efficitur in consectetur et, hendrerit ut purus. Aenean laoreet eros a leo gravida tempor. Mauris at mauris mauris.</p> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam sodales, mauris in interdum dictum, libero libero tristique lacus, eget rutrum sem est tincidunt dui. In hac habitasse platea dictumst. Ut est tortor, consequat sit amet leo ut, maximus sagittis ante. Sed at lectus mauris. In enim diam, sodales non gravida vitae, ullamcorper gravida leo. Aenean pulvinar dictum ipsum non tristique. Curabitur quis tortor suscipit, dictum libero a, sagittis odio. Interdum et malesuada fames ac ante ipsum primis in faucibus.</p> <p>Donec quis felis libero. Fusce sit amet pellentesque quam. Integer quis volutpat dui. Sed dictum viverra dictum. Nullam placerat et felis ut varius. Curabitur mollis vehicula commodo. Suspendisse viverra sit amet nulla sit amet malesuada.</p> <p>Suspendisse potenti. Nunc commodo vel elit vel malesuada. Nulla non ante nec arcu malesuada congue. In ut condimentum nulla. Mauris suscipit est sit amet metus rhoncus, eget porttitor mauris convallis. Nulla sit amet cursus mi, et tincidunt dolor. Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce molestie felis at justo sollicitudin, non condimentum orci pellentesque. Quisque in ante at mauris porttitor ornare nec et velit. Aliquam mi orci, volutpat sit amet fringilla sed, mollis nec lorem. Nunc sit amet convallis turpis, eu auctor dui. Etiam auctor mi in lorem faucibus, vel feugiat libero tempus. Nulla facilisi. Proin facilisis libero nisl, eu mollis est ullamcorper in. Nam ullamcorper mollis odio, eu vehicula magna ornare vitae. Mauris nec pharetra augue.</p> <p>Aliquam interdum, augue ullamcorper posuere consequat, dolor libero vehicula tellus, et dapibus orci ipsum vel leo. Maecenas molestie fringilla magna mollis luctus. Donec ultricies eleifend feugiat. Curabitur quis venenatis metus, tincidunt vestibulum augue. Integer non mollis mauris. Aenean bibendum consectetur varius. Duis eget odio quis magna tristique tristique ac vitae neque. Nulla facilisi. Praesent eu facilisis ipsum, eu convallis nisl. Pellentesque congue tempor massa at aliquam.</p> <p>Ut pulvinar ex at ante dictum, vitae iaculis dui pretium. Suspendisse rhoncus condimentum tortor ac mollis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aliquam vulputate tristique purus quis bibendum. Donec feugiat mattis sodales. Suspendisse eu auctor ligula. Donec quis justo odio. Praesent sed suscipit est. Nulla posuere nisl ac placerat vestibulum. Integer vulputate, sapien non luctus ultrices, nibh enim ullamcorper lorem, ut semper ligula tellus quis turpis. Etiam interdum lectus ac auctor rhoncus. Sed nulla nulla, ullamcorper pulvinar nisl vitae, varius dapibus mi. Sed augue nulla, efficitur in consectetur et, hendrerit ut purus. Aenean laoreet eros a leo gravida tempor. Mauris at mauris mauris.</p> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam sodales, mauris in interdum dictum, libero libero tristique lacus, eget rutrum sem est tincidunt dui. In hac habitasse platea dictumst. Ut est tortor, consequat sit amet leo ut, maximus sagittis ante. Sed at lectus mauris. In enim diam, sodales non gravida vitae, ullamcorper gravida leo. 
Aenean pulvinar dictum ipsum non tristique. Curabitur quis tortor suscipit, dictum libero a, sagittis odio. Interdum et malesuada fames ac ante ipsum primis in faucibus.</p> <p>Donec quis felis libero. Fusce sit amet pellentesque quam. Integer quis volutpat dui. Sed dictum viverra dictum. Nullam placerat et felis ut varius. Curabitur mollis vehicula commodo. Suspendisse viverra sit amet nulla sit amet malesuada.</p> <p>Suspendisse potenti. Nunc commodo vel elit vel malesuada. Nulla non ante nec arcu malesuada congue. In ut condimentum nulla. Mauris suscipit est sit amet metus rhoncus, eget porttitor mauris convallis. Nulla sit amet cursus mi, et tincidunt dolor. Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce molestie felis at justo sollicitudin, non condimentum orci pellentesque. Quisque in ante at mauris porttitor ornare nec et velit. Aliquam mi orci, volutpat sit amet fringilla sed, mollis nec lorem. Nunc sit amet convallis turpis, eu auctor dui. Etiam auctor mi in lorem faucibus, vel feugiat libero tempus. Nulla facilisi. Proin facilisis libero nisl, eu mollis est ullamcorper in. Nam ullamcorper mollis odio, eu vehicula magna ornare vitae. Mauris nec pharetra augue.</p> <p>Aliquam interdum, augue ullamcorper posuere consequat, dolor libero vehicula tellus, et dapibus orci ipsum vel leo. Maecenas molestie fringilla magna mollis luctus. Donec ultricies eleifend feugiat. Curabitur quis venenatis metus, tincidunt vestibulum augue. Integer non mollis mauris. Aenean bibendum consectetur varius. Duis eget odio quis magna tristique tristique ac vitae neque. Nulla facilisi. Praesent eu facilisis ipsum, eu convallis nisl. Pellentesque congue tempor massa at aliquam.</p> <p>Ut pulvinar ex at ante dictum, vitae iaculis dui pretium. Suspendisse rhoncus condimentum tortor ac mollis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aliquam vulputate tristique purus quis bibendum. Donec feugiat mattis sodales. Suspendisse eu auctor ligula. Donec quis justo odio. Praesent sed suscipit est. Nulla posuere nisl ac placerat vestibulum. Integer vulputate, sapien non luctus ultrices, nibh enim ullamcorper lorem, ut semper ligula tellus quis turpis. Etiam interdum lectus ac auctor rhoncus. Sed nulla nulla, ullamcorper pulvinar nisl vitae, varius dapibus mi. Sed augue nulla, efficitur in consectetur et, hendrerit ut purus. Aenean laoreet eros a leo gravida tempor. Mauris at mauris mauris.</p> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
let scrollOptions; const form = document.querySelector('form'); const leftInput = document.getElementById('left'); const topInput = document.getElementById('top'); const scrollInput = document.getElementById('scroll'); form.addEventListener('submit', (e) => { e.preventDefault(); scrollOptions = { left: leftInput.value, top: topInput.value, behavior: scrollInput.checked ? 'smooth' : 'auto' } window.scrollTo(scrollOptions); });
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>WebGPU triangle</title> <script src="script.js" defer></script> </head> <body> <h1>WebGPU triangle</h1> <canvas id="gpuCanvas" width="800" height="600"></canvas> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
// Clear color for GPURenderPassDescriptor const clearColor = { r: 0.0, g: 0.5, b: 1.0, a: 1.0 }; // Vertex data for triangle // Each vertex has 8 values representing position and color: X Y Z W R G B A const vertices = new Float32Array([ 0.0, 0.6, 0, 1, 1, 0, 0, 1, -0.5, -0.6, 0, 1, 0, 1, 0, 1, 0.5, -0.6, 0, 1, 0, 0, 1, 1 ]); // Vertex and fragment shaders const shaders = ` struct VertexOut { @builtin(position) position : vec4f, @location(0) color : vec4f } @vertex fn vertex_main(@location(0) position: vec4f, @location(1) color: vec4f) -> VertexOut { var output : VertexOut; output.position = position; output.color = color; return output; } @fragment fn fragment_main(fragData: VertexOut) -> @location(0) vec4f { return fragData.color; } `; // Main function async function init() { // 1: request adapter and device if (!navigator.gpu) { throw Error('WebGPU not supported.'); } const adapter = await navigator.gpu.requestAdapter(); if (!adapter) { throw Error('Couldn\'t request WebGPU adapter.'); } let device = await adapter.requestDevice(); // 2: Create a shader module from the shaders template literal const shaderModule = device.createShaderModule({ code: shaders }); // 3: Get reference to the canvas to render on const canvas = document.querySelector('#gpuCanvas'); const context = canvas.getContext('webgpu'); context.configure({ device: device, format: navigator.gpu.getPreferredCanvasFormat(), alphaMode: 'premultiplied' }); // 4: Create vertex buffer to contain vertex data const vertexBuffer = device.createBuffer({ size: vertices.byteLength, // make it big enough to store vertices in usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST, }); // Copy the vertex data over to the GPUBuffer using the writeBuffer() utility function device.queue.writeBuffer(vertexBuffer, 0, vertices, 0, vertices.length); // 5: Create a GPUVertexBufferLayout and GPURenderPipelineDescriptor to provide a definition of our render pipline const vertexBuffers = [{ attributes: [{ shaderLocation: 0, // position offset: 0, format: 'float32x4' }, { shaderLocation: 1, // color offset: 16, format: 'float32x4' }], arrayStride: 32, stepMode: 'vertex' }]; const pipelineDescriptor = { vertex: { module: shaderModule, entryPoint: 'vertex_main', buffers: vertexBuffers }, fragment: { module: shaderModule, entryPoint: 'fragment_main', targets: [{ format: navigator.gpu.getPreferredCanvasFormat() }] }, primitive: { topology: 'triangle-list' }, layout: 'auto' }; // 6: Create the actual render pipeline const renderPipeline = device.createRenderPipeline(pipelineDescriptor); // 7: Create GPUCommandEncoder to issue commands to the GPU // Note: render pass descriptor, command encoder, etc. are destroyed after use, fresh one needed for each frame. const commandEncoder = device.createCommandEncoder(); // 8: Create GPURenderPassDescriptor to tell WebGPU which texture to draw into, then initiate render pass const renderPassDescriptor = { colorAttachments: [{ clearValue: clearColor, loadOp: 'clear', storeOp: 'store', view: context.getCurrentTexture().createView() }] }; const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor); // 9: Draw the triangle passEncoder.setPipeline(renderPipeline); passEncoder.setVertexBuffer(0, vertexBuffer); passEncoder.draw(3); // End the render pass passEncoder.end(); // 10: End frame by passing array of command buffers to command queue for execution device.queue.submit([commandEncoder.finish()]); } init();
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
// helper function const RADIUS = 20; // setup of the canvas const canvas = document.querySelector("canvas"); const ctx = canvas.getContext("2d"); let x = 50; let y = 50; function canvasDraw() { // Find center x and y for any partial balls function find2ndCenter(pos, max) { if (pos < RADIUS) { pos += max; } else if (pos + RADIUS > max) { pos -= max; } else { pos = 0; } return pos; } function drawBall(x, y) { ctx.beginPath(); ctx.arc(x, y, RADIUS, 0, 2 * Math.PI, true); ctx.fill(); } const x2 = find2ndCenter(x, canvas.width); const y2 = find2ndCenter(y, canvas.height); ctx.fillStyle = "black"; ctx.fillRect(0, 0, canvas.width, canvas.height); ctx.fillStyle = "#f00"; /* Draw the main ball (which may or may not be a full ball) and any * partial balls that result from moving close to one of the edges */ drawBall(x, y); // main ball if (x2) { drawBall(x2, y); // partial ball } if (y2) { drawBall(x, y2); // partial ball } if (x2 && y2) { drawBall(x2, y2); // partial ball } } // end of function canvasDraw canvasDraw(); canvas.addEventListener("click", async () => { if(!document.pointerLockElement) { await canvas.requestPointerLock({ unadjustedMovement: true, }); } }); // pointer lock event listeners document.addEventListener("pointerlockchange", lockChangeAlert, false); function lockChangeAlert() { if (document.pointerLockElement === canvas) { console.log("The pointer lock status is now locked"); document.addEventListener("mousemove", updatePosition, false); } else { console.log("The pointer lock status is now unlocked"); document.removeEventListener("mousemove", updatePosition, false); } } const tracker = document.getElementById("tracker"); let animation; function updatePosition(e) { function updateCoord(pos, delta, max) { pos += delta; pos %= max; if (pos < 0) { pos += max; } return pos; } x = updateCoord(x, e.movementX, canvas.width); y = updateCoord(y, e.movementY, canvas.height); tracker.textContent = `X position: ${x}, Y position: ${y}`; if (!animation) { animation = requestAnimationFrame(function () { animation = null; canvasDraw(); }); } }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE HTML> <html lang="en-US"> <head> <meta charset="UTF-8"> <title>Pointer lock demo</title> <link type="text/css" rel="stylesheet" href="style.css"> </head> <body> <div class="information"> <h1>Pointer lock demo</h1> <p>This demo demonstrates usage of the pointer lock API. Click on the canvas area and your mouse will directly control the ball inside the canvas, not your mouse pointer. You can press escape to return to the standard expected state.</p> </div> <canvas width="640" height="360"> Your browser does not support HTML5 canvas </canvas> <div id="tracker"></div> <script src="app.js"></script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
html, body { margin: 0; padding: 0; } html { font-family: sans-serif; } canvas { display: block; margin: 0 auto; border: 1px solid black; } .information { width: 640px; margin: 0 auto 50px; } #tracker { position: absolute; top: 0; right: 10px; background-color: white; } h1 { font-size: 200%; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
const audioCtx = new window.AudioContext(); const playBtn = document.querySelector('.play'); const mediaDeviceBtn = document.querySelector('.media-devices'); const selectDiv = document.querySelector('.select-container'); playBtn.disabled = true; mediaDeviceBtn.addEventListener('click', async () => { if ("setSinkId" in AudioContext.prototype) { selectDiv.innerHTML = ''; const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); const devices = await navigator.mediaDevices.enumerateDevices(); const label = document.createElement('label'); label.innerHTML = 'Select output device:'; label.htmlFor = 'output-device-select'; const select = document.createElement('select'); select.id = 'output-device-select'; selectDiv.appendChild(label); selectDiv.appendChild(select); selectDiv.style.margin = '0 0 20px'; selectDiv.style.padding = '20px 0'; selectDiv.style.borderTop = '1px solid #ddd'; selectDiv.style.borderBottom = '1px solid #ddd'; const audioOutputs = devices.filter((device) => device.kind === 'audiooutput' && device.deviceId !== 'default'); audioOutputs.forEach((device) => { const option = document.createElement('option') option.value = device.deviceId; option.textContent = device.label; select.appendChild(option); }); const option = document.createElement('option') option.value = 'none'; option.textContent = 'None'; select.appendChild(option); playBtn.disabled = false; select.addEventListener('change', async () => { if(select.value === 'none') { await audioCtx.setSinkId({ type : 'none' }); } else { await audioCtx.setSinkId(select.value); } }) // Stop audio tracks, as we don't need them running now the permission has been granted stream.getAudioTracks().forEach((track) => track.stop()); } else { const para = document.createElement('p'); para.innerHTML = 'Your browser doesn\'t support <code>AudioContext.setSinkId()</code>. Check the <a href="https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId#browser_compatibility">browser compatibility information</a> to see which browsers support it.' selectDiv.appendChild(para); } }); // Create an empty three-second stereo buffer at the sample rate of the AudioContext const myArrayBuffer = audioCtx.createBuffer( 2, audioCtx.sampleRate * 3, audioCtx.sampleRate ); // Fill the buffer with white noise; // just random values between -1.0 and 1.0 for (let channel = 0; channel < myArrayBuffer.numberOfChannels; channel++) { const nowBuffering = myArrayBuffer.getChannelData(channel); for (let i = 0; i < myArrayBuffer.length; i++) { nowBuffering[i] = Math.random() * 2 - 1; } } const gain = audioCtx.createGain(); gain.gain.value = 0.25; playBtn.addEventListener('click', () => { const source = audioCtx.createBufferSource(); source.buffer = myArrayBuffer; source.connect(gain); gain.connect(audioCtx.destination); source.start(); if(audioCtx.sinkId === '') { console.log('Audio playing on default device'); } else if(typeof audioCtx.sinkId === 'object' && audioCtx.sinkId.type === 'none') { console.log('Audio not playing on any device'); } else { console.log(`Audio playing on device ${ audioCtx.sinkId }`); } }); audioCtx.addEventListener('sinkchange', () => { if(typeof audioCtx.sinkId === 'object' && audioCtx.sinkId.type === 'none') { console.log('Audio changed to not play on any device'); } else { console.log(`Audio output device changed to ${ audioCtx.sinkId }`); } });
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>setSinkId example</title> <!-- Import the webpage's stylesheet --> <link rel="stylesheet" href="style.css" /> <!-- Import the webpage's javascript file --> <script src="app.js" defer></script> </head> <body> <h1> SetSinkId test example </h1> <button class="media-devices"> Get media devices </button> <div class="select-container"> </div> <button class="play"> Play audio </button> <footer> <p> Check out the <a href="https://github.com/mdn/dom-examples/tree/main/audiocontext-setsinkid">source code</a>. </p> </footer> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
button, select, label { font-weight: 400; line-height: 1.5; font-size: 1rem; font-family: sans-serif; } button, select { padding: 6px 12px; text-align: center; background-color: transparent; border-radius: .25rem; } button { color: #0d6efd; border: 1px solid transparent; border-color: #0d6efd; cursor: pointer; outline: 0; display: inline-block; transition: color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out; margin-bottom: 20px; } button:hover { color: #fff; background-color: #0d6efd; border-color: #0d6efd; } button:disabled { color: #999; border-color: #999; background: white; cursor: not-allowed; } label { padding-right: 24px; } .select-container p { color: red; margin-bottom: 36px; }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="description" content="A resize event demo on MDN" /> <title>Window Size Logger</title> </head> <body> <p>Resize the browser window to fire the <code>resize</code> event.</p> <p>Window height: <span id="height"></span></p> <p>Window width: <span id="width"></span></p> <script> const heightOutput = document.querySelector("#height"); const widthOutput = document.querySelector("#width"); function resizeListener() { heightOutput.textContent = window.innerHeight; widthOutput.textContent = window.innerWidth; } window.addEventListener("resize", resizeListener); </script> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>CSS Painting API examples</title> <link rel="stylesheet" href="style.css" /> <script src="script.js" defer></script> </head> <body> <ul> <li> <a href="half-highlight-fixed-size">Half highlight, fixed size</a> </li> <li> <a href="half-highlight-paintsize">Half highlight, variable size</a> </li> <li><a href="fancy-header-highlight">Fancy highlight</a></li> <li><a href="custom-properties">Using custom properties</a></li> <li><a href="hollow-highlight">Hollow highlight</a></li> </ul> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>CSS Painting API example</title> <link rel="stylesheet" href="style.css" /> <script src="script.js" defer></script> </head> <body> <h1 class="fancy">My Cool Header</h1> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
registerPaint(
  "headerHighlight",
  class {
    /* define if alpha transparency is allowed;
       alpha is set to true by default. If set to false,
       all colors used on the canvas will be fully opaque */
    static get contextOptions() {
      return { alpha: true };
    }

    /* ctx is the 2D drawing context, a subset of the HTML Canvas API */
    paint(ctx) {
      ctx.fillStyle = "hsl(55 90% 60% / 1.0)";
      ctx.fillRect(0, 15, 200, 20); /* order: x, y, w, h */
    }
  }
);
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
.fancy { background-image: paint(headerHighlight); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
CSS.paintWorklet.addModule("header-highlight.js");
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta name="viewport" content="width=device-width" /> <title>CSS Painting API example</title> <link rel="stylesheet" href="style.css" /> <script src="script.js" defer></script> </head> <body> <h1 class="fancy">Largest Header</h1> <h3 class="fancy">Medium size header</h3> <h6 class="fancy">Smallest Header</h6> </body> </html>
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
registerPaint( "headerHighlight", class { static get inputProperties() { return ["--highColor"]; } static get contextOptions() { return { alpha: true }; } paint(ctx, size, props) { /* set where to start the highlight & dimensions */ const x = 0; const y = size.height * 0.3; const blockWidth = size.width * 0.33; const highlightHeight = size.height * 0.85; const color = props.get("--highColor"); ctx.fillStyle = color; ctx.beginPath(); ctx.moveTo(x, y); ctx.lineTo(blockWidth, y); ctx.lineTo(blockWidth + highlightHeight, highlightHeight); ctx.lineTo(x, highlightHeight); ctx.lineTo(x, y); ctx.closePath(); ctx.fill(); /* create the dashes */ for (let start = 0; start < 8; start += 2) { ctx.beginPath(); ctx.moveTo(blockWidth + start * 10 + 10, y); ctx.lineTo(blockWidth + start * 10 + 20, y); ctx.lineTo( blockWidth + start * 10 + 20 + highlightHeight, highlightHeight ); ctx.lineTo( blockWidth + start * 10 + 10 + highlightHeight, highlightHeight ); ctx.lineTo(blockWidth + start * 10 + 10, y); ctx.closePath(); ctx.fill(); } } // paint } );
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
.fancy { background-image: paint(headerHighlight); } h1 { --highColor: hsl(155 90% 60% / 0.7); } h3 { --highColor: hsl(255 90% 60% / 0.5); } h6 { --highColor: hsl(355 90% 60% / 0.3); }
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }
CSS.paintWorklet.addModule("header-highlight.js");
{ "repo_name": "mdn/dom-examples", "stars": "2648", "repo_language": "JavaScript", "file_name": "style.css", "mime_type": "text/plain" }